| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
konono/equlipse | openstack-install/charm/trusty/charm-keystone/tests/charmhelpers/core/strutils.py | Python | mit | 3,680 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import re
def bool_from_string(value):
"""Interpret string value as boolean.
Returns True if value translates to True otherwise False.
"""
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg)
value = value.strip().lower()
if value in ['y', 'yes', 'true', 't', 'on']:
return True
elif value in ['n', 'no', 'false', 'f', 'off']:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)
raise ValueError(msg)
def bytes_from_string(value):
"""Interpret human readable string value as bytes.
Returns int
"""
BYTE_POWER = {
'K': 1,
'KB': 1,
'M': 2,
'MB': 2,
'G': 3,
'GB': 3,
'T': 4,
'TB': 4,
'P': 5,
'PB': 5,
}
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
if not matches:
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
class BasicStringComparator(object):
"""Provides a clas
|
s that will compare strings from an iterator type object.
Used to provide > and < comparisons on strings that may not necessarily be
alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
z-wrap.
"""
_list = None
def __init__(self, item):
if self._list is None:
raise Exception("Must define the _list in the class definition!")
try:
self.index = self._list.index(item)
except Exception:
raise KeyError("Item '{}' is not in list '{}'"
.format(item, self._list))
def __eq__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index == self._list.index(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index < self._list.index(other)
def __ge__(self, other):
return not self.__lt__(other)
def __gt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index > self._list.index(other)
def __le__(self, other):
return not self.__gt__(other)
def __str__(self):
"""Always give back the item at the index so it can be used in
comparisons like:
s_mitaka = CompareOpenStack('mitaka')
s_newton = CompareOpenStack('newton')
assert s_newton > s_mitaka
@returns: <string>
"""
return self._list[self.index]
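# --- Usage sketch (added for illustration; not part of the upstream charmhelpers module) ---
# A hypothetical subclass only needs to supply the ordered _list; items are then
# compared by their position in that list. bytes_from_string parses sizes like '10KB'.
class _CompareHostReleases(BasicStringComparator):
    _list = ['trusty', 'xenial', 'bionic']

if __name__ == '__main__':
    assert _CompareHostReleases('xenial') > 'trusty'
    assert _CompareHostReleases('trusty') < _CompareHostReleases('bionic')
    assert bytes_from_string('10KB') == 10 * 1024
    assert bool_from_string('Yes') is True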
|
noemis-fr/old-custom | e3z_account_export_grouped/account_export.py | Python | agpl-3.0 | 28,581 | 0.006621 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Asgard Ledger Export (ALE) module,
# Copyright (C) 2005 - 2013
# Héonium (http://www.heonium.com). All Right Reserved
#
# Asgard Ledger Export (ALE) module
# is free software: you can redistribute it and/or modify it under the terms
# of the Affero GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Asgard Ledger Export (ALE) module
# is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the Affero GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import base64
import copy
import time
import pooler
from osv import fields, osv
from tools.translate import _
from heo_common.hnm_lib import *
from heo_common.files_tools import *
import openerp.addons.decimal_precision as dp
class asgard_ledger_export_fields(osv.osv):
_inherit = "asgard.ledger.export.fields"
_columns = {
'field_account': fields.many2one('ir.model.fields', 'Fields', domain=[
('model', '=', 'account.export.grouped.statement.line.grouped')
],
help="Select which filed you want export."),
}
asgard_ledger_export_fields()
class asgard_ledger_export_statement(osv.osv):
"""
List of exports already carried out
"""
_inherit = "asgard.ledger.export.statement"
#def _get_grouped_ales_line(self, cr, uid, ids, field_name, arg, context=None):used for function field
def _compute_grouped_ales_line(self, cr, uid, ids, context=None):
result = {}
pool = pooler.get_pool(cr.dbname)
journal_period_obj = pool.get('account.journal.period')
grouped_ales_line_obj = pool.get('account.export.grouped.statement.line.grouped')
for ale in self.browse(cr, uid, ids, context={}):
if not ale.journal_period_id:
    raise osv.except_osv(_('No Journal/Period selected !'), _('You have to select a Journal/Period before populating lines.'))
jp_ids = journal_period_obj.read(cr, uid, map(lambda x:x.id,ale.journal_period_id), ['journal_id','period_id'])
start = time.time()
# remove existing lines
cr.execute('delete from account_export_grouped_statement_line_grouped where ales_id = %s',
([str(ale.id)]))
offset = 0
results = [1]
grouped_line_ids=[]
#
# while len(results):
# cr.execute('SELECT %s, ml.date, l.partner_ref, l.period_id \
# ,l.journal_id, sum(ml.credit) as credit, sum(ml.debit) as debit\
# ,l.account_id, l.company_id \
# FROM \
# asgard_ledger_export_statement_line l \
# inner join account_move_line ml on ml.id = l.move_line_id \
# WHERE l.partner_is_company = True GROUP BY ml.date, l.partner_ref,l.partner_is_company,l.period_id \
# ,l.journal_id \
# ,l.account_id, l.company_id \
# LIMIT 500 OFFSET %s',
# (( ale.id, str(offset))))
# results = cr.fetchall()
# _columns = {
# 'ales_id': fields.many2one('asgard.ledger.export.statement', 'Asgard Statement', required=True, ondelete='cascade', select=True),
# 'date': fields.date(string='Date Created Entry', required=True),
# 'partner_ref': fields.char(string='Partner ref'),
# 'period_id': fields.many2one('account.period', string='Period', required=True),
# 'journal_id': fields.many2one('account.journal', string='Journal', required=True),
# 'credit': fields.float('Credit'),
# 'debit': fields.float(string='Debit'),
# 'account_id': fields.many2one('account.account', string='Account', required=True),
# 'text_line': fields.text('Line exported', readonly=True,
# help="Value of the line when it's exported in file (From format field)"),
# 'company_id': fields.many2one('res.company', 'Company', required=True),
# }
# for id, date, partner_ref,partner_is_company, period_id ,journal_id, credit, debit ,account_id, company_id in results:
# ales_line_id = grouped_ales_line_obj.create(cr, uid, {
# 'ales_id': ale.id,
# 'date': date,
# 'partner_ref': partner_ref,
# 'period_id': period_id ,
# 'journal_id': journal_id,
# 'credit': credit,
# 'debit': debit ,
# 'account_id': account_id,
# 'company_id': company_id
# })
# # grouped_line_ids.append((4,ales_line_id)) # used for function field
# grouped_line_ids.append(ales_line_id)
#offset += len(results)
#result[ale.id]= grouped_line_ids # used for function field
# grouping of individual (non-company) partners
#while len(results):
# group company partners by debit
#sum(round(ml.credit,2)) as credit, sum(round(ml.debit,2)) as debit
cr.execute('INSERT INTO account_export_grouped_statement_line_grouped \
(ales_id, date, partner_ref, move_id,period_id, journal_id, credit, debit, account_id, company_id, entry_name) \
SELECT l.ales_id , ml.date,l.partner_ref,l.move_id, l.period_id \
,l.journal_id, sum(ml.credit) as credit, sum(ml.debit) as debit\
,l.account_id, l.company_id, mv.name\
FROM \
asgard_ledger_export_statement_line l \
inner join account_move_line ml on ml.id = l.move_line_id \
inner join account_move mv on mv.id = ml.move_id \
WHERE l.ales_id = %s AND l.partner_is_company = True AND ml.credit =0 \
GROUP BY l.ales_id , ml.date, l.partner_ref, l.move_id, l.partner_is_company,l.period_id \
,l.journal_id \
,l.account_id, l.company_id , mv.name \
order by l.journal_id, ml.date, mv.name, l.account_id \
OFFSET %s',
(( ale.id, str(offset))))
# group company partners by credit
cr.execute('INSERT INTO account_export_grouped_statement_line_grouped \
(ales_id, date, partner_ref, move_id, period_id, journal_id, credit, debit, account_id, company_id, entry_name) \
SELECT l.ales_id , ml.date,l.partner_ref,l.move_id, l.period_id \
,l.journal_id, sum(ml.credit) as credit, sum(ml.debit) as debit\
,l.account_id, l.company_id , mv.name\
FROM \
asgard_ledger_export_statement_line l \
inner join account_move_line ml on ml.id = l.move_line_id \
inner join account_move mv on mv.id = ml.move_id \
WHERE l.ales_id = %s AND l.partner_is_company = True AND ml.debit=0 \
GROUP BY l.ales_id ,ml.date, l.partner_ref, l.move_id, l.partner_is_company,l.period_id \
,l.journal_id \
,l.account_id, l.company_id , mv.name \
order by l.journal_id, ml.date, mv.name, l.account_id \
OFFSET %s',
(( ale.id, str(offset))))
# group individual partners by debit
cr.execute('INSERT INTO account_export_grouped_statement_line_grouped \
(ales_id, date, partner_ref, period_id, journal_id, cred
|
dennissergeev/classcode | lib/equil_run.py | Python | cc0-1.0 | 5,877 | 0.029607 |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import sys
#
# use pretty plotting if it can be imported
#
try:
import seaborn
except ImportError:
pass
sigma=5.67e-8
def find_tau(tot_trans,num_layers):
"""
# -TD- document using
"""
trans_layer=tot_trans**(1./num_layers)
tau_layer= -1.*np.log(trans_layer)
tau_layers=np.ones([num_layers])*tau_layer
tau_levels=np.cumsum(tau_layers)
tau_levels=np.concatenate(([0],tau_levels))
return tau_levels
def find_heights(press_levels,rho_layers):
"""
-TD- docstring using google style
"""
Rd=287.
g=9.8
press_layers=(press_levels[1:] + press_levels[:-1])/2.
del_press=(press_levels[1:] - press_levels[0:-1])
# rho_layers is supplied by the caller; no need to recompute it here
del_z= -1.*del_press/(rho_layers*g)
level_heights=np.cumsum(del_z)
level_heights=np.concatenate(([0],level_heights))
return level_heights
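# Note (added): the cumulative sum above integrates the hydrostatic relation
# dz = -dp / (rho * g) upward from the surface to obtain the level heights.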
def fluxes(tau_levels,Temp_layers,T_surf):
"""
-TD- docstring using google style
"""
up_rad=np.empty_like(tau_levels)
down_rad=np.empty_like(tau_levels)
sfc_rad=sigma*T_surf**4.
up_rad[0]=sfc_rad
tot_levs=len(tau_levels)
for index in np.arange(1,tot_levs):
upper_lev=index
lower_lev=index - 1
layer_num=index-1
del_tau=tau_levels[upper_lev] - tau_levels[lower_lev]
trans=np.exp(-1.666*del_tau)
emiss=1 - trans
layer_rad=sigma*Temp_layers[layer_num]**4.*emiss
up_rad[upper_lev]=trans*up_rad[lower_lev] + layer_rad
down_rad[tot_levs-1]=0
for index in np.arange(1,tot_levs):
upper_lev=tot_levs - index
lower_lev=tot_levs - index -1
layer_num=tot_levs - index - 1
del_tau=tau_levels[upper_lev] - tau_levels[lower_lev]
trans=np.exp(-1.666*del_tau)
emiss=1 - trans
layer_rad=sigma*Temp_layers[layer_num]**4.*emiss
down_rad[lower_lev]=down_rad[upper_lev]*trans + layer_rad
return (up_rad,down_rad)
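# Note (added): each layer transmits t = exp(-1.666 * dtau) and emits (1 - t) * sigma * T**4,
# so the two loops above build the fluxes recursively:
#   up_rad[k]     = t * up_rad[k-1]  + (1 - t) * sigma * Temp_layers[k-1]**4
#   down_rad[k-1] = t * down_rad[k]  + (1 - t) * sigma * Temp_layers[k-1]**4
# with up_rad[0] = sigma * T_surf**4 at the surface and down_rad at the top set to zero.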
def heating_rate(net_up,height_levels,rho_layers):
"""
-TD- docstring using google style
"""
cpd=1004.
dFn_dz= -1.*np.diff(net_up)/np.diff(height_levels)
dT_dt=dFn_dz/(rho_layers*cpd)
return dT_dt
def time_step(heating_rate,Temp_layers,delta_time):
"""
-TD- docstring using google style
"""
Temp_layers[:] = Temp_layers[:] + heating_rate*delta_time
return Temp_layers
if __name__=="__main__":
tot_trans=0.2
num_layers=100
p_sfc=1000.*1.e2
p_top=100.*1.e2
g=9.8
T_sfc=300.
Rd=287. #J/kg/K
num_levels=num_layers+1
tau_levels=find_tau(tot_trans,num_layers)
press_levels=np.linspace(p_top,p_sfc,num_levels)
press_diff=np.diff(press_levels)[0]
press_levels=press_levels[::-1]
press_layers=(press_levels[1:] + press_levels[:-1])/2.
Temp_levels=np.ones([num_levels])*T_sfc
Temp_layers=(Temp_levels[1:] + Temp_levels[:-1])/2.
S0=241.
Tc=273.15
delta_time_hr=30 #time interval in hours
delta_time_sec=30*3600. #time interval in seconds
stop_time_hr=600*24. #stop time in hours
times=np.arange(0,stop_time_hr,delta_time_hr) #times in hours
tot_loops=len(times)
num_times=len(times)
#
# -TD- comment which variables are defined on levels, and which on layers
#
sfc_temp=np.empty([num_times],dtype=np.float64)
hours=np.empty_like(sfc_temp)
#
# -TD- describe what the 2-d arrays are used for
#
air_temps=np.empty([num_layers,num_times],dtype=np.float64)
up_flux_run=np.empty([num_levels,num_times],dtype=np.float64)
down_flux_run=np.empty_like(up_flux_run)
height_levels_run=np.empty_like(up_flux_run)
for index in np.arange(0,num_times):
rho_layers=press_layers/(Rd*Temp_layers)
height_levels=find_heights(press_levels,rho_layers)
up,down=fluxes(tau_levels,Temp_layers,T_sfc)
sfc_temp[index]=T_sfc
#
# -TD- describe what this loop does
#
if np.mod(index,50)==0:
the_frac=np.int(index/tot_loops*100.)
sys.stdout.write("\rpercent complete: %d%%" % the_frac)
sys.stdout.flush()
air_temps[:,index]=Temp_layers[:]
up,down=fluxes(tau_levels,Temp_layers,T_sfc)
up_flux_run[:,index]=up[:]
down_flux_run[:,index]=down[:]
height_levels_run[:,index]=height_levels[:]
dT_dt=heating_rate(up-down,height_levels,rho_layers)
Temp_layers[:]=time_step(dT_dt,Temp_layers,delta_time_sec)
#
# -TD- describe what the following statements do
#
net_downsfc=S0 + down[0]
T_sfc=(net_downsfc/sigma)**0.25
plt.close('all')
fig1,axis1=plt.subplots(1,1)
snapshots=[0,2,8,30,40,50,60,70]
days=times/24.
for the_snap in snapshots:
#
# -TD- describe what the label does
#
label="%3.1f" % days[the_snap]
height_levels=height_levels_run[:,the_snap]
layer_heights=(height_levels[1:] + height_levels[:-1])/2.
axis1.plot(air_temps[:,the_snap],layer_heights*1.e-3,label=label)
axis1.legend()
axis1.set_title('temperature profiles for {} days'.format(len(snapshots)))
axis1.set_xlabel('temperature (K)')
fig1.savefig("snapshots.png")
fig2,axis2=plt.subplots(1,1)
axis2.plot(days,sfc_temp-Tc)
axis2.set_title('surface temperature (deg C)')
axis2.set_ylabel('temperature (degC)')
axis2.set_xlabel('day')
axis2.set_xlim((0,100))
fig2.savefig("sfc_temp.png")
fig3,axis3=plt.subplots(1,1)
axis3.plot(days,sfc_temp - air_temps[0,:])
axis3.set_title('air-sea temperature difference (deg C)')
axis3.set_ylabel('surface - first layer temp (degC)')
axis3.set_xlabel('day')
axis3.set_xlim((0,100))
fig3.savefig("air_sea.png")
plt.show()
|
SelvorWhim/competitive | Codewars/PlayingWithPassphrases.py | Python | unlicense | 735 | 0.013605 |
class PassphraseMapper(object):
    def __init__(self, n):
self.n = n
def __getitem__(self, c):
c = chr(c)
if c.isalpha(): # circular shift for letters
a = ord('a') if c.islower() else ord('A')
return chr(a + ((ord(c) - a + self.n) % 26))
if c.isdigit(): # complement to 9 for digits
return str(9 - int(c))
return c # leave the rest as is
def play_pass(s, n):
step3 = s.translate(PassphraseMapper(n)) # steps 1-3 (out of context character translation)
step4 = "".join([step3[i].upper() if (i % 2 == 0) else step3[i].lower() for i in range(len(step3))]) # step 4 (character capitalization by index)
return step4[::-1] # step 5 (reversal)
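# Example (added; worked through the steps above):
#   play_pass("I LOVE YOU!!!", 1)
#   steps 1-3 (translate): "J MPWF ZPV!!!"   (letters shifted by 1, digits complemented to 9, rest kept)
#   step 4 (alternate case by index): "J MpWf zPv!!!"
#   step 5 (reverse): "!!!vPz fWpM J"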
|
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/pysaml2-2.4.0/doc/conf.py | Python | gpl-2.0 | 6,371 | 0.006749 |
# -*- coding: utf-8 -*-
#
# pysaml2 documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 24 08:13:41 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysaml2'
copyright = u'2010-2011, Roland Hedberg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2.0beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysaml2doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pysaml2.tex', u'pysaml2 Documentation',
u'Roland Hedberg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
sein-tao/pyBioUtil | tests/test_decorator.py | Python | gpl-2.0 | 850 | 0.004706 |
#!/usr/bin/env python3
import unittest
TestCase = unittest.TestCase
from BioUtil.decorator import context_decorator
import inspect
print(__file__)
class TestContextDecorator(TestCase):
def test_no_enter(self):
with self.assertRaises(AttributeError):
class A:
pass
with A() as input:
pass
def test_decorator_enter(self):
# test no raise
@context_decorator
class A:
def __init__(self):
pass
with A() as input:
pass
def test_decorator_enter_override(self):
with self.assertRaises(NotImplementedError):
@context_decorator
class A:
def __enter__(self):
raise NotImplementedError()
with A() as input:
pass
|
tsudmi/json-database | setup.py | Python | mit | 867 | 0 |
#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = [
'antlr4-python2-runtime==4.5',
'click==4.0'
]
setup(
name='json-database',
version='0.4.0',
author='Dmitri Chumak',
author_email='tsudmi@ut.ee',
url='https://github.com/tsudmi/json-database',
description='JSON Database is database which holds data in JSON format.',
long_description=open('README.md').read(),
packages=find_packages(),
install_requires=install_requires,
entry_points={
'console_scripts': ['json-database=json_database.command_line:main'],
},
test_suite="json_database.te
|
sts",
classifiers=[
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
yusufm/mobly | mobly/controllers/attenuator_lib/minicircuits.py | Python | apache-2.0 | 5,184 | 0.001157 |
#!/usr/bin/env python3.4
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module has the class for controlling Mini-Circuits RCDAT series
attenuators over Telnet.
See http://www.minicircuits.com/softwaredownload/Prog_Manual-6-Programmable_Attenuator.pdf
"""
from mobly.controllers import attenuator
from mobly.controllers.attenuator_lib import telnet_client
class AttenuatorDevice(object):
"""This provides a specific telnet-controlled implementation of
AttenuatorDevice for Mini-Circuits RC-DAT attenuators.
Attributes:
path_count: The number of signal attenuation path this device has.
"""
def __init__(self, path_count=1):
self.path_count = path_count
# The telnet client used to communicate with the attenuator device.
self._telnet_client = telnet_client.TelnetClient(
tx_cmd_separator="\r\n", rx_cmd_separator="\r\n", prompt="")
@property
def is_open(self):
"""This function returns the state of the telnet connection to the
underlying AttenuatorDevice.
Returns:
True if there is a successfully open connection to the
AttenuatorDevice.
"""
return bool(self._telnet_client.is_open)
def open(self, host, port=23):
"""Opens a telnet connection to the desired AttenuatorDevice and
queries basic information.
Args:
host: A valid hostname (IP address or DNS-resolvable name) to an
MC-DAT attenuator instrument.
port: An optional port number (defaults to telnet default 23)
"""
self._telnet_client.open(host, port)
config_str = self._telnet_client.cmd("MN?")
if config_str.startswith("MN="):
config_str = config_str[len("MN="):]
self.properties = dict(
zip(['model', 'max_freq', 'max_atten'], config_str.split("-", 2)))
self.max_atten = float(self.properties['max_atten'])
def close(self):
"""Closes a telnet connection to the desired attenuator device.
This should be called as part of any teardown procedure prior to the
attenuator instrument leaving scope.
"""
if self.is_open:
self._telnet_client.close()
def set_atten(self, idx, value):
"""Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only have one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error is raised if the underlying telnet connection to the
instrument is not open.
IndexError is raised if the index of the attenuator is greater than
the maximum index of the underlying instrument.
ValueError is raised if the requested set value is greater than the
maximum attenuation value.
"""
if not self.is_open:
raise attenuator.Error(
"Connection to attenuator at %s is not open!" %
self._telnet_client.host)
if idx + 1 > self.path_count:
raise IndexError("Attenuator index out of range!", self.path_count,
idx)
if value > self.max_atten:
raise ValueError("Attenuator value out of range!", self.max_atten,
value)
# The actual device uses one-based index for channel numbers.
self._telnet_client.cmd("CHAN:%s:SETATT:%s" % (idx + 1, value))
def get_atten(self, idx=0):
"""This function returns the current attenuation from an attenuator at a
given index in the instrument.
Args:
idx: This zero-based index is the identifier for a particular
attenuator in an instrument.
Raises:
Error is raised if the underlying telnet connection to the
instrument is not open.
Returns:
A float that is the current attenuation value.
"""
if not self.is_open:
raise attenuator.Error(
"Connection to attenuator at %s is not open!" %
self._telnet_client.host)
if idx + 1 > self.path_count or idx < 0:
raise IndexError("Attenuator index out of range!", self.path_count,
idx)
atten_val_str = self._telnet_client.cmd("CHAN:%s:ATT?" % (idx + 1))
atten_val = float(atten_val_str)
return atten_val
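# Usage sketch (added for illustration; the host address and values are hypothetical):
#   attenuator_device = AttenuatorDevice(path_count=1)
#   attenuator_device.open('192.168.1.20')      # telnet, default port 23
#   attenuator_device.set_atten(0, 10.0)        # 10 dB on the first signal path
#   current_atten = attenuator_device.get_atten(0)
#   attenuator_device.close()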
|
andrej5elin/camera | camera/bfly.py | Python | mit | 32,554 | 0.0184 |
"""A simple ctypes Wrapper of FlyCapture2_C API. Currently this only works with
graysale cameras in MONO8 or MONO16 pixel format modes.
Use it as follows:
>>> c = Camera()
First you must initialize. To capture in MONO8 mode and at full resolution
run
>>> c.init() #default to MONO8
Init also turns off all auto features (shutter, exposure, gain..) automatically.
It sets the brightness level to zero and applies no gamma or sharpness adjustment (for true raw image capture).
If MONO16 is to be used run:
>>> c.init(pixel_format = FC2_PIXEL_FORMAT_MONO16)
To set ROI (crop mode) do
>>> c.init(shape = (256,256)) #crop 256x256 image located around the sensor center.
Additionally you can specify the offset of the cropped image (from the top left corner)
>>> c.init(shape = (256,256), offset = (10,20))
shape and offset default to (None,None), which means that the x and y dimensions and the offset
are determined automatically: they default to the maximum image width and height, with the offset
set so that the crop is made around the sensor center.
So, you can set one of the dimensions in shape to None, which will result in
a full resolution capture in that dimension. To capture a full width horizontal strip
of height 100 pixels located around the sensor center do
>>> c.init(shape = (None,100))
To capture a 256x256 frame located in the center in horizontal direction and at top
in the vertical direction do
>>> c.init(shape = (256,256), offset = (None,0))
Now we can set some parameters.
>>> c.set_shutter(12.) #approx 12 ms
>>> c.get_shutter() #camera actually uses slightly different value
11.979341506958008
>>> c.set_gain(0.) #0dB
>>> c.get_gain()
0.0
Same can be done with (Absolute mode)
>>> c.set_parameter("shutter", 12.)
Or in integer mode
>>> c.set_parameter("shutter", value_int = 285)
>>> c.get_parameter("shutter")
{'value_int': 285L, 'auto': False, 'value': 11.979341506958008, 'on': True}
To capture image just call
>>> im = c.capture()
Actual image data (a numpy array) is stored in the converted_image attribute...
>>> im is c.converted_image
True
You can use numpy or opencv to save image to file, or use FlyCamera API to do it.
File type is guessed from the extension.
>>> c.save_raw("raw.pgm") #saves raw image data (that has not yet been converted to numpy array)
>>> c.save_image("converted.pgm") #saves converted image data (that has been converted to numpy array)
These two images should be identical for grayscale cameras
>>> import cv2
>>> raw = cv2.imread("raw.pgm",cv2.IMREAD_GRAYSCALE)
>>> converted = cv2.imread("converted.pgm",cv2.IMREAD_GRAYSCALE)
>>> np.allclose(raw,converted)
True
save_raw converts raw data to a given file format. To dump true raw data to a file use:
>>> c.save_raw("raw.raw") #".raw" extension is meant for true raw data write.
>>> import numpy as np
>>> true_raw = np.fromfile("raw.raw",dtype = "uint8")
>>> np.allclose(true_raw, raw.flatten())
True
To capture video do:
>>> c.set_frame_rate(10.) #10 fps
Then you need to call the video() method. This method returns a generator (for speed).
So you need to iterate over images and do copying if you need to push frames into memory.
To create a list of frames (numpy arrays) do
>>> [(t,im.copy()) for t,im in c.video(10, timestamp = True)] #create a list of 10 frames video with timestamp
You should close when done:
>>> c.close()
"""
from ctypes import *
import logging as logger
import platform,os
import numpy as np
import warnings
import time
import cv2
from camera.base_camera import BaseCamera
#logger.basicConfig(level = logger.DEBUG)
logger.basicConfig(level = logger.INFO)
if platform.architecture()[0] == '64bit':
LIBNAME = 'FlyCapture2_C'
else:
LIBNAME = 'FlyCapture2_C'
flylib = cdll.LoadLibrary(LIBNAME)
#constants from #defines and enum constants in FlyCapture2Defs_C.h
FC2_ERROR_OK = 0
MAX_STRING_LENGTH = 512
FULL_32BIT_VALUE = 0x7FFFFFFF
#fc2ImageFileFormat enum
FC2_FROM_FILE_EXT = -1#, /**< Determine file format from file extension. */
FC2_PGM = 0#, /**< Portable gray map. */
FC2_PPM = 1#, /**< Portable pixmap. */
FC2_BMP = 2#, /**< Bitmap. */
FC2_JPEG = 3#, /**< JPEG. */
FC2_JPEG2000 = 4#, /**< JPEG 2000. */
FC2_TIFF = 5#, /**< Tagged image file format. */
FC2_PNG = 6#, /**< Portable network graphics. */
FC2_RAW = 7 #, /**< Raw data. */
FC2_IMAGE_FILE_FORMAT_FORCE_32BITS = FULL_32BIT_VALUE
#fc2PixelFormat enums
FC2_PIXEL_FORMAT_MONO8 = 0x80000000#, /**< 8 bits of mono information. */
FC2_PIXEL_FORMAT_411YUV8 = 0x40000000#, /**< YUV 4:1:1. */
FC2_PIXEL_FORMAT_422YUV8 = 0x20000000#, /**< YUV 4:2:2. */
FC2_PIXEL_FORMAT_444YUV8 = 0x10000000#, /**< YUV 4:4:4. */
FC2_PIXEL_FORMAT_RGB8 = 0x08000000#, /**< R = G = B = 8 bits. */
FC2_PIXEL_FORMAT_MONO16 = 0x04000000#, /**< 16 bits of mono information. */
FC2_PIXEL_FORMAT_RGB16 = 0x02000000#, /**< R = G = B = 16 bits. */
FC2_PIXEL_FORMAT_S_MONO16 = 0x01000000#, /**< 16 bits of signed mono information. */
FC2_PIXEL_FORMAT_S_RGB16 = 0x00800000#, /**< R = G = B = 16 bits signed. */
FC2_PIXEL_FORMAT_RAW8 = 0x00400000#, /**< 8 bit raw data output of sensor. */
FC2_PIXEL_FORMAT_RAW16 = 0x00200000#, /**< 16 bit raw data output of sensor. */
FC2_PIXEL_FORMAT_MONO12 = 0x00100000#, /**< 12 bits of mono information. */
FC2_PIXEL_FORMAT_RAW12 = 0x00080000#, /**< 12 bit raw data output of sensor. */
FC2_PIXEL_FORMAT_BGR = 0x80000008#, /**< 24 bit BGR. */
FC2_PIXEL_FORMAT_BGRU = 0x40000008#, /**< 32 bit BGRU. */
FC2_PIXEL_FORMAT_RGB = FC2_PIXEL_FORMAT_RGB8#, /**< 24 bit RGB. */
FC2_PIXEL_FORMAT_RGBU = 0x40000002#, /**< 32 bit RGBU. */
FC2_PIXEL_FORMAT_BGR16 = 0x02000001#, /**< R = G = B = 16 bits. */
FC2_PIXEL_FORMAT_BGRU16 = 0x02000002#, /**< 64 bit BGRU. */
FC2_PIXEL_FORMAT_422YUV8_JPEG = 0x40000001#, /**< JPEG compressed stream. */
FC2_NUM_PIXEL_FORMATS = 20#, /**< Number of pixel formats. */
FC2_UNSPECIFIED_PIXEL_FORMAT = 0 #/**< Unspecified pixel format. */
#fc2PropertyType enums
FC2_BRIGHTNESS = 0
FC2_AUTO_EXPOSURE = 1
FC2_SHARPNESS = 2
FC2_WHITE_BALANCE = 3
FC2_HUE = 4
FC2_SATURATION = 5
FC2_GAMMA = 6
FC2_IRIS = 7
FC2_FOCUS = 8
FC2_ZOOM = 9
FC2_PAN = 10
FC2_TILT = 11
FC2_SHUTTER = 12
FC2_GAIN = 13
FC2_TRIGGER_MODE = 14
FC2_TRIGGER_DELAY = 15
FC2_FRAME_RATE = 16
FC2_TEMPERATURE = 17
FC2_UNSPECIFIED_PROPERTY_TYPE = 18
FC2_PROPERTY_TYPE_FORCE_32BITS = FULL_32BIT_VALUE
#parameter name map. These are names as defined in the FlyCapture software
PARAMETER = {"brightness" : FC2_BRIGHTNESS,
"exposure" : FC2_AUTO_EXPOSURE,
"sharpness" : FC2_SHARPNESS,
"gamma" : FC2_GAMMA,
"shutter" : FC2_SHUTTER,
"gain" : FC2_GAIN,
"frame_rate" : FC2_FRAME_RATE}
#ctypes equivalents of typedefs and typedef enums in FlyCapture2Defs_C.h
BOOL = c_int
fc2PropertyType = c_int
fc2Mode = c_int
fc2InterfaceType = c_int
fc2DriverType = c_int
fc2BusSpeed = c_int
fc2PCIeBusSpeed = c_int
fc2BayerTileFormat = c_int
fc2PixelFormat = c_int
fc2ImageFileFormat = c_int
fc2Context = c_void_p
fc2ImageImpl = c_void_p
class fc2Format7Info(Structure):
_fields_ = [("mode", fc2Mode),
("maxWidth", c_uint),
("maxHeight", c_uint),
("offsetHStepSize", c_uint),
("offsetVStepSize", c_uint),
("imagetHStepSize", c_uint),
("imageVStepSize", c_uint),
("pixelFormatBitField", c_uint),
("vendorPixelFormatBitField", c_uint),
("packetSize", c_uint),
("minPacketSize", c_uint),
("maxPacketSize", c_uint),
("percentage", c_float),
("reserved", c_uint*16)]
class fc2Format7ImageSettings(Structure):
_fields_ = [("mode", fc2Mode),
("offsetX", c_uint),
("offsetY", c_uint),
("width", c_uint),
("height", c_uint),
("pixelFormat", fc2PixelFormat),
("r
|
mostofi/wannier90 | examples/example33/kdotp_plot.py | Python | gpl-2.0 | 6,672 | 0.02473 |
import numpy as N
import sys as SYS
import os as OS
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib import rc
rc('text', usetex=True)
font = FontProperties()
font.set_size(20)
#------------------------------------------------------
# Extract W90 bands data
#------------------------------------------------------
W_band_dir = './'
W_band_file = 'bc2n_band.dat'
eF=-1.6103
data = N.loadtxt(W_band_dir+W_band_file)
list_xW = data[:,0]
bandsW = data[:,1:].transpose()-eF
#----------------------------------------------------------
# plot
#----------------------------------------------------------
fig = plt.figure(figsize=(10,7))
axplot = fig.add_subplot(111)
for band in bandsW:
axplot.plot(list_xW,band,linestyle='',label='ab initio (W90)', color='k',marker='o',markersize=2)
######################################
######################################
####### extract k.p coefficients ####
seed = 'bc2n'
filename_0 = seed + '-kdotp_0.dat'
filename_1 = seed + '-kdotp_1.dat'
filename_2 = seed + '-kdotp_2.dat'
# arrays containing pauli matrix coefficients proportional to
# kx, ky, kx**2, ky**2 and kx*ky, 5 entries in total
a1 = [0] * 5
a2 = [0] * 5
a3 = [0] * 6 # contains extra term independent of k
a0 = [0] * 5
# assign spatial directions
# indexes: 0=x, 1=y, 2=z
index_x = 2
index_y = 0
#### 0th order ####
data = N.loadtxt(filename_0)
re = data[:,0]
im = data[:,1]
# Az
a3[0] = 0.5*N.real(re[0]-re[3])
offset = re[0] - a3[0]
#### 1st order ####
data = N.loadtxt(filename_1)
re = data[:,0]
im = data[:,1]
full = re + im*1.j
# Az
# index_i*4 chooses the matrix chunk corresponding to the proper direction (the 4 factor comes from 2x2 bands),
# +0 and +3 get the 11 and 22 band-diagonal entries
a3[1] = 0.5*(full[index_x*4]-full[index_x*4+3]).real
a3[2] = 0.5*(full[index_y*4]-full[index_y*4+3]).real
# Ax
# index_i*4 chooses the matrix chunk corresponding to the proper direction (the 4 factor comes from 2x2 bands),
# +1 gets the band-off-diagonal entry
a1[0] = (full[index_x*4+1]).real
a1[1] = -(full[index_y*4+1]).imag
# Ay
# index_i*4 chooses the matrix chunk corresponding to the proper direction (the 4 factor comes from 2x2 bands),
# +1 gets the band-off-diagonal entry
a2[0] = -(full[index_x*4+1]).imag
a2[1] = (full[index_y*4+1]).real
# A0
# index_i*4 chooses the matrix chunk corresponding to the proper direction (the 4 factor comes from 2x2 bands),
# +0 and +3 get the 11 and 22 band-diagonal entries
a0[0] = 0.5*(full[index_x*4]+full[index_x*4+3]).real
a0[1] = 0.5*(full[index_y*4]+full[index_y*4+3]).real
#### 2nd order ####
data = N.loadtxt(filename_2)
re = data[:,0]
im = data[:,1]
full = re + im*1.j
# a1
# first index jumps 12=3*4 (3 for spatial indexes, 4=2x2 for bands)
# second index only jumps 4=2x2 for bands
# +1 gets the band-off-diagonal term
a1[2] = full[index_x*(12+4)+1].real
a1[3] = -full[index_y*(12+4)+1].imag
a1[4] = full[index_x*12+index_y*4+1].real + full[index_y*12+index_x*4+1].real
# a2
# first index jumps 12=3*4 (3 for spatial indexes, 4=2x2 for bands)
# second index only jumps 4=2x2 for bands
# +1 gets the band-off-diagonal term
a2[2] = -full[index_x*(12+4)+1].imag
a2[3] = full[index_y*(12+4)+1].real
a2[4] = -full[index_x*12+index_y*4+1].imag - full[index_y*12+index_x*4+1].imag
# a3
# first index jumps 12=3*4 (3 for spatial indexes, 4=2x2 for bands)
# second index only jumps 4=2x2 for bands
# +0 and +3 get the 11 and 22 band-diagonal terms
a3[3] = N.real(0.5*(full[index_x*(12+4)] - full[index_x*(12+4)+3]))
a3[4] = N.real(0.5*(full[index_y*(12+4)] - full[index_y*(12+4)+3]))
a3[5] = 0.5*N.real( (full[index_x*12+index_y*4]+full[index_y*12+index_x*4] ) - (full[index_x*12+index_y*4+3] + full[index_y*12+index_x*4+3] ) )
# a0
a0[2] = N.real(0.5*(full[index_x*(12+4)] + full[index_x*(12+4)+3]))
a0[3] = N.real(0.5*(full[index_y*(12+4)] + full[index_y*(12+4)+3]))
a0[4] = 0.5*N.real( (full[index_x*12+index_y*4]+full[index_y*12+index_x*4] ) + (full[index_x*12+index_y*4+3] + full[index_y*12+index_x*4+3] ) )
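# Note (added): with the Pauli-matrix decomposition used above,
#   H(k) = a0(k)*1 + a1(k)*sigma_x + a2(k)*sigma_y + a3(k)*sigma_z,
# the two k.p bands are the eigenvalues
#   E_pm(k) = a0(k) +/- sqrt(a1(k)**2 + a2(k)**2 + a3(k)**2),
# which the loop below evaluates as eps = sqrt(|a1 - i*a2|**2 + |a3|**2).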
# set up k.p band dispersion
# energy is in eV
# linear coefficients are in units of eV*Ang
# quadratic coefficients are in units of eV*Ang**2
a0_angs = 1.0
bxfac = -0.5*0.231321
byfac = 0.0
bfac = N.sqrt( (bxfac)**(2) + (byfac)**(2) )
cos_xangle = bxfac/bfac
sin_yangle = byfac/bfac
kfac = bfac*2*N.pi/a0_angs
# define quantities to be plotted
kk_list = []
band_1 = []
band_2 = []
band_1lin = []
band_2lin = []
# k along x
num_k = 200
for ii in range(0,num_k+1):
kk = kfac*float(ii)/float(num_k)
kkx = cos_xangle*kk
kky = sin_yangle*kk
# coefficients with quadratic dispersion
a1_coeff = (a1[0])*(kkx) + (a1[1])*(kky) + (a1[2])*(kkx**(2)) + (a1[3])*(kky**(2)) + (a1[4])*(kky*kkx)
a2_coeff = (a2[0])*(kkx) + (a2[1])*(kky) + (a2[2])*(kkx**(2)) + (a2[3])*(kky**(2)) + (a2[4])*(kky*kkx)
a3_coeff = a3[0] + (a3[1])*(kkx) + (a3[2])*(kky) + (a3[3])*(kkx**(2)) + (a3[4])*(kky**(2)) + (a3[5])*(kky*kkx)
a0_coeff = a0[0]*(kkx) + a0[1]*(kky) + a0[2]*(kkx)**(2) + a0[3]*(kky)**(2) + (a0[4])*(kky*kkx)
eps = N.sqrt((N.abs(a1_coeff-a2_coeff*(1.j)))**(2) + (N.abs(a3_coeff))**(2))
kk_list.append(kk)
band_1.append(a0_coeff+eps+offset-eF)
band_2.append(a0_coeff-eps+offset-eF)
# coefficients with linear dispersion
a1_coeff = (a1[0])*(kkx) + (a1[1])*(kky)
a2_coeff = (a2[0])*(kkx) + (a2[1])*(kky)
a3_coeff = a3[0] + (a3[1])*(kkx) + (a3[2])*(kky)
a0_coeff = a0[0]*(kkx) + a0[1]*(kky)
eps = N.sqrt((N.abs(a1_coeff-a2_coeff*(1.j)))**(2) + (N.abs(a3_coeff))**(2))
band_1lin.append(a0_coeff+eps+offset-eF)
band_2lin.append(a0_coeff-eps+offset-eF)
axplot.plot(kk_list,band_1lin,color='b',linestyle='--',label='linear $k\cdot p$')
axplot.plot(kk_list,band_2lin,color='b',linestyle='--')
axplot.plot(kk_list,band_1,color='r',linestyle='-',label='quadratic $k\cdot p$')
axplot.plot(kk_list,band_2,color='r',linestyle='-')
######################################
######################################
## set up figure
axplot.set_xlim([list_xW[0],list_xW[-1]])
xtick_positions = [0.0,list_xW[len(list_xW)-1]]
xtick_labels = ['$\mathrm{S}$','$\mathrm{X}$']
axplot.set_xticks(xtick_positions)
axplot.set_xticklabels(xtick_labels,fontproperties=font)
for y_tick in axplot.yaxis.get_major_ticks():
y_tick.label1.set_fontsize(20)
# Set the yticks properties
axplot.set_ylabel('$E-E_{F}$ $\mathrm{(eV)}$',fontproperties=font)
axplot.axhline(0, color='k',linestyle='-',lw=0.25)
axplot.legend(loc=0,fancybox=True,shadow=True,prop=font)
fig.tight_layout()
plt.savefig('kdotp_bands_SX.pdf')
plt.show()
|
duct-tape/taped-tests | taped_tests/jobs.py | Python | bsd-3-clause | 563 | 0 |
import redis
from django_rq import job
import logging
try:
    import cPickle as pickle
except ImportError:
    import pickle
logger = logging.getLogger(__name__)
@job('test')
def test_job(testcase):
result = testcase.defaultTestResult()
logger.info('Invoking {}'.format(testcase))
testcase.run(result)
logger.info('Test result: {}'.format(result))
r = redis.StrictRedis(host='localhost', port=6379, db=0)
r.publish('test-results', pickle.dumps(result,
protocol=pickle.HIGHEST_PROTOCOL))
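# Consumer sketch (added for illustration; mirrors the channel and connection settings used above):
#   r = redis.StrictRedis(host='localhost', port=6379, db=0)
#   pubsub = r.pubsub()
#   pubsub.subscribe('test-results')
#   for message in pubsub.listen():
#       if message['type'] == 'message':
#           result = pickle.loads(message['data'])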
|
mayankjohri/wakka-package-manager | wakkacore/terminal.py | Python | gpl-2.0 | 3,148 | 0.006036 |
# This code is part of Wakka.
# Wakka is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Wakka is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Wakka; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Wakka is based on code from gtkPacman, and is copyright (C)2009-2010
# Mitchell Nemitz. gtkPacman is copyright (C)2005-2008 Stefano Esposito.
from vte import Terminal
class terminal(Terminal):
def __init__(self):
Terminal.__init__(self)
self.set_sensitive(False)
def do(self, queues):
names_queues = { "add": [], "remove": []}
for pac in queues["add"]:
names_queues["add"].append(pac.name)
continue
for pac in queues["remove"]:
names_queues["remove"].append(pac.name)
continue
inst_pacs = " ".join(names_queues["add"])
rem_pacs = " ".join(names_queues["remove"])
pacman = "pacman --noconfirm"
if inst_pacs and rem_pacs:
command = "%s -Sf %s; %s -R %s; exit\n" %(pacman, inst_pacs, pacman, rem_pacs)
elif inst_pacs:
command = "%s -Sf %s; exit\n" %(pacman, inst_pacs)
elif rem_pacs:
command = "%s -R %s; exit\n" %(pacman, rem_pacs)
else:
command = "exit\n"
self.fork_command()
self.feed_child(command)
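# Example (added): with one package queued for install and one for removal, do()
# feeds the spawned shell a command of the form
#   "pacman --noconfirm -Sf <install_names>; pacman --noconfirm -R <remove_names>; exit\n"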
def do_local(self, fname, queues):
names_queues = { "add": [], "remove": []}
for pac in queues["add"]:
names_queues["add"].append(pac.name)
continue
for pac in queues["remove"]:
names_queues["remove"].append(pac.name)
continue
inst_pacs = " ".join(names_queues["add"])
rem_pacs = " ".join(names_queues["remove"])
pacman = "pacman --noconfirm"
local = "%s -Uf %s" %(pacman, fname)
if inst_pacs and rem_pacs:
command = "%(pac)s -Sf %(inst)s; %(pac)s -R %(rem)s; %(loc)s; exit\n" %{"pac": pacman, "loc": local, "inst": inst_pacs, "rem": rem_pacs}
elif inst_pacs:
command = "%(pac)s -Sf %(inst)s; %(pac)s; %(loc)s; exit\n" %{"pac": pacman, "loc": local, "inst": inst_pacs}
elif rem_pacs:
command = "%(loc)s; %(pac)s -R %(re
|
m)s; %(loc)s; exit\n" %{"pac": pacman, "loc": local, "rem": rem_pacs}
else:
|
command = "%s; exit\n" %local
self.fork_command()
self.feed_child(command)
def do_upgrade(self):
self.fork_command()
self.feed_child("pacman -Su --noconfirm; exit\n")
def close(self, term, close_button):
close_button.show()
return
|
MBALearnsToCode/PyMathFunc | MathFunc/__init__.py | Python | mit | 13,313 | 0.002704 |
from __future__ import print_function
from CompyledFunc import CompyledFunc
from copy import copy as shallowcopy, deepcopy
from frozendict import frozendict
from HelpyFuncs.Dicts import combine_dict_and_kwargs, merge_dicts_ignoring_dup_keys_and_none_values
from HelpyFuncs.SymPy import sympy_allclose, sympy_xreplace
from MathDict import MathDict
from operator import index, pos, neg, abs, lt, le, eq, ne, ge, gt, add, sub, mul, div, truediv, floordiv, mod, pow
from pprint import pprint
from sympy import exp as e, log as ln, pprint as sympy_print, sqrt as square_root
class MathFunc:
def __init__(self, var_names_and_syms={}, mapping={}, param={}, cond={}, scope={}, compile=False):
if not hasattr(self, 'Vars'):
self.Vars = var_names_and_syms # {var_name: var_symbol} dict
if not hasattr(self, 'Param'):
self.Param = param
self.Cond = cond # {var_name: var_value} dict, var_value can be None if conditioning is generic
self.Scope = dict.fromkeys(set(var_names_and_syms) - set(cond))
vars_with_fixed_scope_values = {} # to keep track of scope variables with fixed values (i.e. points in space)
for var, value in scope.items():
if (var in self.Scope) and (value is not None):
self.Scope[var] = value # "points-in-space"
vars_with_fixed_scope_values[var] = value
s0 = set(vars_with_fixed_scope_values.items())
if hasattr(mapping, 'keys'):
self.Mapping = MathDict()
self.CondInstances = {}
for vars_and_values___frozen_dict, func_value in mapping.items():
if set(vars_and_values___frozen_dict.items()) >= s0:
self.Mapping[vars_and_values___frozen_dict] = func_value
condition_instance = {}
for var in (set(vars_and_values___frozen_dict) & set(cond)):
condition_instance[var] = vars_and_values___frozen_dict[var]
self.CondInstances[vars_and_values___frozen_dict] = frozendict(condition_instance)
else:
self.Mapping = mapping
if not hasattr(self, 'CompyledFunc'):
self.CompyledFunc = None
if compile:
self.compile()
def __repr__(self):
return 'MathFunc %s' % repr(self.Mapping)
def copy(self, deep=False):
if deep:
math_func = deepcopy(self)
else:
math_func = shallowcopy(self)
math_func.Cond = deepcopy(math_func.Cond) # need to be careful with Cond and Scope
math_func.Scope = deepcopy(math_func.Scope) # need to be careful with Cond and Scope
math_func.CompyledFunc = None # remove compiled version because changes are likely to be made on the copy
return math_func
def at(self, vars_and_values___dict={}, **kw_vars_and_values___dict):
vars_and_values___dict = combine_dict_and_kwargs(vars_and_values___dict, kw_vars_and_values___dict)
for var in (set(self.Vars) & set(vars_and_values___dict)):
vars_and_values___dict[self.Vars[var]] = vars_and_values___dict[var]
conds = self.Cond.copy()
scope = self.Scope.copy()
for var, value in vars_and_values___dict.items():
if var in conds:
conds.update({var: value})
if var in scope:
scope.update({var: value})
conds = sympy_xreplace(conds, vars_and_values___dict)
scope = sympy_xreplace(scope, vars_and_values___dict)
if hasattr(self.Mapping, 'keys'):
mapping = {}
for vars_and_values___frozen_dict, func_value in self.Mapping.items():
other_items___dict = dict(set(vars_and_values___frozen_dict.items()) -
set(vars_and_values___dict.items()))
if not (set(other_items___dict) and set(vars_and_values___dict)):
mapping[frozendict(set(vars_and_values___frozen_dict.items()) - set(conds.items()))] =\
sympy_xreplace(func_value, vars_and_values___dict)
else:
mapping = sympy_xreplace(self.Mapping, vars_and_values___dict)
return MathFunc(self.Vars.copy(), mapping, cond=conds, scope=scope)
def compile(self):
self.CompyledFunc = CompyledFunc(merge_dicts_ignoring_dup_keys_and_none_values(self.Vars, self.Param),
self.Mapping)
def __call__(self, var_and_param_names_and_values={}, **kw_var_and_param_names_and_values):
var_and_param_names_and_values = combine_dict_and_kwargs(var_and_param_names_and_values,
kw_var_and_param_names_and_values)
if var_and_param_names_and_values:
if self.CompyledFunc is None:
self.compile()
return self.CompyledFunc(var_and_param_names_and_values)
elif isinstance(self.Mapping, MathDict):
return self.Mapping()
else:
return self.Mapping
def optim(self, max_or_min=max, leave_unoptimized=None):
if max_or_min is max:
comp = ge
else:
comp = le
if leave_unoptimized:
comparison_bases = {}
conditioned_and_unoptimized_vars = set(self.Cond) | set(leave_unoptimized)
for vars_and_values___frozen_dict in self.Mapping:
comparison_basis = {}
for var in (set(vars_and_values___frozen_dict) & conditioned_and_unoptimized_vars):
comparison_basis[var] = vars_and_values___frozen_dict[var]
comparison_bases[vars_and_values___frozen_dict] = frozendict(comparison_basis)
else:
comparison_bases = self.CondInstances
optim_values = {}
for vars_and_values___frozen_dict, func_value in self.Mapping.items():
comparison_basis = comparison_bases[vars_and_values___frozen_dict]
if comparison_basis in optim_values:
optim_values[comparison_basis] = max_or_min(optim_values[comparison_basis], func_value)
else:
optim_values[comparison_basis] = func_value
optims = {}
for vars_and_values___frozen_dict, func_value in self.Mapping.items():
if comp(func_value, optim_values[comparison_bases[vars_and_values___frozen_dict]]):
optims[vars_and_values___frozen_dict] = func_value
return MathFunc(self.Vars.copy(), optims, cond=self.Cond.copy(), scope=self.Scope.copy())
def marg(self, *marginalized_vars, **kwargs):
itself = lambda x: x
if 'transf' in kwargs:
transf_func = kwargs['transf']
else:
transf_func = itself
if 'reduce_func' in kwargs:
reduce_func = kwargs['reduce_func']
else:
reduce_func = add
if 'rev_transf' in kwargs:
rev_transf_func = kwargs['rev_transf']
else:
rev_transf_func = itself
var_names_and_symbols___dict = self.Vars.copy() # just to be careful
scope = self.Scope.copy() # just to be careful
mapping = self.Mapping.copy() # just to be careful
for marginalized_var in marginalized_vars:
del var_names_and_symbols___dict[marginalized_var]
del scope[marginalized_var]
d = {}
for vars_and_values___frozen_dict, func_value in mapping.items():
marginalized_var_value = vars_and_values___frozen_dict[marginalized_var]
fd = frozendict(set(vars_and_values___frozen_dict.items()) -
{(marginalized_var, marginalized_var_value)})
if fd in d:
d[fd] = reduce_func(d[fd], transf_func(func_value))
else:
d[fd] = transf_func(func_value)
mapping = {k: rev_transf_func(v) for k, v in d.items()}
|
lertech/extra-addons | network/model/digitalocean/Size.py | Python | gpl-3.0 | 330 | 0 |
class Size(object):
def __init__(self, client_id="", api_key=""):
self.client_id = client_id
self.api_key = api_key
self.name = None
self.id = None
self.memory = None
self.cpu = None
self.disk = None
self.cost_per_hour = None
self.cost_per_month = None
|
drpjm/udacity-mle-project5 | src/tfhelpers.py | Python | mit | 2,195 | 0.010934 |
'''
Created on Oct 4, 2016
@author: pjmartin (but mostly the Google TF tutorial site)
'''
import tensorflow as tf
# variable declaration helper functions
# Make the weight variables be a little noisy around 0.0.
def weight_variable(shape, var_name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=var_name)
# creates a small positive bias
def bias_variable(shape, var_name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=var_name)
# variable_summaries collects the mean, stdev, max, and min
# values of the supplied variable, var.
def variable_summaries(var, name):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
# tf.histogram_summary(name, var)
# fully_conn_nn_layer generates a fully connected
# NN layer based on the supplied input_tensor, input, and output dims.
def fully_conn_nn_layer(input_tensor, in_dim, out_dim, layer_name, act_fn=tf.nn.relu):
with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
W = tf.Variable(tf.truncated_normal([in_dim, out_dim]))
variable_summaries(W, layer_name + '/weights')
with tf.name_scope('biases'):
b = tf.Variable(tf.random_normal([out_dim]))
variable_summaries(b, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
pre_act = tf.matmul(input_tensor, W) + b
tf.histogram_summary(layer_name + '/pre_acts_hist', pre_act)
acts = act_fn(pre_act, 'activations')
tf.histogram_summary(layer_name + '/activations', acts)
return acts, W, b
# Support functions for convolutional NN
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
# Maxpooling over 2x2 blocks
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
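# Usage sketch (added; assumes the same pre-1.0 TensorFlow API used above, shapes are illustrative):
#   x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
#   hidden, W1, b1 = fully_conn_nn_layer(x, 784, 128, 'hidden1')   # fully connected ReLU layer
#   W_conv = weight_variable([5, 5, 1, 32], 'conv1_weights')
#   b_conv = bias_variable([32], 'conv1_biases')
#   pooled = max_pool_2x2(conv2d(tf.reshape(x, [-1, 28, 28, 1]), W_conv) + b_conv)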
|
ua-snap/downscale | snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/crop_mask_resample_to_iem.py | Python | mit | 6,482 | 0.039185 |
def resample_to_1km( x, template_raster_mask ):
'''
template_raster_mask should be a mask in in the res/extent/origin/crs of the
existing TEM IEM products.
'''
import rasterio, os
from rasterio.warp import RESAMPLING, reproject
import numpy
|
as np
fn = os.path.basename( x )
fn_split = fn.split( '.' )[0].split( '_' )
if '_cru_' in fn:
output_path = os.path.dirname( x ).replace( '/cru_ts31/', '/IEM/cru_ts31/' ) # hardwired!
fn_parts = ['variable', 'metric', 'model_1', 'model_2', 'kind', 'month', 'year']
fn_dict = dict( zip( fn_parts, fn_split ) )
fn_dict.update( scenario='historical', model='cru_ts31' )
else:
output_path = os.path.dirname( x ).replace( '/ar5/', '/IEM/ar5/' ) # hardwired!
fn_parts = ['variable', 'metric', 'model', 'scenario', 'ensemble', 'month', 'year']
fn_dict = dict( zip( fn_parts, fn_split ) )
try:
if not os.path.exists( output_path ):
os.makedirs( output_path )
except:
pass
fn_switch = { 'cld':'_'.join([ 'cld','mean','pct','iem',fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif',
'vap':'_'.join(['vap','mean','hPa','iem', fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif',
'tas':'_'.join(['tas','mean','C','iem',fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif',
'hur':'_'.join(['hur','mean','pct','iem',fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif' }
output_filename = os.path.join( output_path, fn_switch[ fn_dict[ 'variable' ] ] )
rst = rasterio.open( x )
rst_arr = rst.read( 1 )
template_arr = template_raster_mask.read( 1 )
template_meta = template_raster_mask.meta
template_meta.update( compress='lzw', nodata=rst.nodata )
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
output_arr = np.empty_like( template_arr.astype( np.float32 ) )
output_arr[ template_arr == 0 ] = rst.nodata
src_crs = {'init':'epsg:3338'}
dst_crs = {'init':'epsg:3338'}
reproject( rst_arr, output_arr, src_transform=rst.affine, src_crs=src_crs, src_nodata=rst.nodata, \
dst_transform=template_raster_mask.affine, dst_crs=dst_crs,\
dst_nodata=rst.nodata, resampling=RESAMPLING.cubic_spline, num_threads=2 )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
output_arr[ template_arr == 0 ] = rst.nodata
out.write( output_arr, 1 )
return output_filename
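
# Hedged single-file usage sketch (added for illustration; both paths and the
# filename parts below are placeholders). The parallel driver under __main__
# below is the original batch entry point.
def _example_single_file():
    import rasterio
    mask = rasterio.open( '/path/to/IEM_Mask_1km.tif' )  # mask raster: 0 = nodata, 1 = data
    fn = '/path/to/ar5/GFDL-CM3/tas/downscaled/tas_mean_GFDL-CM3_rcp60_r1i1p1_01_2062.tif'
    return resample_to_1km( fn, template_raster_mask=mask )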
if __name__ == '__main__':
import os, glob, rasterio
import numpy as np
import pandas as pd
from functools import partial
from pathos import multiprocessing as mp
# some setup:
input_path = '/Data/malindgren/cru_november_final/ar5' # '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/ar5'
template_raster_mask_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/extents/IEM_Mask_1km.tif'
ncores = 20
# read in the template raster mask must be 0-nodata, 1-data
template_raster_mask = rasterio.open( template_raster_mask_fn )
resample_to_1km_partial = partial( resample_to_1km, template_raster_mask=template_raster_mask )
models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
variables = ['cld', 'vap' ] # ['tas'] # ['hur'] # run the HUR after the cld/vap which is required for TEM
# tas_files = sorted( glob.glob( os.path.join( tas_input_path, model, 'tas', 'downscaled', '*.tif' ) ) )
path_list = [ os.path.join( input_path, model, variable, 'downscaled', '*.tif' ) for model in models for variable in variables ]
    # temporary for failed EOS run:
complete = '/Data/malindgren/cru_november_final/ar5/IPSL-CM5A-LR/cld/downscaled/*.tif'
path_list = [ path for path in path_list if path != complete ]
# end temporary
for path in path_list:
# print path
files = glob.glob( path )
# run it in parallel
pool = mp.Pool( processes=ncores )
pool.map( lambda x: resample_to_1km_partial( x=x ), files )
pool.close()
# for root, subs, files in os.walk( input_path ):
# if root.endswith( 'downscaled' ):
# # print 'running: %s' % root
# # add back in the file paths from the root
# files = [ os.path.join( root, i ) for i in files ]
# # run it in parallel
# pool = mp.Pool( processes=ncores )
# pool.map( lambda x: resample_to_1km_partial( x=x ), files )
# pool.close()
# # # # MAKE A MASK TO USE AS THE TEMPLATE RASTER
# template_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/extents/tas_mean_C_iem_cru_TS31_01_1901.tif'
# template = rasterio.open( template_fn )
# template_meta = template.meta
# template_mask = template.read_masks( 1 )
# template_arr = template.read( 1 )
# template_arr[ template_mask != 0 ] = 1
# template_arr[ template_mask == 0 ] = 0
# template_meta.update( compress='lzw', crs={'init':'epsg:3338'} )
# output_filename = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/extents/IEM_Mask_1km.tif'
# with rasterio.open( output_filename, 'w', **template_meta ) as out:
# out.write( template_arr, 1 )
# # # # #
# def standardized_fn_to_vars( fn ):
# ''' take a filename string following the convention for this downscaling and break into parts and return a dict'''
# fn = os.path.basename( fn )
# fn_split = fn.split( '.' )[0].split( '_' )
# if '_cru_' in fn:
# fn_parts = ['variable', 'metric', 'model_1', 'model_2', 'kind', 'month', 'year']
# fn_dict = dict( zip( fn_parts, fn_split ) )
# fn_dict.update( scenario='historical', model='cru_ts31' )
# # name_convention = [ 'variable', 'metric', 'model', 'scenario', 'experiment', 'begin_time', 'end_time' ]
# else:
# fn_parts = ['variable', 'metric', 'model', 'scenario', 'ensemble', 'month', 'year']
# fn_dict = dict( zip( fn_parts, fn_split ) )
# fn_switch = { 'cld':'_'.join([ 'cld','mean','pct','iem',fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif',
# 'vap':'_'.join(['vap','mean','hPa','iem', fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif',
# 'tas':'_'.join(['tas','mean','C','iem',fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif',
# 'hur':'_'.join(['hur','mean','pct','iem',fn_dict['model'],fn_dict['scenario'],fn_dict['month'], fn_dict['year'] ]) + '.tif' }
# output_filename = os.path.join( output_path, fn_switch[ fn_dict[ 'variable' ] ] )
# fn_list = fn.split( '.' )[0].split( '_' )
# return { i:j for i,j in zip( name_convention, fn_list )}
|
lmazuel/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/policy_set_result.py | Python | mit | 1,252 | 0.000799
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PolicySetResult(Model):
"""Result of a policy set evaluation.
:param has_error: A value indicating whether this policy set evaluation
has discovered violations.
:type has_error: bool
:param policy_violations: The list of policy violations.
:type policy_violations:
list[~azure.mgmt.devtestlabs.models.PolicyViolation]
"""
_attribute_map = {
        'has_error': {'key': 'hasError', 'type': 'bool'},
'policy_violations': {'key': 'policyViolations', 'type': '[PolicyViolation]'},
}
def __init__(self, has_error=None, policy_violations=None):
        super(PolicySetResult, self).__init__()
        self.has_error = has_error
self.policy_violations = policy_violations
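
# Hedged usage sketch (added for illustration): constructing a result by hand and
# reading it back. In practice instances are deserialized by the generated client
# rather than built directly.
def _example_policy_set_result():
    result = PolicySetResult(has_error=True, policy_violations=[])
    if result.has_error:
        return len(result.policy_violations or [])
    return 0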
|
shaurz/devo | search.py | Python | mit | 3,549 | 0.002254
import os, re, traceback
from dirtree_node import get_file_info
from util import is_text_file
class SearchAborted(Exception):
pass
def null_filter(info):
return True
class Search(object):
def __init__(self, path, match, output, file_filter=null_filter, dir_filter=null_filter):
self.path = path
self.match = match
self.output = output
self.file_filter = file_filter
self.dir_filter = dir_filter
self.encoding = "utf-8"
self.quit = False
def _search_file(self, filepath):
if self.quit:
raise SearchAborted()
self.output.begin_file(self, filepath)
if not is_text_file(filepath):
return
with open(filepath, "r") as f:
matched_file = False
for line_num, line in enumerate(f, 1):
line = line.rstrip("\r\n")
try:
line = line.decode(self.encoding)
except UnicodeDecodeError:
line = line.decode("latin-1")
if self.match(line):
if not matched_file:
self.output.add_file(self, filepath)
matched_file = True
self.output.add_line(self, line_num, line)
if self.quit:
raise SearchAborted()
if matched_file:
self.output.end_file(self)
def _search_dir(self, dirpath):
if self.quit:
raise SearchAborted()
try:
dirlist = os.listdir(dirpath)
except OSError:
pass
else:
dirlist.sort()
for name in dirlist:
self._search(dirpath, name)
def _search(self, dirpath, name):
if self.quit:
raise SearchAborted()
try:
info = get_file_info(dirpath, name)
if info.is_file and self.file_filter(info):
self._search_file(info.path)
elif info.is_dir and self.dir_filter(info):
self._search_dir(info.path)
except OSError:
pass
def search(self):
self.quit = False
try:
self._search(*os.path.split(self.path))
except SearchAborted:
self.output.abort_find(self)
except Exception as e:
self.output.end_find(self)
if not isinstance(e, (OSError, IOError)):
print traceback.format_exc()
else:
self.output.end_find(self)
def stop(self):
self.quit = True
class SearchFileOutput(object):
def __init__(self, file):
self.file = file
self.max_line_length = 100
def add_file(self, finder, filepath):
self.file.write(filepath + "\n")
    def add_line(self, finder, line_num, line):
if len(line) > self.max_line_length:
            line = line[:self.max_line_length] + "..."
self.file.write(" %d: %s\n" % (line_num, line))
def begin_file(self, finder, filepath):
pass
def end_file(self, finder):
self.file.write("\n")
def end_find(self, finder):
pass
def make_matcher(pattern, case_sensitive=True, is_regexp=False):
if not is_regexp:
pattern = "^.*" + re.escape(pattern)
flags = re.UNICODE
if not case_sensitive:
flags |= re.IGNORECASE
return re.compile(pattern, flags).search
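
# Hedged usage sketch (added for illustration): make_matcher returns the bound
# re.search of the compiled pattern, so calling it yields a match object (truthy)
# or None.
def _example_matchers():
    plain = make_matcher("class Search", case_sensitive=False)
    rx = make_matcher(r"def\s+\w+", is_regexp=True)
    return bool(plain("CLASS SEARCH(object):")), bool(rx("def stop(self):"))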
if __name__ == "__main__":
import sys
Search(".", make_matcher("class"), SearchFileOutput(sys.stdout)).search()
|
melizalab/mspikes | mspikes/modules/random_sources.py | Python | gpl-3.0 | 1,590 | 0.006918
# -*- coding: utf-8 -*-
# -*- mode: python -*-
"""Sources of random data
Copyright (C) 2013 Dan Meliza <dmeliza@uchicago.edu>
Created Wed May 29 14:50:02 2013
"""
from mspikes import util
from mspikes.types import DataBlock, Source, Node, tag_set
from numpy.random import RandomState
class rand_samples(Source):
"""Generates random values from N(0,1)"""
seed = 1
nsamples = 4096
def __init__(self, **options):
util.set_option_attributes(self, options, seed=1, nsamples=4096)
self.chunk_size = 1024
self.channel = "random"
self.sampling_rate = 1
self._randg = RandomState(self.seed)
@classmethod
def options(cls, addopt_f, **defaults):
addopt_f("--seed",
help="seed for random number generator",
type=int,
metavar='INT',
default=defaults.get('seed',cls.seed))
addopt_f("--nsamples",
help="number of samples to generate",
type=int,
metavar='INT',
default=defaults.get('nsamples',cls.nsamples))
    def data(self, t=0):
"""Generates a data chunk"""
return DataBlock(id=self.channel, offset=t, ds=self.sampling_rate,
data=self._randg.randn(self.chunk_size),
tags=tag_set("samples"))
def __iter__(self):
t = 0
while t < self.nsamples:
data = self.data(t)
            Node.send(self, data)
            yield data
t += self.chunk_size
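
# Hedged usage sketch (added for illustration), assuming set_option_attributes
# simply copies the keyword options onto the instance: each call to data()
# produces one DataBlock of chunk_size samples.
def _example_rand_samples():
    src = rand_samples(seed=42, nsamples=2048)
    chunk = src.data(0)                 # DataBlock starting at offset 0
    return chunk.offset, len(chunk.data)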
## TODO random_events
# Variables:
# End:
|
Chris7/cutadapt | cutadapt/report.py | Python | mit | 10,176 | 0.025649
# coding: utf-8
"""
Routines for printing a report.
"""
from __future__ import print_function, division, absolute_import
import sys
from collections import namedtuple
from contextlib import contextmanager
import textwrap
from .adapters import BACK, FRONT, PREFIX, SUFFIX, ANYWHERE
from .modifiers import QualityTrimmer, AdapterCutter
from .filters import (NoFilter, PairedNoFilter, TooShortReadFilter, TooLongReadFilter,
DiscardTrimmedFilter, DiscardUntrimmedFilter, Demultiplexer, NContentFilter)
class Statistics:
def __init__(self, n, total_bp1, total_bp2):
"""
n -- total number of reads
total_bp1 -- number of bases in first reads
total_bp2 -- number of bases in second reads (set to None for single-end data)
"""
self.n = n
self.total_bp = total_bp1
self.total_bp1 = total_bp1
if total_bp2 is None:
self.paired = False
else:
self.paired = True
self.total_bp2 = total_bp2
self.total_bp += total_bp2
def collect(self, adapters_pair, time, modifiers, modifiers2, writers):
self.time = max(time, 0.01)
self.too_short = None
self.too_long = None
self.written = 0
self.written_bp = [0, 0]
self.too_many_n = None
# Collect statistics from writers/filters
for w in writers:
if isinstance(w, (NoFilter, PairedNoFilter, Demultiplexer)) or isinstance(w.filter, (DiscardTrimmedFilter, DiscardUntrimmedFilter)):
self.written += w.written
if self.n > 0:
self.written_fraction = self.written / self.n
self.written_bp = self.written_bp[0] + w.written_bp[0], self.written_bp[1] + w.written_bp[1]
elif isinstance(w.filter, TooShortReadFilter):
self.too_short = w.filtered
elif isinstance(w.filter, TooLongReadFilter):
self.too_long = w.filtered
elif isinstance(w.filter, NContentFilter):
self.too_many_n = w.filtered
assert self.written is not None
# Collect statistics from modifiers
self.with_adapters = [0, 0]
self.quality_trimmed_bp = [0, 0]
self.did_quality_trimming = False
for i, modifiers_list in [(0, modifiers), (1, modifiers2)]:
for modifier in modifiers_list:
if isinstance(modifier, QualityTrimmer):
self.quality_trimmed_bp[i] = modifier.trimmed_bases
self.did_quality_trimming = True
elif isinstance(modifier, AdapterCutter):
self.with_adapters[i] += modifier.with_adapters
self.with_adapters_fraction = [ (v / self.n if self.n > 0 else 0) for v in self.with_adapters ]
self.quality_trimmed = sum(self.quality_trimmed_bp)
self.quality_trimmed_fraction = self.quality_trimmed / self.total_bp if self.total_bp > 0 else 0.0
self.total_written_bp = sum(self.written_bp)
self.total_written_bp_fraction = self.total_written_bp / self.total_bp if self.total_bp > 0 else 0.0
if self.n > 0:
if self.too_short is not None:
self.too_short_fraction = self.too_short / self.n
if self.too_long is not None:
self.too_long_fraction = self.too_long / self.n
if self.too_many_n is not None:
self.too_many_n_fraction = self.too_many_n / self.n
ADAPTER_TYPES = {
BACK: "regular 3'",
FRONT: "regular 5'",
PREFIX: "anchored 5'",
SUFFIX: "anchored 3'",
ANYWHERE: "variable 5'/3'"
}
def print_error_ranges(adapter_length, error_rate):
print("No. of allowed errors:")
prev = 0
for errors in range(1, int(error_rate * adapter_length) + 1):
r = int(errors / error_rate)
print("{0}-{1} bp: {2};".format(prev, r - 1, errors - 1), end=' ')
prev = r
if prev == adapter_length:
print("{0} bp: {1}".format(adapter_length, int(error_rate * adapter_length)))
else:
print("{0}-{1} bp: {2}".format(prev, adapter_length, int(error_rate * adapter_length)))
print()
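
# Hedged illustration (added): for a 20 bp adapter at a 10% error rate the ranges
# printed above are 0-9 bp -> 0 errors, 10-19 bp -> 1 error, 20 bp -> 2 errors.
def _example_error_ranges():
    print_error_ranges(20, 0.1)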
def print_histogram(d, adapter_length, n, error_rate, errors):
"""
Print a histogram. Also, print the no. of reads expected to be
trimmed by chance (assuming a uniform distribution of nucleotides in the reads).
d -- a dictionary mapping lengths of trimmed sequences to their respective frequency
adapter_length -- adapter length
n -- total no. of reads.
"""
h = []
for length in sorted(d):
# when length surpasses adapter_length, the
# probability does not increase anymore
estimated = n * 0.25 ** min(length, adapter_length)
h.append( (length, d[length], estimated) )
print("length", "count", "expect", "max.err", "error counts", sep="\t")
for length, count, estimate in h:
max_errors = max(errors[length].keys())
errs = ' '.join(str(errors[length][e]) for e in range(max_errors+1))
print(length, count, "{0:.1F}".format(estimate), int(error_rate*min(length, adapter_length)), errs, sep="\t")
print()
def print_adjacent_bases(bases, sequence):
"""
Print a summary of the bases preceding removed adapter sequences.
Print a warning if one of the bases is overrepresented and there are
at least 20 preceding bases available.
Return whether a warning was printed.
"""
total = sum(bases.values())
if total == 0:
return False
print('Bases preceding removed adapters:')
warnbase = None
for base in ['A', 'C', 'G', 'T', '']:
b = base if base != '' else 'none/other'
fraction = 1.0 * bases[base] / total
        print('  {0}: {1:.1%}'.format(b, fraction))
if fraction > 0.8 and base != '':
warnbase = b
if total >= 20 and warnbase is not None:
print('WARNING:')
print(' The adapter is preceded by "{0}" extremely often.'.format(warnbase))
        print('    The provided adapter sequence may be incomplete.')
print(' To fix the problem, add "{0}" to the beginning of the adapter sequence.'.format(warnbase))
print()
return True
print()
return False
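
# Hedged illustration (added): with at least 20 observed bases and one base making
# up more than 80% of them, print_adjacent_bases() emits the warning and returns True.
def _example_adjacent_bases():
    from collections import Counter
    bases = Counter({'A': 1, 'C': 0, 'G': 0, 'T': 19, '': 0})
    return print_adjacent_bases(bases, sequence=None)  # sequence is unused here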
@contextmanager
def redirect_standard_output(file):
if file is None:
yield
return
old_stdout = sys.stdout
sys.stdout = file
yield
sys.stdout = old_stdout
def print_report(stats, adapters_pair):
"""Print report to standard output."""
if stats.n == 0:
print("No reads processed! Either your input file is empty or you used the wrong -f/--format parameter.")
return
print("Finished in {0:.2F} s ({1:.0F} us/read; {2:.2F} M reads/minute).".format(
stats.time, 1E6 * stats.time / stats.n, stats.n / stats.time * 60 / 1E6))
report = "\n=== Summary ===\n\n"
if stats.paired:
report += textwrap.dedent("""\
Total read pairs processed: {n:13,d}
Read 1 with adapter: {with_adapters[0]:13,d} ({with_adapters_fraction[0]:.1%})
Read 2 with adapter: {with_adapters[1]:13,d} ({with_adapters_fraction[1]:.1%})
""")
else:
report += textwrap.dedent("""\
Total reads processed: {n:13,d}
Reads with adapters: {with_adapters[0]:13,d} ({with_adapters_fraction[0]:.1%})
""")
if stats.too_short is not None:
report += "{pairs_or_reads} that were too short: {too_short:13,d} ({too_short_fraction:.1%})\n"
if stats.too_long is not None:
report += "{pairs_or_reads} that were too long: {too_long:13,d} ({too_long_fraction:.1%})\n"
if stats.too_many_n is not None:
report += "{pairs_or_reads} with too many N: {too_many_n:13,d} ({too_many_n_fraction:.1%})\n"
report += textwrap.dedent("""\
{pairs_or_reads} written (passing filters): {written:13,d} ({written_fraction:.1%})
Total basepairs processed: {total_bp:13,d} bp
""")
if stats.paired:
report += " Read 1: {total_bp1:13,d} bp\n"
report += " Read 2: {total_bp2:13,d} bp\n"
if stats.did_quality_trimming:
report += "Quality-trimmed: {quality_trimmed:13,d} bp ({quality_trimmed_fraction:.1%})\n"
if stats.paired:
report += " Read 1: {quality_trimmed_bp[0]:13,d} bp\n"
report += " Read 2: {quality_trimmed_bp[1]:13,d} bp\n"
report += "Total written (filtered): {total_written_bp:13,d} bp ({total_written_bp_fraction:.1%})\n"
if stats.paired:
report += " Read 1: {written_bp[0]:13,d} bp\n"
report += " Read 2: {written_bp[1]:13,d} bp\n"
v = vars(stats)
v['pairs_or_reads'] = "Pairs" if stats.paired else "Reads"
try:
report = report.format(**v)
except ValueError:
# Python 2.6 does not support the comma format specifier (PEP 378)
report = report.replace(",d}", "d}").format(**v)
print(report)
warning = False
for which_in_pair in (0, 1):
for adapter in adapters_pair[which_in_pair]:
total_front = sum(adapter.lengths_front.values())
to
|
silbertmonaphia/ml | co/20170212/test.py | Python | gpl-3.0 | 5,721 | 0.006817
#! /usr/bin/env python
# encoding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import logging
import random
import multiprocessing
import numpy as np
import sklearn
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
import jieba
def load_data(filename):
data = open(filename, 'r').readlines()
    # data is a list of lines
    for line in data:
        print line
    return data, len(data)
def split_data(data, data_label, size=0.2, random=0):
train, test, train_label, test_label = train_test_split(data, data_label, test_size=size, random_state=random)
return train, test, train_label, test_label
def naive_bayes_classifier(train_x, train_y):
model = MultinomialNB()
model.fit(train_x, train_y)
return model
def Tfidf(data):
part_word = lambda x: jieba.cut(x)
    stopwords = open('stopword.txt', 'r').readlines()
vec = TfidfVectorizer(tokenizer=part_word, stop_words=stopwords)
vector=vec.fit_transform(data).toarray()
return vector
def delete(datas, vectors):
index = []
for i in range(datas.shape[0]):
for vector in vectors:
if not (vector - datas[i, :]).any():
index.append(i)
return np.delete(datas, index, 0)
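
# Hedged illustration (added): delete() drops every row of `datas` that exactly
# matches one of the rows in `vectors`.
def _example_delete():
    datas = np.array([[1., 0.], [0., 1.], [1., 1.]])
    vectors = np.array([[0., 1.]])
    return delete(datas, vectors)  # -> [[1., 0.], [1., 1.]]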
def trainer(param_wrapper):
#param_wrapper = unsplit, train, train_label, m
unsplit = param_wrapper[0]
train = param_wrapper[1]
train_label = param_wrapper[2]
m = param_wrapper[3]
pro_0 = np.array([])
pro_1 = np.array([])
for m_i in range(m):
        # 'rand' holds the indices of the features that make up this random subspace
rand = random.sample(range(train.shape[1]), train.shape[1] / m)
model = naive_bayes_classifier(train[:, np.array(rand)], train_label)
#unsplit[rand] means a view
pro = model.predict_proba(unsplit[rand])
pro_0 = np.r_[pro_0, pro[:, 0]]#negative
pro_1 = np.r_[pro_1, pro[:, 1]]#positive
if max(pro_0) > max(pro_1):
return max(pro_0), 0, unsplit
else:
return max(pro_1), 1, unsplit
def main(n,m):
# global n, m
# load data
pos, pos_size = load_data('pos.txt')
neg, neg_size = load_data('neg.txt')
data = pos + neg
#generate labels of pos and neg
label = [1] * pos_size + [0] * neg_size
#vectorize list==>ndarray
vector = Tfidf(data)
# split data,train=0.1,test=0.2,unsplit=0.7
train, test, train_label, test_label = split_data(vector, label)
unsplits, train, unsplit_label, train_label = split_data(train, train_label, 1.0 / 8)
while unsplits.shape[0] != 0:
param_wrapper = []
for unsplit in unsplits:
param_wrapper.append((unsplit, train, train_label, m))
#doesn't change the train and train_label set(not enlarge them)
results = pool.map(trainer, param_wrapper)
pro_0 = []
pro_1 = []
vectors_0 = []
vectors_1 = []
#divide result into pos and neg these 2 classes
for result in results:
if result[1] > 0:#if the tag is pos
pro_1.append(result[0])
vectors_1.append(result[2])
else:
pro_0.append(result[0])
vectors_0.append(result[2])
index_0 = np.argsort(-np.array(pro_0))
index_1 = np.argsort(-np.array(pro_1))
vectors_0 = np.array(vectors_0)
vectors_1 = np.array(vectors_1)
#Update train and train_label set and remove
if vectors_0.shape[0] >= n:
train = np.r_[train, vectors_0[index_0[0:n], :]]
train_label = np.r_[train_label, np.array([0] * n)]
unsplits = delete(unsplits, vectors_0[index_0[0:n], :])
else:
if vectors_0.shape[0] > 0:
train = np.r_[train, vectors_0]
train_label = np.r_[train_label,
np.array([0] * vectors_0.shape[0])]
unsplits = delete(unsplits, vectors_0)
if vectors_1.shape[0] >= n:
train = np.r_[train, vectors_1[index_1[0:n], :]]
train_label = np.r_[train_label, np.array([1] * n)]
unsplits = delete(unsplits, vectors_1[index_1[0:n], :])
else:
if vectors_1.shape[0] > 0:
train = np.r_[train, vectors_1]
train_label = np.r_[train_label,
np.array([1] * vectors_1.shape[0])]
unsplits = delete(unsplits, vectors_1)
print 'unsplit= ', str(unsplits.shape[0]), 'train= ', str(train.shape[0])
model = naive_bayes_classifier(train, train_label)
#test
predict = model.predict(test)
accuracy = metrics.accuracy_score(test_label, predict)
recall = metrics.recall_score(test_label, predict)
print 'accuracy= ' + str(accuracy * 100) + '%'
print 'recall= ' + str(recall * 100) + '%'
if __name__ == '__main__':
# set param
m = 4 # the number of subspaces
n = 8 # the number of updates staff every time
pool = multiprocessing.Pool(processes=14)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='my.log',
filemode='w')
main(n,m)
|
koodilehto/kryptoradio-xmit-tests | receiver/adapter_info.py | Python | mit | 324 | 0
import linuxdvb
import fcntl
fefd = open('/dev/dvb/adapter0/frontend0', 'r+')
# Information
feinfo = linuxdvb.dvb_frontend_info()
fcntl.ioctl(fefd, linuxdvb.FE_GET_INFO, feinfo)
print feinfo.name
for bit, flag in linuxdvb.fe_caps.items():
if (feinfo.caps & bit) > 0:
        print("cap = "+flag)
# Close
fefd.close()
|
ethereum/solidity | test/scripts/test_isolate_tests.py | Python | gpl-3.0 | 3,398 | 0.002943
#!/usr/bin/env python
import unittest
from textwrap import dedent, indent
from unittest_helpers import FIXTURE_DIR, load_fixture
# NOTE: This test file file only works with scripts/ added to PYTHONPATH so pylint can't find the imports
# pragma pylint: disable=import-error
from isolate_tests import extract_solidity_docs_cases, extract_yul_docs_cases
# pragma pylint: enable=import-error
CODE_BLOCK_RST_PATH = FIXTURE_DIR / 'code_block.rst'
CODE_BLOCK_RST_CONTENT = load_fixture(CODE_BLOCK_RST_PATH)
CODE_BLOCK_WITH_DIRECTIVES_RST_PATH = FIXTURE_DIR / 'code_block_with_directives.rst'
CODE_BLOCK_WITH_DIRECTIVES_RST_CONTENT = load_fixture(CODE_BLOCK_WITH_DIRECTIVES_RST_PATH)
def formatCase(text):
"""Formats code to contain only one indentation and terminate with a \n"""
return indent(dedent(text.lstrip("\n")), " ") + "\n"
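
# Hedged illustration (added): formatCase drops the leading newline, dedents the
# snippet, re-indents it by four spaces and appends a trailing newline.
def _format_case_example():
    assert formatCase("\n    contract C {}\n") == "    contract C {}\n\n"
    return True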
class TestExtractDocsCases(unittest.TestCase):
def setUp(self):
self.maxDiff = 10000
def test_solidity_block(self):
expected_cases = [formatCase(case) for case in [
"""
// SPDX-License-Identifier: GPL-3.0
pragma solidity >=0.7.0 <0.9.0;
contract C {
function foo() public view {}
}
""",
"""
contract C {}
""",
]]
self.assertEqual(extract_solidity_docs_cases(CODE_BLOCK_RST_PATH), expected_cases)
def test_solidity_block_with_directives(self):
expected_cases = [formatCase(case) for case in [
"""
// SPDX-License-Identifier: GPL-3.0
pragma solidity >=0.7.0 <0.9.0;
contract C {
function foo() public view {}
}
""",
"""
contract C {}
""",
"""
contract D {}
:linenos:
""",
"""
contract E {}
""",
]]
self.assertEqual(extract_solidity_docs_cases(CODE_BLOCK_WITH_DIRECTIVES_RST_PATH), expected_cases)
def test_yul_block(self):
expected_cases = [formatCase(case) for case in [
"""
{
let x := add(1, 5)
}
""",
"""
// Yul code wrapped in object
{
{
let y := mul(3, 5)
}
}
""",
"""
// Yul code wrapped in named object
object "Test" {
{
                    let y := mul(6, 9)
                }
}
""",
]]
self.assertEqual(extract_yul_docs_cases(CODE_BLOCK_RST_PATH), expected_cases)
def test_yul_block_with_directives(self):
expected_cases = [formatCase(case) for case in [
"""
{
let x := add(1, 5)
            }
            """,
            """
            // Yul code wrapped in object
{
let y := mul(3, 5)
}
""",
"""
// Yul code wrapped in named object
object "Test" {
let y := mul(3, 5)
:linenos:
}
""",
]]
self.assertEqual(extract_yul_docs_cases(CODE_BLOCK_WITH_DIRECTIVES_RST_PATH), expected_cases)
|
OlegKlimenko/Plamber | api/tests/test_views/test_index.py | Python | apache-2.0 | 8,371 | 0.004778
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from ...views.index_views import user_login
from app.models import TheUser
# ----------------------------------------------------------------------------------------------------------------------
class IndexViewsTestCase(TestCase):
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username='api_login', email='api_login@email.com', password='123456')
cls.the_user = TheUser.objects.get(id_user=cls.user)
cls.client = APIClient()
cls.api_key = settings.API_SECRET_KEY
    # ------------------------------------------------------------------------------------------------------------------
def test_user_login_missing_params(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key, 'username': 'username'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data['detail'], {'password': ['This field is required.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_too_long_username(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'a' * 40,
'password': 'somepassword'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'], {'username': ['Ensure this field has no more than 30 characters.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_too_short_username(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'a',
'password': 'somepassword'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'],{'username': ['Ensure this field has at least 2 characters.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_username_regex_not_valid(self):
username_patterns = [
'ab#$@cdev', '#$@username', 'username%#&#&', 'db24!!!db34', '#$@234234', '#123dkf%'
]
for pattern in username_patterns:
with self.subTest(pattern=pattern):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': pattern,
'password': 'somepassword'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'],
{'username': ['This value does not match the required pattern.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_email_regex_not_valid(self):
email_patterns = [
'no_extension@ddd', '@first.missing', 'after_at_miss@', '$%#@474**.om', 'em#$@ail@m.com', '#em@ail@m.com'
]
for pattern in email_patterns:
with self.subTest(pattern=pattern):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': pattern,
'password': 'somepassword'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'],
{'username': ['This value does not match the required pattern.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_too_long_password(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'test_username',
'password': 'p' * 17})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'], {'password': ['Ensure this field has no more than 16 characters.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_valid_username_user_not_exists(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'test_username',
'password': 'password'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.data['data']['token'], None)
self.assertEqual(response.data['detail'], 'not authenticated')
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_valid_email_user_not_exists(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'api_login_email@email.com',
'password': '123456'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.data['data']['token'], None)
self.assertEqual(response.data['detail'], 'not authenticated')
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_success_with_username(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'api_login',
'password': '123456'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['data']['token'], self.the_user.auth_token)
self.assertEqual(response.data['detail'], 'successful')
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_success_with_email(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'api_login@email.com',
'password': '123456'})
self.assertEqual(response.resolver_match.func, user_login)
sel
|
geier/alot | alot/buffers.py | Python | gpl-3.0 | 24,285 | 0
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import logging
import os
import urwid
from urwidtrees import ArrowTree, TreeBox, NestedTree
from notmuch import NotmuchError
from .settings.const import settings
from . import commands
from .walker import PipeWalker
from .helper import shorten_author_string
from .db.errors import NonexistantObjectError
from .widgets.globals import TagWidget
from .widgets.globals import HeadersList
from .widgets.globals import AttachmentWidget
from .widgets.bufferlist import BufferlineWidget
from .widgets.search import ThreadlineWidget
from .widgets.thread import ThreadTree
class Buffer(object):
"""Abstract base class for buffers."""
modename = None # mode identifier for subclasses
def __init__(self, ui, widget):
self.ui = ui
self.body = widget
def __str__(self):
return '[%s]' % self.modename
def render(self, size, focus=False):
return self.body.render(size, focus)
def selectable(self):
return self.body.selectable()
def rebuild(self):
"""tells the buffer to (re)construct its visible content."""
pass
def keypress(self, size, key):
return self.body.keypress(size, key)
def cleanup(self):
"""called before buffer is closed"""
pass
def get_info(self):
"""
return dict of meta infos about this buffer.
This can be requested to be displayed in the statusbar.
"""
return {}
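
# Hedged illustration (added): the smallest possible Buffer subclass just wraps an
# urwid widget and picks a mode name; everything else is inherited. Not used by
# alot itself.
class _ExampleBuffer(Buffer):
    modename = 'example'

    def __init__(self, ui):
        Buffer.__init__(self, ui, urwid.Filler(urwid.Text('hello')))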
class BufferlistBuffer(Buffer):
"""lists all active buffers"""
modename = 'bufferlist'
def __init__(self, ui, filtfun=lambda x: x):
self.filtfun = filtfun
self.ui = ui
self.isinitialized = False
self.rebuild()
Buffer.__init__(self, ui, self.body)
def index_of(self, b):
"""
returns the index of :class:`Buffer` `b` in the global list of active
buffers.
"""
return self.ui.buffers.index(b)
def rebuild(self):
if self.isinitialized:
focusposition = self.bufferlist.get_focus()[1]
else:
focusposition = 0
self.isinitialized = True
lines = list()
displayedbuffers = [b for b in self.ui.buffers if self.filtfun(b)]
for (num, b) in enumerate(displayedbuffers):
line = BufferlineWidget(b)
if (num % 2) == 0:
attr = settings.get_theming_attribute('bufferlist',
'line_even')
else:
attr = settings.get_theming_attribute('bufferlist', 'line_odd')
focus_att = settings.get_theming_attribute('bufferlist',
'line_focus')
buf = urwid.AttrMap(line, attr, focus_att)
num = urwid.Text('%3d:' % self.index_of(b))
lines.append(urwid.Columns([('fixed', 4, num), buf]))
self.bufferlist = urwid.ListBox(urwid.SimpleListWalker(lines))
num_buffers = len(displayedbuffers)
if focusposition is not None and num_buffers > 0:
self.bufferlist.set_focus(focusposition % num_buffers)
self.body = self.bufferlist
    def get_selected_buffer(self):
        """returns currently selected :class:`Buffer` element from list"""
        linewidget, _ = self.bufferlist.get_focus()
bufferlinewidget = linewidget.get_focus().original_widget
return bufferlinewidget.get_buffer()
def focus_first(self):
"""Focus the first line in the buffer list."""
self.body.set_focus(0)
class EnvelopeBuffer(Buffer):
"""message composition mode"""
modename = 'envelope'
    def __init__(self, ui, envelope):
self.ui = ui
self.envelope = envelope
self.all_headers = False
self.rebuild()
Buffer.__init__(self, ui, self.body)
def __str__(self):
to = self.envelope.get('To', fallback='unset')
return '[envelope] to: %s' % (shorten_author_string(to, 400))
def get_info(self):
info = {}
info['to'] = self.envelope.get('To', fallback='unset')
return info
def cleanup(self):
if self.envelope.tmpfile:
os.unlink(self.envelope.tmpfile.name)
def rebuild(self):
displayed_widgets = []
hidden = settings.get('envelope_headers_blacklist')
# build lines
lines = []
for (k, vlist) in self.envelope.headers.iteritems():
if (k not in hidden) or self.all_headers:
for value in vlist:
lines.append((k, value))
# sign/encrypt lines
if self.envelope.sign:
description = 'Yes'
sign_key = self.envelope.sign_key
if sign_key is not None and len(sign_key.subkeys) > 0:
description += ', with key ' + sign_key.uids[0].uid
lines.append(('GPG sign', description))
if self.envelope.encrypt:
description = 'Yes'
encrypt_keys = self.envelope.encrypt_keys.values()
if len(encrypt_keys) == 1:
description += ', with key '
elif len(encrypt_keys) > 1:
description += ', with keys '
key_ids = []
for key in encrypt_keys:
if key is not None and key.subkeys:
key_ids.append(key.uids[0].uid)
description += ', '.join(key_ids)
lines.append(('GPG encrypt', description))
if self.envelope.tags:
lines.append(('Tags', ','.join(self.envelope.tags)))
# add header list widget iff header values exists
if lines:
key_att = settings.get_theming_attribute('envelope', 'header_key')
value_att = settings.get_theming_attribute('envelope',
'header_value')
gaps_att = settings.get_theming_attribute('envelope', 'header')
self.header_wgt = HeadersList(lines, key_att, value_att, gaps_att)
displayed_widgets.append(self.header_wgt)
# display attachments
lines = []
for a in self.envelope.attachments:
lines.append(AttachmentWidget(a, selectable=False))
if lines:
self.attachment_wgt = urwid.Pile(lines)
displayed_widgets.append(self.attachment_wgt)
self.body_wgt = urwid.Text(self.envelope.body)
displayed_widgets.append(self.body_wgt)
self.body = urwid.ListBox(displayed_widgets)
def toggle_all_headers(self):
"""toggles visibility of all envelope headers"""
self.all_headers = not self.all_headers
self.rebuild()
class SearchBuffer(Buffer):
"""shows a result list of threads for a query"""
modename = 'search'
threads = []
_REVERSE = {'oldest_first': 'newest_first',
'newest_first': 'oldest_first'}
def __init__(self, ui, initialquery='', sort_order=None):
self.dbman = ui.dbman
self.ui = ui
self.querystring = initialquery
default_order = settings.get('search_threads_sort_order')
self.sort_order = sort_order or default_order
self.result_count = 0
self.isinitialized = False
self.proc = None # process that fills our pipe
self.rebuild()
Buffer.__init__(self, ui, self.body)
def __str__(self):
formatstring = '[search] for "%s" (%d message%s)'
return formatstring % (self.querystring, self.result_count,
's' if self.result_count > 1 else '')
def get_info(self):
info = {}
info['querystring'] = self.querystring
info['result_count'] = self.result_count
info['result_count_positive'] = 's' if self.result_count > 1 else ''
return info
def cleanup(self):
self.kill_filler_process()
def kill_filler_process(self):
"""
terminates the proce
|
nkgilley/home-assistant | homeassistant/components/verisure/switch.py | Python | apache-2.0 | 2,311 | 0.000433
"""Support for Verisure Smartplugs."""
import logging
from time import monotonic
from homeassistant.components.switch import SwitchEntity
from . import CONF_SMARTPLUGS, HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure switch platform."""
if not int(hub.config.get(CONF_SMARTPLUGS, 1)):
return False
hub.update_overview()
switches = []
    switches.extend(
[
VerisureSmartplug(device_label)
for device_label in hub.get("$.smartPlugs[*].deviceLabel")
]
)
add_entities(switches)
class VerisureSmartplug(SwitchEntity):
    """Representation of a Verisure smartplug."""

    def __init__(self, device_id):
"""Initialize the Verisure device."""
self._device_label = device_id
self._change_timestamp = 0
self._state = False
@property
def name(self):
"""Return the name or location of the smartplug."""
return hub.get_first(
"$.smartPlugs[?(@.deviceLabel == '%s')].area", self._device_label
)
@property
def is_on(self):
"""Return true if on."""
if monotonic() - self._change_timestamp < 10:
return self._state
self._state = (
hub.get_first(
"$.smartPlugs[?(@.deviceLabel == '%s')].currentState",
self._device_label,
)
== "ON"
)
return self._state
@property
def available(self):
"""Return True if entity is available."""
return (
hub.get_first("$.smartPlugs[?(@.deviceLabel == '%s')]", self._device_label)
is not None
)
def turn_on(self, **kwargs):
"""Set smartplug status on."""
hub.session.set_smartplug_state(self._device_label, True)
self._state = True
self._change_timestamp = monotonic()
def turn_off(self, **kwargs):
"""Set smartplug status off."""
hub.session.set_smartplug_state(self._device_label, False)
self._state = False
self._change_timestamp = monotonic()
# pylint: disable=no-self-use
def update(self):
"""Get the latest date of the smartplug."""
hub.update_overview()
|
radical-cybertools/ExTASY | doc/scripts/user_script.py | Python | mit | 1,886 | 0.008484
from radical.ensemblemd import Kernel
from radical.ensemblemd import Pipeline
from radical.ensemblemd import EnsemblemdError
from radical.ensemblemd import SingleClusterEnvironment
#Used to register user defined kernels
from radical.ensemblemd.engine import get_engine
#Import our new kernel
from new_kernel import MyUserDefinedKernel
# Register the user-defined kernel with Ensemble MD Toolkit.
get_engine().add_kernel_plugin(MyUserDefinedKernel)
#Now carry on with your application as usual !
class Sleep(Pipeline):
def __init__(self, instances,steps):
Pipeline.__init__(self, instances,steps)
def step_1(self, instance):
"""This step sleeps for 60 seconds."""
k = Kernel(name="sleep")
k.arguments = ["--interval=10"]
return k
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
try:
# Create a new static execution context with one resource and a fixed
# number of cores and runtime.
cluster = SingleClusterEnvironment(
resource="local.localhost",
cores=1,
walltime=15,
username=None,
project=None
)
# Allocate the resources.
cluster.allocate()
# Set the 'instances' of the pipeline to 16. This means that 16 instances
        # of each pipeline step are executed.
        #
# Execution of the 16 pipeline instances can happen concurrently or
# sequentially, depending on the resources (cores) available in the
# SingleClusterEnvironment.
sleep = Sleep(steps=1,instances=16)
cluster.run(sleep)
cluster.deallocate()
except EnsemblemdError, er:
print "Ensemble MD Toolkit Error: {0}".format(str(er))
        raise  # Just raise the exception again to get the backtrace
|
liuhong1happy/DockerConsoleApp | views/application.py | Python | apache-2.0 | 6,294 | 0.016378
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from services.application import ApplicationService
from services.application_access import ApplicationAccessService
import tornado.web
from tornado import gen
import tornado.escape
import json
from util.rabbitmq import send_message
from stormed import Message
import settings
from views import AsyncBaseHandler
import time
class ApplicationRunHandler(AsyncBaseHandler):
s_application = ApplicationService()
@gen.coroutine
def _post_(self):
project_url = self.get_argument("project_url",None)
project_name = self.get_argument("project_name",None)
storage_path = self.get_argument("storage_path",None)
user_id = str(self.current_user.get("_id",None))
user_name = str(self.current_user.get("name",None))
create_time = time.time()
        # Database operation: build and insert the application record
insertData = {}
insertData["project_url"] = project_url
insertData["project_name"] = project_name
insertData["storage_path"] = storage_path
insertData["user_id"] = user_id
insertData["status"] = 'created'
insertData["logs"] = [{"create_time":create_time,"info":{"stream":"started run application:"+project_name},"user_id":user_id}]
result = yield self.s_application.insert_application(insertData)
        # Add the task to the message queue
msg = Message( json.dumps({
"application_id":result["_id"],
"project_url":project_url,
"project_name":project_name,
"storage_path":storage_path,
"user_id":user_id,
"user_name":user_name,
'app_count':1,
"reply_to":'service_logs'
}))
send_message(msg,settings.RUN_APPLICATION_EXCHANGE,settings.RUN_APPLICATION_ROUTING)
if result is None:
self.render_error(error_code=404,msg="not data")
else:
insertData["_id"] = result["_id"]
self.write_result(data=insertData)
class ApplicationInfoHandler(AsyncBaseHandler):
s_application = ApplicationService()
@gen.coroutine
def _post_(self):
application_id = self.get_argument("application_id",None)
        app = yield self.s_application.find_one(application_id)
        if app is None:
            self.render_error(error_code=404,msg="not data")
        else:
            # convert the ObjectId only when a document was actually found
            app["_id"] = str(app["_id"])
            self.write_result(data=app)
class ApplicationsHandler(AsyncBaseHandler):
s_application = ApplicationService()
fields={
"project_url":True,
"project_name":True,
"app_name":True,
"user_id":True,
"user_name":True,
"status":True,
"logs":True,
"update_time":True,
'create_time':True,
"run_host":True,
"inspect_container":True,
"address_prefix":True,
"singleton":True
}
@gen.coroutine
def _get_(self):
spec_type = self.get_argument("spec_type","app_name")
spec_text = self.get_argument("spec_text","")
page_index =int(self.get_argument("page_index",0))
page_size =int(self.get_argument("page_size",20))
spec ={}
spec[spec_type]={ '$regex' : spec_text}
spec["user_id"] = str(self.current_user.get("_id",None))
applications =yield self.s_application.get_appliactions(spec,fields=self.fields,page_index=page_index,page_size=page_size)
if not applications:
self.render_error(error_code=404,msg="not data")
else:
self.write_result(data=applications)
class ApplicationAccessHandler(AsyncBaseHandler):
s_application = ApplicationService()
s_application_access = ApplicationAccessService()
@gen.coroutine
def _get_(self):
access_id = self.get_argument("access_id",None)
access_info = yield self.s_application_access.find_one(access_id)
if access_info is None:
self.render_error(error_code=404,msg="not data")
else:
self.write_result(data=access_info)
@gen.coroutine
def _post_(self):
access_type = self.get_argument("type",None)
application_id = self.get_argument("id",None)
access_content = self.get_argument("content","")
container_info =yield self.s_application.find_one(application_id)
if container_info is None:
container_info = {}
        # Fetched from the database; never expose this to the outside
container_host = container_info.get("run_host",None)
container_name = container_info.get("app_name",None)
if container_host is None or container_name is None:
self.render_error(error_code=404,msg="not success")
user_id = str(self.current_user.get("_id",None))
user_name = str(self.current_user.get("name",None))
create_time = time.time()
        # Database operation: build the access record
accessData = {}
accessData["access_type"] = access_type
accessData["application_id"] = application_id
accessData["container_name"] = container_name
accessData["container_host"] = container_host
accessData["access_content"] = access_content
accessData["user_id"] = user_id
accessData["status"] = 'start'
accessData["logs"] = [
{
"create_time":create_time,
"info":"started access application:"+application_id+",it is hosted in "+container_host,
"user_id":user_id
}
]
result= yield self.s_application_access.access_application(accessData)
        # Add the access task to the message queue
msg = Message( json.dumps({
"access_id":result,
"access_type":access_type,
"access_content":access_content,
"application_id":application_id,
"container_host":container_host,
"container_name":container_name,
"user_id":user_id,
"user_name":user_name,
"reply_to":'access_logs'
}))
send_message(msg,settings.ACCESS_APPLICATION_EXCHANGE,settings.ACCESS_APPLICATION_ROUTING+"."+container_host)
if result is None:
self.render_error(error_code=404,msg="not data")
else:
accessData["_id"] = str(result)
self.write_result(data=accessData)
|
getsentry/sentry-hipchat-ac | sentry_hipchat_ac/migrations/0002_auto__del_mentionedevent.py | Python | apache-2.0 | 9,514 | 0.007988
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'MentionedEvent'
db.delete_table(u'sentry_hipchat_ac_mentionedevent')
def backwards(self, orm):
# Adding model 'MentionedEvent'
db.create_table(u'sentry_hipchat_ac_mentionedevent', (
('last_mentioned', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('group', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(related_name='hipchat_mentioned_groups', to=orm['sentry.Group'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(related_name='hipchat_mentioned_events', to=orm['sentry.Project'])),
('event', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(related_name='hipchat_mentioned_events', null=True, to=orm['sentry.Event'])),
('tenant', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry_hipchat_ac.Tenant'])),
))
db.send_create_signal(u'sentry_hipchat_ac', ['MentionedEvent'])
models = {
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'sentry_hipchat_ac.tenant': {
'Meta': {'object_name': 'Tenant'},
            'api_base_url': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'auth_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'hipchat_tenant_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'capabilities_url': ('django.db.models.fields.CharField', [], {'
|
npklein/easybuild-easyconfigs | easybuild/easyconfigs/r/RPlus/find_missing_extensions.py | Python | gpl-2.0 | 27,561 | 0.000109
exts_list = [
('lattice', '0.20-35', cran_options),
('nlme', '3.1-131.1', cran_options),
('Matrix', '1.2-12', cran_options),
('mgcv', '1.8-26', cran_options),
('plotfunctions', '1.3', cran_options),
('itsadug', '2.3', cran_options),
('abind', '1.4-5', cran_options),
('acepack', '1.4.1', cran_options),
('MASS', '7.3-49', cran_options),
('ade4', '1.7-10', cran_options),
('tkrplot', '0.0-23', cran_options),
('foreign', '0.8-69', cran_options),
('shapefiles', '0.7', cran_options),
('sp', '1.2-7', cran_options),
('adehabitat', '1.8.20', cran_options),
('nnet', '7.3-12', cran_options),
('Rcpp', '0.12.16', cran_options),
('minqa', '1.2.4', cran_options),
('nloptr', '1.0.4', cran_options),
('RcppEigen', '0.3.3.4.0', cran_options),
('lme4', '1.1-15', cran_options),
('pbkrtest', '0.4-7', cran_options),
('SparseM', '1.77', cran_options),
('MatrixModels', '0.4-1', cran_options),
('quantreg', '5.35', cran_options),
('car', '2.1-6', cran_options),
('zoo', '1.8-1', cran_options),
('lmtest', '0.9-35', cran_options),
('sandwich', '2.4-0', cran_options),
('survival', '2.41-3', cran_options),
('Formula', '1.2-2', cran_options),
('AER', '1.2-5', cran_options),
('combinat', '0.0-8', cran_options),
('httpuv', '1.3.6.2', cran_options),
('mime', '0.5', cran_options),
('jsonlite', '1.5', cran_options),
('xtable', '1.8-2', cran_options),
('digest', '0.6.15', cran_options),
('htmltools', '0.3.6', cran_options),
('R6', '2.2.2', cran_options),
('sourcetools', '0.1.6', cran_options),
('shiny', '1.0.5', cran_options),
('miniUI', '0.1.1', cran_options),
('rstudioapi', '0.7', cran_options),
('highr', '0.6', cran_options),
('class', '7.3-14', cran_options),
('e1071', '1.6-8', cran_options),
('classInt', '0.1-24', cran_options),
('magrittr', '1.5', cran_options),
('rlang', '0.2.0', cran_options),
('assertthat', '0.2.0', cran_options),
('crayon', '1.3.4', cran_options),
('cli', '1.0.0', cran_options),
('utf8', '1.1.3', cran_options),
('pillar', '1.2.1', cran_options),
('tibble', '1.4.2', cran_options),
('forcats', '0.3.0', cran_options),
('pkgconfig', '2.0.1', cran_options),
('hms', '0.4.2', cran_options),
('BH', '1.66.0-1', cran_options),
('readr', '1.1.1', cran_options),
('haven', '1.1.1', cran_options),
('labelled', '1.0.1', cran_options),
('questionr', '0.6.2', cran_options),
('klaR', '0.6-14', cran_options),
('cluster', '2.0.6', cran_options),
    ('spData', '0.2.8.3', cran_options),
('LearnBayes', '2.15.1', cran_options),
('deldir', '0.1-14', cran_options),
('boot', '1.3-20', cran_options),
('coda', '0.19-1', cran_options),
('gtools', '3.5.0', cran_options),
('gdata', '2.18.0', cran_options),
('gmodels', '2.16.2', cran_options),
('expm', '0.999-2', cran_options),
('spdep', '0.7-4', cran_options),
('AlgDesign', '1.1-7.3', cran_options),
('agricolae', '1.2-8', cran_options),
('akima', '0.6-2', cran_options),
    ('animation', '2.5', cran_options),
('BiocGenerics', '0.24.0', bioconductor_options),
('Biobase', '2.38.0', bioconductor_options),
('S4Vectors', '0.16.0', bioconductor_options),
('IRanges', '2.12.0', bioconductor_options),
('DBI', '0.8', cran_options),
('bit', '1.1-12', cran_options),
('bit64', '0.9-7', cran_options),
('prettyunits', '1.0.2', cran_options),
('blob', '1.1.1', cran_options),
('memoise', '1.1.0', cran_options),
('plogr', '0.2.0', cran_options),
('RSQLite', '2.0', cran_options),
('AnnotationDbi', '1.40.0', bioconductor_options),
('XML', '3.98-1.10', cran_options),
('bitops', '1.0-6', cran_options),
('RCurl', '1.95-4.10', cran_options),
('annotate', '1.56.2', bioconductor_options),
('ape', '5.0', cran_options),
('proto', '1.0.0', cran_options),
('findpython', '1.0.3', cran_options),
('getopt', '1.20.2', cran_options),
('argparse', '1.1.1', cran_options),
('argparser', '0.4', cran_options),
('arm', '1.9-3', cran_options),
('AUC', '0.3.0', cran_options),
('backports', '1.1.2', cran_options),
('openssl', '1.0.1', cran_options),
('base64', '2.0', cran_options),
('base64enc', '0.1-3', cran_options),
('bindr', '0.1.1', cran_options),
('bindrcpp', '0.2', cran_options),
('glue', '1.2.0', cran_options),
('dplyr', '0.7.4', cran_options),
('gtable', '0.2.0', cran_options),
('plyr', '1.8.4', cran_options),
('stringi', '1.1.7', cran_options),
('stringr', '1.3.0', cran_options),
('reshape2', '1.4.3', cran_options),
('RColorBrewer', '1.1-2', cran_options),
('dichromat', '2.0-0', cran_options),
('colorspace', '1.3-2', cran_options),
('munsell', '0.4.3', cran_options),
('labeling', '0.3', cran_options),
('viridisLite', '0.3.0', cran_options),
('scales', '0.5.0', cran_options),
('lazyeval', '0.2.1', cran_options),
('ggplot2', '2.2.1', cran_options),
('bayesplot', '1.4.0', cran_options),
('numDeriv', '2016.8-1', cran_options),
('bbmle', '1.0.20', cran_options),
('bdsmatrix', '1.3-3', cran_options),
('beanplot', '1.2', cran_options),
('beeswarm', '0.2.3', cran_options),
('modeltools', '0.2-21', cran_options),
('flexmix', '2.3-14', cran_options),
('betareg', '3.1-0', cran_options),
('bibtex', '0.4.2', cran_options),
('biglm', '0.9-1', cran_options),
('bigmemory.sri', '0.1.3', cran_options),
('bigmemory', '4.5.33', cran_options),
('hglm.data', '1.0-0', cran_options),
('hglm', '2.1-1', cran_options),
('DatABEL', '0.9-6', cran_options),
('bigRR', '1.3-10', cran_options),
('gmp', '0.5-13.1', cran_options),
('polynom', '1.3-9', cran_options),
('partitions', '1.9-19', cran_options),
('binGroup', '2.1-1', cran_options),
('rappdirs', '0.3.1', cran_options),
('yaml', '2.1.18', cran_options),
('curl', '3.1', cran_options),
('httr', '1.3.1', cran_options),
('xml2', '1.2.0', cran_options),
('semver', '0.2.0', cran_options),
('binman', '0.1.0', cran_options),
('BiocInstaller', '1.28.0', bioconductor_options),
('lambda.r', '1.2', cran_options),
('futile.options', '1.0.0', cran_options),
('futile.logger', '1.4.3', cran_options),
('snow', '0.4-2', cran_options),
('BiocParallel', '1.12.0', bioconductor_options),
('progress', '1.1.2', cran_options),
('biomaRt', '2.34.2', bioconductor_options),
('zlibbioc', '1.24.0', bioconductor_options),
('rhdf5', '2.22.0', bioconductor_options),
('biomformat', '1.6.0', bioconductor_options),
('XVector', '0.18.0', bioconductor_options),
('Biostrings', '2.46.0', bioconductor_options),
('evaluate', '0.10.1', cran_options),
('markdown', '0.8', cran_options),
('knitr', '1.20', cran_options),
('rprojroot', '1.3-2', cran_options),
('rmarkdown', '1.9', cran_options),
('xfun', '0.1', cran_options),
('tinytex', '0.4', cran_options),
('bookdown', '0.7', cran_options),
('profileModel', '0.5-9', cran_options),
('brglm', '0.6.1', cran_options),
('qvcalc', '0.9-1', cran_options),
('BradleyTerry2', '1.0-8', cran_options),
('brew', '1.0-6', cran_options),
('mvtnorm', '1.0-7', cran_options),
('Brobdingnag', '1.2-5', cran_options),
('bridgesampling', '0.4-0', cran_options),
('StanHeaders', '2.17.2', cran_options),
('inline', '0.3.14', cran_options),
('gridExtra', '2.3', cran_options),
('rstan', '2.17.3', cran_options),
('matrixStats', '0.53.1', cran_options),
('loo', '1.1.0', cran_options),
('rstantools', '1.4.0', cran_options),
('htmlwidgets', '1.0', cran_options),
('shinyjs', '1.0', cran_options),
('colourpicker', '1.0', cran_options),
('crosstalk', '1.0.0', cran_options),
('DT', '0.4', cran_options),
('xts', '0.10-2', cran_options),
('dygraphs', '1.1.1.4', cran_options),
('PKI', '0.1-5.1', cran_options),
('RJSONIO', '1.3-0', cran_options),
    ('packrat', '0.4.9-1', cran_options),
|
ebressert/ScipyNumpy_book_examples
|
python_examples/numpy_231_ex2.py
|
Python
|
mit
| 420
| 0
|
import numpy as np
# Loading an existing file
arr = np.loadtxt('somefile.txt')
# Saving a new file
np.savetxt('somenewfile.txt', arr)
# Opening an existing file with the append option
f = open('existingfile.txt', 'a')
# Creating some random data to append to the existing file
data2append = np.random.rand(100)
# With np.savetxt we replace the file name with the file handle.
np.savetxt(f, data2append)
f.close()
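# Quick check of the append pattern above (added for illustration, not part of the
# original example; assumes 'existingfile.txt' holds one numeric value per line,
# matching the 1-D array appended by savetxt, so loadtxt can parse the combined file).
combined = np.loadtxt('existingfile.txt')
print(combined.shape)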
|
lsbardel/flow
|
flow/finance/cashflow/cash.py
|
Python
|
bsd-3-clause
| 1,829
| 0.02187
|
from icash import *
from jflow.utils.observer import lazyvalue
class singlecash(icash):
'''
Simple cash flow.
Date, currency and dummy implementation
'''
def __init__(self, date = None, dummy = False, ccy = None):
self.__date = date
        if self.__date is None:
self.__date = dates.date.today()
self.__ccy = ccy
        self.__dummy = dummy
def date(self):
return self.__date
def currency(self):
return self.__ccy
def isdummy(self):
return self.__dummy
class fixcash(singlecash):
'''
    Fixed cash. Notional equals the cash amount.
'''
def __init__(self, value = 0., *args, **kwargs):
super(fixcash,self).__init__(*args, **kwargs)
self.__val = value
    def notional(self):
        return self.__val
class lazycash(singlecash):
def __init__(self, value = 5.0, *args, **kwargs):
super(lazycash,self).__init__(*args, **kwargs)
self.__val = value
def __get_value(self):
try:
return self.__val.value
except:
return self.__val
value = property(fget = __get_value)
class coupon(lazycash):
def __init__(self, dcf = 1., notional = 1.0, *args, **kwargs):
super(coupon,self).__init__(*args, **kwargs)
self.__dcf = dcf
self.__notional = notional
def _descr(self):
return '%s %s%s' % (self.__class__.__name__,self.value,'%')
def dcf(self):
return self.__dcf
def notional(self):
return self.__notional
def cash(self):
try:
return 0.01*self.ndcf()*self.value
except:
return BADVALUE
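# Minimal usage sketch (added for illustration; assumes the icash base class and the
# names pulled in via `from icash import *` -- dates, BADVALUE, etc. -- behave as the
# classes above expect):
#
#     c = fixcash(value=100.0, ccy='CHF')
#     c.notional()                                  # -> 100.0
#     cp = coupon(dcf=0.5, notional=1000000.0, value=4.0)
#     cp.dcf(), cp.notional(), cp.value             # -> (0.5, 1000000.0, 4.0)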
|
davidovitch/prepost-wind-dlc
|
cluster-tools/run-node.py
|
Python
|
gpl-3.0
| 843
| 0.003559
|
#!python
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 25 22:38:43 2014
@author: dave
"""
import os
#from argparse import ArgumentParser
import argparse
import paramiko
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("script", help="python script to execute")
parser.add_argument("-n", "--node", default='g-080',
help="gorm node hoste name, between g-001 and g-080")
args = parser.parse_args()
    # connect to a node for the post processing
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=args.node, username=os.environ['USER'])  # connect to the node requested on the command line
stdin, stdout, stderr = client.exec_command('python %s' % args.script)
for line in stdout:
        print('... ' + line.strip('\n'))
client.close()
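# Example invocation (illustrative; the script and node names below are placeholders):
#   python run-node.py post_process.py --node g-045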
|
eadgarchen/tensorflow
|
tensorflow/contrib/estimator/python/estimator/extenders_test.py
|
Python
|
apache-2.0
| 10,770
| 0.00585
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""extenders tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.contrib.estimator.python.estimator import extenders
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training
def get_input_fn(x, y):
def input_fn():
dataset = dataset_ops.Dataset.from_tensor_slices({'x': x, 'y': y})
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
labels = features.pop('y')
return features, labels
return input_fn
class AddMetricsTest(test.TestCase):
def test_should_add_metrics(self):
input_fn = get_input_fn(
x=np.arange(4)[:, None, None], y=np.ones(4)[:, None])
estimator = linear.LinearClassifier([fc.numeric_column('x')])
def metric_fn(features):
return {'mean_x': metrics_lib.mean(features['x'])}
estimator = extenders.add_metrics(estimator, metric_fn)
estimator.train(input_fn=input_fn)
metrics = estimator.evaluate(input_fn=input_fn)
self.assertIn('mean_x', metrics)
self.assertEqual(1.5, metrics['mean_x'])
# assert that it keeps original estimators metrics
self.assertIn('auc', metrics)
def test_should_error_out_for_not_recognized_args(self):
estimator = linear.LinearClassifier([fc.numeric_column('x')])
def metric_fn(features, not_recognized):
_, _ = features, not_recognized
return {}
with self.assertRaisesRegexp(ValueError, 'not_recognized'):
estimator = extenders.add_metrics(estimator, metric_fn)
def test_all_supported_args(self):
input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
estimator = linear.LinearClassifier([fc.numeric_column('x')])
def metric_fn(features, predictions, labels, config):
self.assertIn('x', features)
self.assertIsNotNone(labels)
self.assertIn('logistic', predictions)
self.assertTrue(isinstance(config, estimator_lib.RunConfig))
return {}
estimator = extenders.add_metrics(estimator, metric_fn)
estimator.train(input_fn=input_fn)
estimator.evaluate(input_fn=input_fn)
def test_all_supported_args_in_different_order(self):
input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
estimator = linear.LinearClassifier([fc.numeric_column('x')])
def metric_fn(labels, config, features, predictions):
self.assertIn('x', features)
self.assertIsNotNone(labels)
self.assertIn('logistic', predictions)
self.assertTrue(isinstance(config, estimator_lib.RunConfig))
return {}
estimator = extenders.add_metrics(estimator, metric_fn)
estimator.train(input_fn=input_fn)
estimator.evaluate(input_fn=input_fn)
def test_all_args_are_optional(self):
input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
estimator = linear.LinearClassifier([fc.numeric_column('x')])
def metric_fn():
return {'two': metrics_lib.mean(constant_op.constant([2.]))}
estimator = extenders.add_metrics(estimator, metric_fn)
estimator.train(input_fn=input_fn)
metrics = estimator.evaluate(input_fn=input_fn)
self.assertEqual(2., metrics['two'])
def test_overrides_existing_metrics(self):
input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
estimator = linear.LinearClassifier([fc.numeric_column('x')])
estimator.train(input_fn=input_fn)
metrics = estimator.evaluate(input_fn=input_fn)
self.assertNotEqual(2., metrics['auc'])
def metric_fn():
return {'auc': metrics_lib.mean(constant_op.constant([2.]))}
estimator = extenders.add_metrics(estimator, metric_fn)
metrics = estimator.evaluate(input_fn=input_fn)
self.assertEqual(2., metrics['auc'])
class ClipGradientsByNormTest(test.TestCase):
"""Tests clip_gradients_by_norm."""
def test_applies_norm(self):
optimizer = extenders.clip_gradients_by_norm(
training.GradientDescentOptimizer(1.0), clip_norm=3.)
with ops.Graph().as_default():
w = variables.Variable(1., name='weight')
x = constant_op.constant(5.)
y = -x * w
grads = optimizer.compute_gradients(y, var_list=[w])[0]
opt_op = optimizer.minimize(y, var_list=[w])
with training.MonitoredSession() as sess:
grads_value = sess.run(grads)
self.assertEqual(-5., grads_value[0])
sess.run(opt_op)
new_w = sess.run(w)
self.assertEqual(4., new_w) # 1 + 1*3 (w - lr * clipped_grad)
def test_name(self):
optimizer = extenders.clip_gradients_by_norm(
training.GradientDescentOptimizer(1.0), clip_norm=3.)
self.assertEqual('ClipByNormGradientDescent', optimizer.get_name())
class ForwardFeaturesTest(test.TestCase):
"""Tests forward_features."""
def test_forward_single_key(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
self.assertNotIn('id', next(estimator.predict(input_fn=input_fn)))
estimator = extenders.forward_features(estimator, 'id')
predictions = next(estimator.predict(input_fn=input_fn))
self.assertIn('id', predictions)
self.assertEqual(101, predictions['id'])
def test_forward_list(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
self.assertNotIn('id', next(estimator.predict(input_fn=input_fn)))
estimator = extenders.forward_features(estimator, ['x', 'id'])
predictions = next(estimator.predict(input_fn=input_fn))
self.assertIn('id', predictions)
self.assertIn('x', predictions)
self.assertEqual(101, predictions['id'])
self.assertEqual(3., predictions['x'])
def test_forward_all(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
self.assertNotIn('id', next(estimator.predict(input_fn=input_fn)))
self.assertNotIn('x', next(estimator.predict(input_fn=input_fn)))
estimator = extenders.forward_features(estimator)
predictions = next(estimator.predict(input_fn=input_fn))
self.assertIn('id', predictions)
self.assertIn('x', predictions)
self.assertEqual(101, predictions['id'])
self.assertEqual(3., predictions['x'])
def test_key_should_be_string(self):
estimator = linear.LinearRegressor([fc.numeric_column('x')])
with self.assertRaisesRegexp(TypeError, 'keys should be either a string'):
extenders.forward_features(estimator, estimator)
def test_key_should_be_list_of_string(self):
estimator = linear.LinearRegressor([fc.numeric_column('x')])
with self.assertRaisesRegexp(TypeError, 'should be a string'):
extenders.forward_features(estimator, ['x', estimator])
def test_key_should_be_in_features(sel
|
Huyuwei/tvm
|
python/tvm/hybrid/preprocessor.py
|
Python
|
apache-2.0
| 4,765
| 0.005247
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Determines the declaration, r/w status, and last use of each variable"""
import ast
import sys
from .runtime import HYBRID_GLOBALS
from .util import _internal_assert
class PyVariableUsage(ast.NodeVisitor):
"""The vistor class to determine the declaration, r/w status, and last use of each variable"""
#pylint: disable=invalid-name
#pylint: disable=missing-docstring
def __init__(self, args, symbols, closure_vars):
self.status = {}
self.scope_level = []
self._args = {}
self.args = args
self.aug_assign_ = False
self.symbols = symbols
self.closure_vars = closure_vars
def visit_FunctionDef(self, node):
self.scope_level.append(node)
_internal_assert(len(node.args.args) == len(self.args), \
'#arguments passed should be the same as #arguments defined')
for idx, arg in enumerate(node.args.args):
_attr = 'id' if sys.version_info[0] < 3 else 'arg' # To make py2 and 3 compatible
self._args[getattr(arg, _attr)] = self.args[idx]
for i in node.body:
self.visit(i)
def visit_For(self, node):
_internal_assert(isinstance(node.target, ast.Name), \
"For's iterator should be an id")
self.visit(node.iter)
self.scope_level.append(node)
for i in node.body:
self.visit(i)
self.scope_level.pop()
def visit_Call(self, node):
#No function pointer supported so far
_internal_assert(isinstance(node.func, ast.Name), "Function call should be an id")
func_id = node.func.id
_internal_assert(func_id in list(HYBRID_GLOBALS.keys()) + \
['range', 'max', 'min', 'len'] + \
list(self.symbols.keys()), \
"Function call id not in intrinsics' list")
for elem in node.args:
self.visit(elem)
def visit_AugAssign(self, node):
self.aug_assign_ = True
self.generic_visit(node)
self.aug_assign_ = False
def visit_Name(self, node):
# If it is True or False, we do not worry about it!
if sys.version_info[0] == 2 and node.id in ['True', 'False']:
return
# If it is from the argument list or loop variable, we do not worry about it!
if node.id in self._args.keys():
return
fors = [loop.target.id for loop in self.scope_level if isinstance(loop, ast.For)]
if node.id in fors:
return
# The loop variable cannot be overwritten when iteration
_internal_assert(not isinstance(node.ctx, ast.Store) or node.id not in fors, \
"Iter var cannot be overwritten")
if node.id not in self.status.keys():
# It is a captured value in closure
if node.id in self.closure_vars:
try:
ast.literal_eval(str(self.closure_vars[node.id]))
except ValueError:
raise ValueError("Only support capturing constant values in closure")
return
_internal_assert(isinstance(node.ctx, ast.Store), \
'Undeclared variable %s' % node.id)
if self.aug_assign_:
raise ValueError('"First store" cannot be an AugAssign')
self.status[node.id] = (node, self.scope_level[-1], set())
else:
decl, loop, usage = self.status[node.id]
usage.add(type(node.ctx))
_internal_assert(loop in self.scope_level,
"%s is used out of the scope it is defined!" % node.id)
self.status[node.id] = (decl, loop, usage)
def determine_variable_usage(root, args, symbols, closure_vars):
"""The helper function for calling the dedicated visitor."""
visitor = PyVariableUsage(args, symbols, closure_vars)
visitor.visit(root)
return visitor.status
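# Standalone sketch (added for illustration, not part of TVM): the same ast.NodeVisitor
# pattern as PyVariableUsage above, reduced to tracking which names are read and which
# are written in a snippet of Python source.
class _ReadWriteSketch(ast.NodeVisitor):
    def __init__(self):
        self.reads, self.writes = set(), set()

    def visit_Name(self, node):
        # an ast.Store context means the name is being assigned; anything else is a read
        if isinstance(node.ctx, ast.Store):
            self.writes.add(node.id)
        else:
            self.reads.add(node.id)


if __name__ == '__main__':
    _tree = ast.parse(
        "def f(n):\n"
        "    total = 0\n"
        "    for i in range(n):\n"
        "        total = total + i\n"
        "    return total")
    _sketch = _ReadWriteSketch()
    _sketch.visit(_tree)
    print(_sketch.writes)  # {'total', 'i'}
    print(_sketch.reads)   # {'total', 'i', 'n', 'range'}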
|
dekoza/django-getpaid
|
getpaid/backends/transferuj/models.py
|
Python
|
mit
| 47
| 0
|
def build_models(payment_class):
    return []
|
joaander/hoomd-blue
|
hoomd/dem/pair.py
|
Python
|
bsd-3-clause
| 15,876
| 0.001386
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R"""DEM pair potentials.
"""
import hoomd
import hoomd.md
import hoomd.md.nlist as nl
from math import sqrt
import json
from hoomd.dem import _dem
from hoomd.dem import params
from hoomd.dem import utils
class _DEMBase:
def __init__(self, nlist):
self.nlist = nlist
self.nlist.subscribe(self.get_rcut)
self.nlist.update_rcut()
self.cpp_force = None
def _initialize_types(self):
ntypes = hoomd.context.current.system_definition.getParticleData(
).getNTypes()
type_list = []
for i in range(0, ntypes):
type_list.append(hoomd.context.current.system_definition
.getParticleData().getNameByType(i))
if self.dimensions == 2:
for typ in type_list:
self.setParams2D(typ, [[0, 0]], False)
else:
for typ in type_list:
self.setParams3D(typ, [[0, 0, 0]], [], False)
def _connect_gsd_shape_spec(self, gsd):
# This is an internal method, and should not be called directly. See gsd.dump_shape() instead
if isinstance(gsd, hoomd.dump.gsd) and hasattr(
self.cpp_force, "connectDEMGSDShapeSpec"):
self.cpp_force.connectDEMGSDShapeSpec(gsd.cpp_analyzer)
else:
raise NotImplementedError(
"GSD Schema is not implemented for {}".format(
self.__class__.__name__))
def setParams2D(self, type, vertices, center=False):
"""Set the vertices for a given particle type.
Args:
type (str): Name of the type to set the shape of
vertices (list): List of (2D) points specifying the coordinates of the shape
center (bool): If True, subtract the center of mass of the shape from the vertices before setting them for the shape
Shapes are specified as a list of 2D coordinates. Edges will
be made between all adjacent pairs of vertices, including one
between the last and first vertex.
"""
itype = hoomd.context.current.system_definition.getParticleData(
).getTypeByName(type)
if not len(vertices):
vertices = [(0, 0)]
center = False
# explicitly turn into a list of tuples
if center:
vertices = [
(float(p[0]), float(p[1])) for p in utils.center(vertices)
]
else:
vertices = [(float(p[0]), float(p[1])) for p in vertices]
# update the neighbor list
rcutmax = 2 * (sqrt(max(x * x + y * y for (x, y) in vertices))
+ self.radius * 2**(1. / 6))
self.r_cut = max(self.r_cut, rcutmax)
self.vertices[type] = vertices
self.cpp_force.setRcut(self.r_cut)
self.cpp_force.setParams(itype, vertices)
def setParams3D(self, type, vertices, faces, center=False):
"""Set the vertices for a given particle type.
Args:
type (str): Name of the type to set the shape of
vertices (list): List of (3D) points specifying the coordinates of the shape
faces (list): List of lists of indices specifying which coordinates comprise each face of a shape.
center (bool): If True, subtract the center of mass of the shape from the vertices before setting them for the shape
Shapes are specified as a list of coordinates (`vertices`) and
another list containing one list for each polygonal face
(`faces`). The elements of each list inside `faces` are
integer indices specifying which vertex in `vertices` comprise
the face.
"""
itype = hoomd.context.current.system_definition.getParticleData(
).getTypeByName(type)
if not len(vertices):
vertices = [(0, 0, 0)]
faces = []
center = False
# explicitly turn into python lists
if center:
vertices = [(float(p[0]), float(p[1]), float(p[2]))
for p in utils.center(vertices, faces)]
else:
vertices = [
(float(p[0]), float(p[1]), float(p[2])) for p in vertices
]
faces = [[int(i) for i in face] for face in faces]
# update the neighbor list
rcutmax = 2 * (sqrt(max(x * x + y * y + z * z
for (x, y, z) in vertices))
+ self.radius * 2**(1. / 6))
self.r_cut = max(self.r_cut, rcutmax)
self.vertices[type] = vertices
self.cpp_force.setRcut(self.r_cut)
self.cpp_force.setParams(itype, vertices, faces)
def get_type_shapes(self):
"""Get all the types of shapes in the current simulation.
This assumes all 3D shapes are convex.
Examples:
Types depend on the number of shape vertices and system dimensionality.
One vertex will yield a Sphere (2D and 3D), while multiple vertices will
give a Polygon (2D) or ConvexPolyhedron (3D).
>>> mc.get_type_shapes() # one vertex in 3D
[{'type': 'Sphere', 'diameter': 1.0}]
>>> mc.get_type_shapes() # one vertex in 2D
[{'type': 'Sphere', 'diameter': 1.5}]
>>> mc.get_type_shapes() # multiple vertices in 3D
[{'type': 'ConvexPolyhedron', 'rounding_radius': 0.1,
'vertices': [[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
[-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]]}]
>>> mc.get_type_shapes() # multiple vertices in 2D
[{'type': 'Polygon', 'rounding_radius': 0.1,
'vertices': [[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]}]
Returns:
A list of dictionaries, one for each particle type in the system.
"""
type_shapes = self.cpp_force.getTypeShapesPy()
ret = [json.loads(json_string) for json_string in type_shapes]
return ret
class WCA(hoomd.md.force._force, _DEMBase):
R"""Specify a purely repulsive Weeks-Chandler-Andersen DEM force with a constant rounding radius.
Args:
nlist (:py:mod:`hoomd.md.nlist`): Neighbor list to use
radius (float): Rounding radius :math:`r` to apply to the shape vertices
The effect is as if a ``hoomd.md.pair.lj`` interaction
with :math:`r_{cut}=2^{1/6}\sigma` and :math:`\sigma=2\cdot r`
were applied between the contact points of each pair of particles.
Examples::
# 2D system of squares
squares = hoomd.dem.pair.WCA(radius=.5)
squares.setParams('A', [[1, 1], [-1, 1], [-1, -1], [1, -1]])
# 3D system of rounded square plates
squarePlates = hoomd.dem.pair.WCA(radius=.5)
squarePlates.setParams('A',
vertices=[[1, 1, 0], [-1, 1, 0], [-1, -1, 0], [1, -1, 0]],
|
faces=[[0, 1, 2, 3]], center=False)
# 3D system of some conve
|
x shape specified by vertices
(vertices, faces) = hoomd.dem.utils.convexHull(vertices)
shapes = hoomd.dem.pair.WCA(radius=.5)
shapes.setParams('A', vertices=vertices, faces=faces)
"""
def __init__(self, nlist, radius=1.):
friction = None
self.radius = radius
self.autotunerEnabled = True
self.autotunerPeriod = 100000
self.vertices = {}
self.onGPU = hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled()
cppForces = {
(2, None, 'cpu'): _dem.WCADEM2D,
(2, None, 'gpu'): (_dem.WCADEM2DGPU if self.onGPU else None),
(3, None, 'cpu'): _dem.WCADEM3D,
(3, None, 'gpu'): (_dem.WCADEM3DGPU if self.onGPU else None)
}
self.dimensions = hoomd.context.current.system_definition.getNDimensions(
)
# initialize the base class
hoomd.md.force._force.__init__(self)
# interparticle cutoff radius, will be updated as shapes are added
self.r_cut = 2 * radius * 2**(1. / 6)
if friction is None:
|
mdworks2016/work_development
|
Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/zope/interface/tests/test_verify.py
|
Python
|
apache-2.0
| 19,156
| 0.00047
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" zope.interface.verify unit tests
"""
import unittest
# pylint:disable=inherit-non-class,no-method-argument,no-self-argument
class Test_verifyClass(unittest.TestCase):
verifier = None
def setUp(self):
self.verifier = self._get_FUT()
@classmethod
def _get_FUT(cls):
from zope.interface.verify import verifyClass
return verifyClass
_adjust_object_before_verify = lambda self, x: x
def _callFUT(self, iface, klass, **kwargs):
return self.verifier(iface,
self._adjust_object_before_verify(klass),
**kwargs)
def test_class_doesnt_implement(self):
from zope.interface import Interface
from zope.interface.exceptions import DoesNotImplement
class ICurrent(Interface):
pass
class Current(object):
pass
self.assertRaises(DoesNotImplement, self._callFUT, ICurrent, Current)
def test_class_doesnt_implement_but_classImplements_later(self):
from zope.interface import Interface
from zope.interface import classImplements
class ICurrent(Interface):
pass
class Current(object):
pass
classImplements(Current, ICurrent)
self._callFUT(ICurrent, Current)
def test_class_doesnt_have_required_method_simple(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenImplementation
class ICurrent(Interface):
def method():
pass
@implementer(ICurrent)
class Current(object):
pass
self.assertRaises(BrokenImplementation,
self._callFUT, ICurrent, Current)
def test_class_has_required_method_simple(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method():
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_class_doesnt_have_required_method_derived(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenImplementation
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
@implementer(IDerived)
class Current(object):
pass
self.assertRaises(BrokenImplementation,
self._callFUT, IDerived, Current)
def test_class_has_required_method_derived(self):
from zope.interface import Interface
from zope.interface import implementer
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
@implementer(IDerived)
class Current(object):
def method(self):
raise NotImplementedError()
self._callFUT(IDerived, Current)
def test_method_takes_wrong_arg_names_but_OK(self):
# We no longer require names to match.
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, b):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_not_enough_args(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_doesnt_take_required_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(*args):
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_doesnt_take_required_only_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(**kw):
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_extra_arg(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, b):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_extra_arg_with_default(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, b=None):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
|
def test_method_takes_only_positional_args(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
            def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, *args):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_only_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, **kw):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_extra_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
|
MauHernandez/cyclope
|
demo/cyclope_project/locale/dbgettext/articles/article/como-funciona-la-biblioteca-biblioteca-multimedia/summary.py
|
Python
|
gpl-3.0
| 312
| 0.003289
|
# -*- coding: utf-8 -*-
gettext("""La “Biblioteca Multi
|
media” es el lugar desde donde se clasifican nuestros contenidos multimedia, es decir, todo lo que es imágenes, audios, videos, documentos, etc. que luego podremos relacionar entre sí y con artículos, y tendrán vistas
|
HTML en nuestro sitio web.""")
|
Sbalbp/DIRAC
|
Interfaces/scripts/dirac-admin-get-site-mask.py
|
Python
|
gpl-3.0
| 781
| 0.020487
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-get-site-mask
# Author : Stuart Paterson
########################################################################
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Get the list of sites enabled in the mask for job submission
Usage:
%s [options]
""" % Script.scriptName )
Script.parseCommandLine( ignoreErrors = True )
from DIRAC import exit as DIRACExit, gLogger
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
gLogger.setLevel('ALWAYS')
result = diracAdmin.getSiteMask(printOutput=True)
if result['OK']:
DIRACExit( 0 )
else:
  print(result['Message'])
DIRACExit( 2 )
|
ESSS/conda-env
|
tests/test_print_env.py
|
Python
|
bsd-3-clause
| 4,520
| 0.002655
|
from conda_env.print_env import print_env
import os
import textwrap
import unittest
class EnvironmentAndAliasesTestCase(unittest.TestCase):
ENVIRONMENT = [
{'PATH' : ['mypath1', 'mypath2']},
{'PATH' : ['mypath3']},
{'ANY_LIST_REALLY' : ['something1', 'something2']},
{'SINGLE_VAR' : 'single_value'},
{'SINGLE_INT' : 200},
]
ALIASES = {
'my_ls' : 'ls -la'
}
def test_environment_and_aliases_bash(self):
old_environ = os.environ.copy()
old_pathsep = os.pathsep
try:
os.environ['SHELL'] = '/bin/bash'
os.pathsep = ':'
activate = print_env('activate', self.ENVIRONMENT, self.ALIASES)
assert activate == textwrap.dedent(
'''
export ANY_LIST_REALLY="something1:something2:$ANY_LIST_REALLY"
export PATH="mypath1:mypath2:mypath3:$PATH"
export SINGLE_INT="200"
export SINGLE_VAR="single_value"
alias my_ls="ls -la"
'''
).lstrip()
os.environ['PATH'] = '/usr/bin:mypath1:mypath2:mypath3:/usr/local/bin'
os.environ['SINGLE_VAR'] = 'single_value'
os.environ['ANY_LIST_REALLY'] = 'something1:something2:'
deactivate = print_env('deactivate', self.ENVIRONMENT, self.ALIASES)
assert deactivate == textwrap.dedent(
'''
unset ANY_LIST_REALLY
export PATH="/usr/bin:/usr/local/bin"
unset SINGLE_VAR
[ `alias | grep my_ls= | wc -l` != 0 ] && unalias my_ls
'''
).lstrip()
finally:
os.environ.clear()
os.environ.update(old_environ)
os.pathsep = old_pathsep
def test_environment_and_aliases_cmd(self):
old_environ = os.environ.copy()
old_pathsep = os.pathsep
try:
os.environ['SHELL'] = 'C:\\Windows\\system32\\cmd.exe'
os.pathsep = ';'
activate = print_env('activate', self.ENVIRONMENT, self.ALIASES)
assert activate == textwrap.dedent(
'''
set ANY_LIST_REALLY=something1;something2;%ANY_LIST_REALLY%
set PATH=mypath1;mypath2;mypath3;%PATH%
set SINGLE_INT=200
set SINGLE_VAR=single_value
doskey my_ls=ls -la
'''
).lstrip()
os.environ['PATH'] = 'C:\\bin;mypath1;mypath2;mypath3;C:\\Users\\me\\bin'
os.environ['SINGLE_VAR'] = 'single_value'
os.environ['ANY_LIST_REALLY'] = 'something1;something2;'
deactivate = print_env('deactivate', self.ENVIRONMENT, self.ALIASES)
assert deactivate == textwrap.dedent(
'''
set ANY_LIST_REALLY=
set PATH=C:\\bin;C:\\Users\\me\\bin
set SINGLE_VAR=
doskey my_ls=
'''
).lstrip()
finally:
os.environ.clear()
os.environ.update(old_environ)
os.pathsep = old_pathsep
def test_environment_and_aliases_tcc(self):
old_environ = os.environ.copy()
old_pathsep = os.pathsep
try:
os.environ['SHELL'] = 'C:\\Program Files\\tcmd\\TCC.EXE'
os.pathsep = ';'
activate = print_env('activate', self.ENVIRONMENT, self.ALIASES)
assert activate == textwrap.dedent(
'''
set ANY_LIST_REALLY=something1;something2;%ANY_LIST_REALLY%
set PATH=mypath1;mypath2;mypath3;%PATH%
set SINGLE_INT=200
set SINGLE_VAR=single_value
alias my_ls ls -la
'''
).lstrip()
            os.environ['PATH'] = 'C:\\bin;mypath1;mypath2;mypath3;C:\\Users\\me\\bin'
os.environ['SINGLE_VAR'] = 'single_value'
os.environ['ANY_LIST_REALLY'] = 'something1;something2;'
deactivate = print_env('deactivate', self.ENVIRONMENT, self.ALIASES)
assert deactivate == textwrap.dedent(
'''
set ANY_LIST_REALLY=
set PATH=C:\\bin;C:\\Users\\me\\bin
set SINGLE_VAR=
unalias my_ls
'''
).lstrip()
finally:
os.environ.clear()
os.environ.update(old_environ)
os.pathsep = old_pathsep
|
hipnusleo/laserjet
|
resource/pypi/paramiko-2.1.1/paramiko/kex_group1.py
|
Python
|
apache-2.0
| 5,689
| 0.001055
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
1024 bit key halves, using a known "p" prime and "g" generator.
"""
import os
from hashlib import sha1
from paramiko import util
from paramiko.common import max_byte, zero_byte
from paramiko.message import Message
from paramiko.py3compat import byte_chr, long, byte_mask
from paramiko.ssh_exception import SSHException
_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
c_MSG_KEXDH_INIT, c_MSG_KEXDH_REPLY = [byte_chr(c) for c in range(30, 32)]
b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7
b0000000000000000 = zero_byte * 8
class KexGroup1(object):
# draft-ietf-secsh-transport-09.txt, page 17
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
G = 2
name = 'diffie-hellman-group1-sha1'
hash_algo = sha1
def __init__(self, transport):
self.transport = transport
self.x = long(0)
self.e = long(0)
self.f = long(0)
def start_kex(self):
self._generate_x()
if self.transport.server_mode:
# compute f = g^x mod p, but don't send it yet
self.f = pow(self.G, self.x, self.P)
self.transport._expect_packet(_MSG_KEXDH_INIT)
return
# compute e = g^x mod p (where g=2), and send it
self.e = pow(self.G, self.x, self.P)
m = Message()
m.add_byte(c_MSG_KEXDH_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_REPLY)
def parse_next(self, ptype, m):
if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
return self._parse_kexdh_init(m)
elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
return self._parse_kexdh_reply(m)
raise SSHException('KexGroup1 asked to handle packet type %d' % ptype)
### internals...
def _generate_x(self):
# generate an "x" (1 < x < q), where q is (p-1)/2.
# p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
# therefore q can be approximated as a 2^1023. we drop the subset of
# potential x where the first 63 bits are 1, because some of those will be
# larger than q (but this is a tiny tiny subset of potential x).
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:]
if (x_bytes[:8] != b7fffffffffffffff and
x_bytes[:8] != b0000000000000000):
break
self.x = util.inflate_long(x_bytes)
def _parse_kexdh_reply(self, m):
# client mode
host_key = m.get_string()
self.f = m.get_mpint()
if (self.f < 1) or (self.f > self.P - 1):
raise SSHException('Server kex "f" is out of range')
sig = m.get_binary()
K = pow(self.f, self.x, self.P)
# okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init)
hm.add_string(host_key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, sha1(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
def _parse_kexdh_init(self, m):
# server mode
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.P - 1):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, self.P)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init)
hm.add_string(key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(H)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
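# Toy illustration of the Diffie-Hellman arithmetic implemented above (added for
# clarity; uses deliberately tiny numbers, NOT the 1024-bit group1 prime, and is not
# part of paramiko itself).
if __name__ == '__main__':
    p_demo, g_demo = 23, 5              # tiny prime and generator
    x_client, x_server = 6, 15          # private exponents (random 128-byte values in the real kex)
    e_demo = pow(g_demo, x_client, p_demo)    # client sends e = g^x mod p
    f_demo = pow(g_demo, x_server, p_demo)    # server sends f = g^x mod p
    k_client = pow(f_demo, x_client, p_demo)  # client computes K = f^x mod p
    k_server = pow(e_demo, x_server, p_demo)  # server computes K = e^x mod p
    assert k_client == k_server == 2          # both sides derive the same shared secret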
|
wildchildyn/autism-website
|
yanni_env/lib/python3.6/site-packages/sqlalchemy/testing/assertsql.py
|
Python
|
gpl-3.0
| 12,590
| 0
|
# testing/assertsql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..engine.default import DefaultDialect
from .. import util
import re
import collections
import contextlib
from .. import event
from sqlalchemy.schema import _DDLCompiles
from sqlalchemy.engine.util import _distill_params
from sqlalchemy.engine import url
class AssertRule(object):
is_consumed = False
errormessage = None
consume_statement = True
def process_statement(self, execute_observed):
pass
def no_more_statements(self):
assert False, 'All statements are complete, but pending '\
'assertion rules remain'
class SQLMatchRule(AssertRule):
pass
class CursorSQL(SQLMatchRule):
consume_statement = False
def __init__(self, statement, params=None):
self.statement = statement
self.params = params
def process_statement(self, execute_observed):
stmt = execute_observed.statements[0]
if self.statement != stmt.statement or (
self.params is not None and self.params != stmt.parameters):
self.errormessage = \
"Testing for exact SQL %s parameters %s received %s %s" % (
self.statement, self.params,
stmt.statement, stmt.parameters
)
else:
execute_observed.statements.pop(0)
self.is_consumed = True
if not execute_observed.statements:
self.consume_statement = True
class CompiledSQL(SQLMatchRule):
def __init__(self, statement, params=None, dialect='default'):
self.statement = statement
self.params = params
self.dialect = dialect
def _compare_sql(self, execute_observed, received_statement):
stmt = re.sub(r'[\n\t]', '', self.statement)
return received_statement == stmt
def _compile_dialect(self, execute_observed):
if self.dialect == 'default':
return DefaultDialect()
else:
# ugh
if self.dialect == 'postgresql':
params = {'implicit_returning': True}
else:
params = {}
return url.URL(self.dialect).get_dialect()(**params)
def _received_statement(self, execute_observed):
"""reconstruct the statement and params in terms
of a target dialect, which for CompiledSQL is just DefaultDialect."""
context = execute_observed.context
compare_dialect = self._compile_dialect(execute_observed)
if isinstance(context.compiled.statement, _DDLCompiles):
compiled = \
context.compiled.statement.compile(
dialect=compare_dialect,
schema_translate_map=context.
execution_options.get('schema_translate_map'))
else:
compiled = (
context.compiled.statement.compile(
dialect=compare_dialect,
column_keys=context.compiled.column_keys,
inline=context.compiled.inline,
schema_translate_map=context.
execution_options.get('schema_translate_map'))
)
_received_statement = re.sub(r'[\n\t]', '', util.text_type(compiled))
parameters = execute_observed.parameters
if not parameters:
_received_parameters = [compiled.construct_params()]
else:
_received_parameters = [
compiled.construct_params(m) for m in parameters]
return _received_statement, _received_parameters
def process_statement(self, execute_observed):
context = execute_observed.context
_received_statement, _received_parameters = \
self._received_statement(execute_observed)
params = self._all_params(context)
equivalent = self._compare_sql(execute_observed, _received_statement)
if equivalent:
if params is not None:
all_params = list(params)
all_received = list(_received_parameters)
while all_params and all_received:
param = dict(all_params.pop(0))
for idx, received in enumerate(list(all_received)):
# do a positive compare only
for param_key in param:
# a key in param did not match current
# 'received'
if param_key not in received or \
received[param_key] != param[param_key]:
break
else:
# all keys in param matched 'received';
# onto next param
del all_received[idx]
break
else:
# param did not match any entry
# in all_received
equivalent = False
break
if all_params or all_received:
equivalent = False
if equivalent:
self.is_consumed = True
self.errormessage = None
else:
self.errormessage = self._failure_message(params) % {
'received_statement': _received_statement,
'received_parameters': _received_parameters
}
def _all_params(self, context):
if self.params:
if util.callable(self.params):
params = self.params(context)
else:
params = self.params
if not isinstance(params, list):
params = [params]
return params
else:
return None
def _failure_message(self, expected_params):
return (
'Testing for compiled statement %r partial params %r, '
'received %%(received_statement)r with params '
'%%(received_parameters)r' % (
                self.statement.replace('%', '%%'), expected_params
)
)
class RegexSQL(CompiledSQL):
def __init__(self, regex, params=None):
SQLMatchRule.__init__(self)
self.regex = re.compile(regex)
self.orig_regex = regex
self.params = params
self.dialect = 'default'
def _failure_message(self, expected_params):
return (
'Testing for compiled statement ~%r partial params %r, '
'received %%(received_statement)r with params '
'%%(received_parameters)r' % (
self.orig_regex, expected_params
)
)
def _compare_sql(self, execute_observed, received_statement):
return bool(self.regex.match(received_statement))
class DialectSQL(CompiledSQL):
def _compile_dialect(self, execute_observed):
return execute_observed.context.dialect
def _compare_no_space(self, real_stmt, received_stmt):
stmt = re.sub(r'[\n\t]', '', real_stmt)
return received_stmt == stmt
def _received_statement(self, execute_observed):
received_stmt, received_params = super(DialectSQL, self).\
_received_statement(execute_observed)
# TODO: why do we need this part?
for real_stmt in execute_observed.statements:
if self._compare_no_space(real_stmt.statement, received_stmt):
break
else:
raise AssertionError(
"Can't locate compiled statement %r in list of "
"statements actually invoked" % received_stmt)
return received_stmt, execute_observed.context.compiled_parameters
def _compare_sql(self, execute_observed, received_statement):
stmt = re.sub(r'[\n\t]', '', self.statement)
# convert our comparison statement to have the
# paramstyle of the received
paramstyle = execute_observed.context.dialect.paramstyle
|
franga2000/django-machina
|
tests/unit/forum/test_models.py
|
Python
|
bsd-3-clause
| 5,012
| 0.001197
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from django.core.exceptions import ValidationError
from machina.apps.forum.signals import forum_moved
from machina.core.db.models import get_model
from machina.test.context_managers import mock_signal_receiver
from machina.test.factories import PostFactory
from machina.test.factories import UserFactory
from machina.test.factories import build_category_forum
from machina.test.factories import build_link_forum
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_link_forum
from machina.test.factories import create_topic
Forum = get_model('forum', 'Forum')
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
@pytest.mark.django_db
class TestForum(object):
@pytest.fixture(autouse=True)
def setup(self):
self.u1 = UserFactory.create()
# Set up top-level forums: a category, a default forum and a link forum
self.top_level_cat = create_category_forum()
self.top_level_forum = create_forum()
self.top_level_link = create_link_forum()
def test_has_a_margin_level_two_times_greater_than_its_real_level(self):
# Run
sub_level_forum = create_forum(parent=self.top_level_forum)
# Check
assert self.top_level_forum.margin_level == 0
assert sub_level_forum.margin_level == 2
def test_category_cannot_be_the_child_of_another_category(self):
# Run & check
with pytest.raises(ValidationError):
cat = build_category_forum(parent=self.top_level_cat)
cat.full_clean()
def test_can_not_be_the_child_of_a_forum_link(self):
# Run & check
for forum_type, _ in Forum.TYPE_CHOICES:
with pytest.raises(ValidationError):
forum = build_link_forum(parent=self.top_level_link)
forum.full_clean()
def test_must_have_a_link_in_case_of_a_link_forum(self):
# Run & check
with pytest.raises(ValidationError):
forum = Forum(parent=self.top_level_forum, name='sub_link_forum', type=Forum.FORUM_LINK)
forum.full_clean()
def test_saves_its_numbers_of_posts_and_topics(self):
# Run & check
topic = create_topic(forum=self.top_level_forum, poster=self.u1)
PostFactory.create(topic=topic, poster=self.u1)
PostFactory.create(topic=topic, poster=self.u1)
assert self.top_level_forum.direct_posts_count == topic.posts.filter(approved=True).count()
assert self.top_level_forum.direct_topics_count == self.top_level_forum.topics.count()
topic2 = create_topic(forum=self.top_level_forum, poster=self.u1, approved=False)
PostFactory.create(topic=topic2, poster=self.u1, approved=False)
assert self.top_level_forum.direct_posts_count == \
topic.posts.filter(approved=True).count() + topic2.posts.filter(approved=True).count()
assert self.top_level_forum.direct_topics_count == \
self.top_level_forum.topics.filter(approved=True).count()
def test_can_indicate_its_appartenance_to_a_forum_type(self):
# Run & check
assert self.top_level_cat.is_category
assert self.top_level_forum.is_forum
assert self.top_level_link.is_link
def test_stores_its_last_post_datetime(self):
# Setup
sub_level_forum = create_forum(parent=self.top_level_forum)
topic = create_topic(forum=sub_level_forum, poster=self.u1)
PostFactory.create(topic=topic, poster=self.u1)
# Run
p2 = PostFactory.create(topic=topic, poster=self.u1)
# Check
sub_level_forum.refresh_from_db()
assert sub_level_forum.last_post_on == p2.created
def test_can_reset_last_post_datetime_if_all_topics_have_been_deleted(self):
# Setup
sub_level_forum = create_forum(parent=self.top_level_forum)
        topic = create_topic(forum=sub_level_forum, poster=self.u1)
PostFactory.create(topic=topic, poster=self.u1)
# Run
topic.delete()
# Check
sub_level_forum.refresh_from_db()
        assert sub_level_forum.last_post_on is None
def test_can_send_a_specific_signal_when_a_forum_is_moved(self):
# Setup
topic = create_topic(forum=self.top_level_forum, poster=self.u1)
PostFactory.create(topic=topic, poster=self.u1)
PostFactory.create(topic=topic, poster=self.u1)
# Run & check
with mock_signal_receiver(forum_moved) as receiver:
self.top_level_forum.parent = self.top_level_cat
self.top_level_forum.save()
assert receiver.call_count == 1
def test_get_or_create(self):
forum, created = Forum.objects.get_or_create(name="Test Forum", type=0)
assert created is True
assert isinstance(forum, Forum)
assert forum.name == "Test Forum"
|
oscarmcm/AlzoMiVoz
|
alzomivoz/manage.py
|
Python
|
mit
| 316
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS
|
_MODULE", "config.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
LockScreen/Backend
|
test.py
|
Python
|
mit
| 121
| 0.016529
|
from auth import authenticate
import sample_upload
def check():
    # the function is imported via `from auth import authenticate`, so call it directly
    asdf = authenticate()
return asdf
check()
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20171026B.py
|
Python
|
mit
| 1,315
| 0.006844
|
"""
[2017-10-26] Challenge #337 [Intermediate] Scrambled images
https://www.reddit.com/r/dailyprogrammer/comments/78twyd/20171026_challenge_337_intermediate_scrambled/
#Description
For this challenge you will get a couple of images containing a secret word, you will have to unscramble the images to
be able to read the words.
To unscramble the images you will have to line up all non-gray scale pixels on each "row" of the image.
#Formal Inputs & Outputs
You get a [scrambled](http://i.imgur.com/rMYBq14.png) image, which you will have to unscramble to get the
[original](http://i.imgur.com/wKaiHpv.png) image.
###Input description
Challenge 1: [input](http://i.imgur.com/F4SlYMn.png)
Challenge 2: [input](http://i.imgur.com/ycDwgXA.png)
Challenge 3: [input](http://i.imgur.com/hg9iVXA.png)
###Output description
You should post the correct images or words.
#Notes/Hints
The colored pixels are red (#FF0000, rgb(255, 0, 0))
#Bonus
Bonus: [input](http://i.imgur.com/HLc1UHv.png)
This image is scrambled both horizontally and vertically.
The colored pixels are a gradient from green to red ((255, 0, _), (254, 1, _), ..., (1, 254, _), (0, 255, _)).
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
pass
if __name__ == "__main__":
main()
|
synox/telewall
|
telewall/telewall/integrationtests/test_InT01.py
|
Python
|
gpl-3.0
| 1,091
| 0.007333
|
""" Integration test: permit call
"""
import os
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../../')
import logging
import nose
from nose.tools import *
import inte_testutils
from telewall.core.model import TelephoneNumber
from telewall.core.util import sleep_until
logging.basicConfig(filename='/tmp/telewall-inte.log', level=logging.DEBUG)
logging.getLogger('telewall').setLevel(logging.DEBUG)
LOG = logging.getLogger(__name__)
def test_Anruf_erlauben():
u = inte_testutils.TestUtil()
u.unblock_callerid(TelephoneNumber('0790000001'))
call = u.make_call_to_incoming(callerid='0790000001')
LOG.info('call: %s', call)
sleep_until(lambda: 'Ringing' in call.get_call_states() or 'Up' in call.get_call_states(), 5)
call.hangup()
states = call.get_call_states()
LOG.info('states: %s', states)
assert_true('Ringing' in states,
                'Das analoge Telefon sollte angerufen worden sein, aber es gab keinen "Ringing" Status.')
call.stop()
if __name__ == '__main__':
nose.runmodule()
|
eliben/llvm-clang-samples
|
tools/htmlize-ast-dump.py
|
Python
|
unlicense
| 13,724
| 0.000874
|
#-------------------------------------------------------------------------------
# htmlize-ast-dump.py: Turn a Clang AST dump (-ast-dump) into cross-linked HTML.
#
# Run with --help for usage information.
#
# Note: this script requires Python 3.4; earlier versions of Python 3 should
# work if you install the enum34 module.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
import argparse
import enum
import html
import io
import json
import pprint
import re
import sys
HTML_OUTPUT_TEMPLATE = r'''
<html>
<head>
<style>
.main_area, .nav_area {{
position: absolute;
left: 0;
right: 0;
}}
.main_area {{
top: 0;
height: 75%;
overflow: scroll;
background-color: black;
white-space: nowrap;
padding-left: 10px;
}}
.nav_area {{
bottom: 0;
height: 25%;
overflow: scroll;
background-color: #131313;
}}
#nav_title {{
margin-left: auto;
margin-right: auto;
width: 200px;
font-weight: bold;
color: white;
font-size: 140%;
}}
#nav_contents {{
font-family: Consolas,monospace;
font-size: 80%;
color: #AAAAAA;
padding: 10px;
}}
.my-pre {{
line-height: 0.8;
padding: 0px 0px;
font-family: Consolas,monospace;
font-size: 80%;
}}
a:link {{
text-decoration: underline;
color: inherit;
}}
a:visited {{
text-decoration: underline;
color: inherit;
}}
a:hover {{
text-decoration: underline;
color: #FFFFFF;
}}
a:active {{
text-decoration: underline;
color: #FFFFFF;
}}
.ansi-bold {{
font-weight: bold;
white-space: pre;
}}
.ansi-black {{
color: #000000;
white-space: pre;
}}
.ansi-red {{
color: #d23737;
white-space: pre;
}}
.ansi-green {{
color: #17b217;
white-space: pre;
}}
.ansi-yellow {{
color: #b26717;
white-space: pre;
}}
.ansi-blue {{
color: #2727c2;
white-space: pre;
}}
.ansi-magenta {{
color: #b217b2;
white-space: pre;
}}
.ansi-cyan {{
color: #17b2b2;
white-space: pre;
}}
.ansi-white {{
color: #f2f2f2;
white-space: pre;
}}
</style>
</head>
<body>
<div class="main_area">
<pre class="my-pre">{lines}
</pre>
</div>
<div class="nav_area">
<div id="nav_contents">[Click on node address for cross-reference]</div>
</div>
<!-- Javascript -->
<script type="text/javascript">
var nav_data = {nav_data};
{js_code}
</script>
</body>
</html>
'''
JS_CODE = r'''
MakeAnchorLink = function(addr) {
anchorname = 'anchor_' + addr
return '<a href="#' + anchorname + '">' + addr + '</a>'
}
OnAnchorClick = function(elem_id) {
var nav_entry = nav_data[elem_id];
var contents = '';
contents += nav_entry['name'] + ' ' + nav_entry['id'];
contents += '<ul>\n';
parent_id = nav_entry['parent'];
if (parent_id === null) {
contents += '<li>Parent: none</li>\n';
} else {
parent_name = nav_data[parent_id]['name']
contents += '<li>Parent: ' + parent_name + ' ' +
MakeAnchorLink(parent_id) + '</li>\n';
}
contents += '<li>Children:'
if (nav_entry['children'].length == 0) {
contents += 'none</li>'
} else {
contents += '\n<ul>\n'
for (var i = 0; i < nav_entry['children'].length; i++) {
child_id = nav_entry['children'][i];
child_name = nav_data[child_id]['name'];
contents += '<li>' + child_name + ' ' +
MakeAnchorLink(child_id) + '</li>\n';
}
contents += '</ul>\n'
}
contents += '<li>Users:'
if (nav_entry['users'].length == 0) {
contents += 'none</li>'
} else {
contents += '\n<ul>\n'
for (var i = 0; i < nav_entry['users'].length; i++) {
user_id = nav_entry['users'][i];
user_name = nav_data[user_id]['name'];
contents += '<li>' + user_name + ' ' +
MakeAnchorLink(user_id) + '</li>\n';
}
contents += '</ul>\n'
}
document.getElementById('nav_contents').innerHTML = contents;
}
'''
SPAN_TEMPLATE = r'<span class="{klass}">{text}</span>'
class Color(enum.Enum):
"""Colors with values corresponding to the ANSI codes.
"""
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
# Input is broken to tokens. A token is a piece of text with the style that
# applies to it. The text is decoded from binary to a string.
class Token:
def __init__(self, text, style):
self.text = text.decode('ascii')
self.style = style
def __repr__(self):
return 'Token<text={}, style={}>'.format(self.text, self.style)
class Style:
def __init__(self, color=Color.WHITE, bold=False):
self.color = color
self.bold = bold
def __repr__(self):
return 'Style<color={}, bold={}>'.format(self.color, self.bold)
ANSI_PATTERN = re.compile(rb'\x1b\[([^m]+)m')
def tokenize_line(line):
"""Produce (yield) a stream of tokens from an input line.
"""
# The end pos of the last pattern match.
last_end = 0
# Current style
cur_style = Style()
for match in ANSI_PATTERN.finditer(line):
preceding_text = line[last_end:match.start()]
yield Token(preceding_text, cur_style)
last_end = match.end()
# Set the current style according to the ANSI code in the match.
for ansi_code in (int(c) for c in match.group(1).split(b';')):
if ansi_code == 0:
# Reset
cur_style = Style()
elif ansi_code == 1:
cur_style.bold = True
else:
# Color code. Obtain from Color enum. This will bomb if the
# color code is invalid.
cur_style.color = Color(ansi_code)
leftover_text = line[last_end:]
yield Token(leftover_text, cur_style)
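# Illustrative call with a hypothetical input line: for
#   list(tokenize_line(b"foo \x1b[32mbar\x1b[0m"))
# the first token is "foo " in the default (white, non-bold) style, the second
# is "bar" in green after the "\x1b[32m" escape, and the trailing reset yields
# a final empty token back in the default style.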
# Link injection happens at the HTML level - everything is a string now.
ADDR_PATTERN = re.compile(r'0x[0-9a-fA-F]+')
def make_anchor_link(addr, link_text):
anchorname = 'anchor_' + addr
return '<a href="#' + anchorname + '">' + link_text + '</a>'
def make_anchor_target(addr):
anchorname = 'anchor_' + addr
return '<a id="' + anchorname + '"></a>'
def inject_links(html_line_chunks):
first_addr = True
for i, chunk in enumerate(html_line_chunks):
match = ADDR_PATTERN.search(chunk)
if match:
addr = match.group()
if first_addr:
# The first address encountered in the line is the address of
# the node the line describes. This becomes a link anchor.
#print(tok.text[match.start():match.end()], file=sys.stderr)
html_line_chunks[i] = (
chunk[:match.start()] +
make_anchor_target(addr) +
'<a onclick="OnAnchorClick(\'' + addr +
'\');" href="#javascript:void(0)">' +
chunk[match.start():] + '</a>')
first_addr = False
else:
# All other addresses refer to other nodes. These become links
# to anchors.
html_line_chunks[i] = (
chunk[:match.start()] +
make_anchor_link(addr, chunk[match.start():match.end()]) +
chunk[match.end():])
def analyze_line(tokens):
"""Analyzes the given line (a list of tokens).
Returns the tuple: <id>, <name>, <nesting level>, [<used id>...]
"
|
sjdv1982/seamless
|
docs/archive/0.2-cleanup/3D/test-sphere.py
|
Python
|
mit
| 7,136
| 0.002522
|
from seamless import context, cell, transformer, reactor
from seamless.lib import edit, display, link
from seamless.lib.gui.gl import glprogram, glwindow
import numpy as np
from scipy.spatial.distance import cdist
ctx = context()
ctx.params = context()
ctx.links = context()
ctx.code = context()
#for now, gen_sphere must be a reactor, because it has multiple outputs
#TODO: make it a transformer in a future version of seamless
c = ctx.params.gen_sphere = cell(("cson", "seamless", "reactor_params"))
ctx.links.params_gen_sphere = link(c, ".", "params-gen-sphere.cson")
rc = ctx.gen_sphere = reactor(c)
c = ctx.code.gen_sphere = cell(("text", "code", "python"))
ctx.links.code_gen_sphere = link(c, ".", "cell-gen-sphere.py")
rc.code_start.cell().set("")
c.connect(rc.code_update)
rc.code_stop.cell().set("")
do_scale_params = {
    "input":{"pin": "input", "dtype": "array"},
    "scale":{"pin": "input", "dtype": "float"},
    "output":{"pin": "output", "dtype": "array"}
}
ctx.subdivisions = cell("int").set(3)
ctx.minimizations = cell("int").set(20)
ctx.scale = cell("float").set(3.5)
ctx.coordinates = cell("array").set_store("GL")
ctx.normals = cell("array").set_store("GL")
ctx.edges = cell("array").set_store("GL")
ctx.triangle_indices = cell("array").set_store("GL")
ctx.triangle_normals = cell("array").set_store("GL")
ctx.triangle_coordinates = cell("array").set_store("GL")
ctx.coordinates_prescale = cell("array")
ctx.do_scale = transformer(do_scale_params)
ctx.scale.connect(ctx.do_scale.scale)
ctx.coordinates_prescale.connect(ctx.do_scale.input)
ctx.do_scale.output.connect(ctx.coordinates)
ctx.do_scale.code.set("return scale * input")
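# Note: do_scale is a plain transformer; whenever its "scale" or "input" pin
# receives a new value, the one-line code cell set above re-runs and the scaled
# array is pushed to ctx.coordinates. (This describes the general seamless
# transformer model as used here, not project-specific behavior.)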
ctx.triangle_coordinates_prescale = cell("array")
ctx.do_scale2 = transformer(do_scale_params)
ctx.scale.connect(ctx.do_scale2.scale)
ctx.triangle_coordinates_prescale.connect(ctx.do_scale2.input)
ctx.do_scale2.output.connect(ctx.triangle_coordinates)
ctx.do_scale2.code.set("return scale * input")
ctx.subdivisions.connect(ctx.gen_sphere.subdivisions)
ctx.minimizations.connect(ctx.gen_sphere.minimizations)
ctx.gen_sphere.coordinates.connect(ctx.coordinates_prescale)
ctx.gen_sphere.normals.connect(ctx.normals)
ctx.gen_sphere.edges.connect(ctx.edges)
ctx.gen_sphere.triangle_indices.connect(ctx.triangle_indices)
ctx.gen_sphere.triangle_coordinates.connect(ctx.triangle_coordinates_prescale)
ctx.gen_sphere.triangle_normals.connect(ctx.triangle_normals)
ctx.params.gen_uniforms = cell("json").set({
"input": {"pin": "input", "dtype": "json"},
"output": {"pin": "output", "dtype": "json"},
})
ctx.window = glwindow("Seamless OpenGL 3D Example")
# Uniforms
ctx.gen_uniforms = transformer(ctx.params.gen_uniforms)
ctx.gen_uniforms.code.cell().set("""
result = {
"u_modelview_matrix": input["modelview_matrix"],
"u_projection_matrix": input["projection_matrix"],
"u_normal_matrix": input["normal_matrix"],
"u_mvp_matrix": input["mvp_matrix"],
}
return result
""")
identity = np.eye(4).tolist()
ctx.uniforms = cell("json")
ctx.window.camera.connect(ctx.uniforms)
ctx.uniforms.connect(ctx.gen_uniforms.input)
# Lines program
ctx.params.lines = cell("cson")
ctx.links.lines = link(ctx.params.lines, ".", "lines.cson")
ctx.lines_program = glprogram(ctx.params.lines, with_window=False)
ctx.window.init.cell().connect(ctx.lines_program.init)
#ctx.window.paint.cell().connect(ctx.lines_program.paint) # taken over by selector
ctx.lines_program.repaint.cell().connect(ctx.window.update)
ctx.coordinates.connect(ctx.lines_program.array_coordinates)
ctx.edges.connect(ctx.lines_program.array_edges)
ctx.gen_uniforms.output.cell().connect(ctx.lines_program.uniforms)
# Lines shaders
ctx.lines_vertexshader = cell(("text", "code", "vertexshader"))
ctx.lines_fragmentshader = cell(("text", "code", "fragmentshader"))
ctx.links.lines_vertexshader = link(ctx.lines_vertexshader, ".", "lines.vert")
ctx.links.lines_fragmentshader = link(ctx.lines_fragmentshader, ".", "lines.frag")
ctx.lines_vertexshader.connect(ctx.lines_program.vertex_shader)
ctx.lines_fragmentshader.connect(ctx.lines_program.fragment_shader)
# Triangle shaders
ctx.tri_vertexshader = cell(("text", "code", "vertexshader"))
ctx.tri_fragmentshader = cell(("text", "code", "fragmentshader"))
ctx.links.tri_vertexshader = link(ctx.tri_vertexshader, ".", "triangles.vert")
ctx.links.tri_fragmentshader = link(ctx.tri_fragmentshader, ".", "triangles.frag")
# Smooth triangles program
ctx.params.tri = cell("cson")
ctx.links.tri = link(ctx.params.tri, ".", "triangles-smooth.cson")
ctx.tri_program = glprogram(ctx.params.tri, with_window=False)
ctx.window.init.cell().connect(ctx.tri_program.init)
#ctx.window.paint.cell().connect(ctx.tri_program.paint) # taken over by selector
ctx.tri_program.repaint.cell().connect(ctx.window.update)
ctx.coordinates.connect(ctx.tri_program.array_coordinates)
ctx.normals.connect(ctx.tri_program.array_normals)
ctx.triangle_indices.connect(ctx.tri_program.array_indices)
ctx.gen_uniforms.output.cell().connect(ctx.tri_program.uniforms)
ctx.tri_vertexshader.connect(ctx.tri_program.vertex_shader)
ctx.tri_fragmentshader.connect(ctx.tri_program.fragment_shader)
# Flat triangles program
ctx.params.ftri = cell("cson")
ctx.links.ftri = link(ctx.params.ftri, ".", "triangles-flat.cson")
ctx.ftri_program = glprogram(ctx.params.ftri, with_window=False)
ctx.window.init.cell().connect(ctx.ftri_program.init)
#ctx.window.paint.cell().connect(ctx.ftri_program.paint) # taken over by selector
ctx.ftri_program.repaint.cell().connect(ctx.window.update)
ctx.triangle_coordinates.connect(ctx.ftri_program.array_coordinates)
ctx.triangle_normals.connect(ctx.ftri_program.array_normals)
ctx.gen_uniforms.output.cell().connect(ctx.ftri_program.uniforms)
ctx.tri_vertexshader.connect(ctx.ftri_program.vertex_shader)
ctx.tri_fragmentshader.connect(ctx.ftri_program.fragment_shader)
#Program selector
c = ctx.params.selector = cell(("cson", "seamless", "reactor_params"))
ctx.links.params_selector = link(c, ".", "params-selector.cson")
s = ctx.selector = reactor(c)
ctx.window.paint.cell().connect(s.paint)
s.paint_lines.cell().connect(ctx.lines_program.paint)
s.paint_triangles_smooth.cell().connect(ctx.tri_program.paint)
s.paint_triangles_flat.cell().connect(ctx.ftri_program.paint)
s.code_start.cell().set("state = 4")
s.code_stop.cell().set("")
c = ctx.code.selector = cell(("text", "code", "python"))
ctx.links.code_selector = link(c, ".", "cell-selector.py")
c.connect(s.code_update)
ctx.window.last_key.cell().connect(s.key)
s.repaint.cell().connect(ctx.window.update)
#kludge: must_be_defined does not work yet for input pins (TODO)
s.key.cell().set(" ")
s.key.cell().resource.save_policy = 4
#/kludge
#Kick-start the rendering
s.repaint.cell().set()
#Parameter editing
ctx.edit = context()
ctx.edit.scale = edit(ctx.scale, "Scale")
ctx.edit.subdivisions = edit(ctx.subdivisions, "Subdivisions")
ctx.edit.subdivisions.maximum.cell().set(7)
ctx.edit.minimizations = edit(ctx.minimizations, "Minimizations")
ctx.edit.minimizations.maximum.cell().set(100)
ctx.tofile("test-sphere.seamless", backup=False)
print("In the 3D window, press key 1-4 to change the states")
|
MarsZone/DreamLand
|
muddery/statements/default_statement_func_set.py
|
Python
|
bsd-3-clause
| 1,790
| 0
|
"""
Default statement functions.
"""
from muddery.statements.statement_func_set import BaseStatementFuncSet
import muddery.statements.action as action
import muddery.statements.condition as condition
import muddery.statements.attribute as attribute
import muddery.statements.rand as rand
import muddery.statements.skill as skill
class ActionFuncSet(BaseStatementFuncSet):
"""
Statement functions used in actions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(attribute.FuncSetAttr)
self.add(attribute.FuncRemoveAttr)
self.add(action.FuncLearnSkill)
self.add(action.FuncGiveObject)
self.add(action.FuncRemoveObjects)
        self.add(action.FuncTeleportTo)
        self.add(action.FuncFightMob)
        self.add(action.FuncFightTarget)
class ConditionFuncSet(BaseStatementFuncSet):
"""
Statement functions used in conditions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(condition.FuncIsQuestInProgress)
self.add(condition.FuncCanProvideQuest)
self.add(condition.FuncIsQuestCompleted)
self.add(condition.FuncHasObject)
self.add(attribute.FuncGetAttr)
self.add(attribute.FuncHasAttr)
self.add(attribute.FuncCheckAttr)
self.add(rand.FuncOdd)
self.add(rand.FuncRand)
self.add(rand.FuncRandInt)
class SkillFuncSet(BaseStatementFuncSet):
"""
Statement functions used in actions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(skill.FuncEscape)
self.add(skill.FuncHeal)
self.add(skill.FuncHit)
self.add(skill.FuncIncreaseMaxHP)
|
ellisonbg/nbgrader
|
nbgrader/tests/preprocessors/test_saveautogrades.py
|
Python
|
bsd-3-clause
| 8,576
| 0.001632
|
import pytest
from nbformat.v4 import new_notebook, new_output
from ...preprocessors import SaveCells, SaveAutoGrades
from ...api import Gradebook
from ...utils import compute_checksum
from .base import BaseTestPreprocessor
from .. import (
create_grade_cell, create_grade_and_solution_cell, create_solution_cell)
@pytest.fixture
def preprocessors():
return (SaveCells(), SaveAutoGrades())
@pytest.fixture
def gradebook(request, db):
gb = Gradebook(db)
gb.add_assignment("ps0")
gb.add_student("bar")
def fin():
gb.close()
request.addfinalizer(fin)
return gb
@pytest.fixture
def resources(db):
return {
"nbgrader": {
"db_url": db,
"assignment": "ps0",
"notebook": "test",
"student": "bar"
}
}
class TestSaveAutoGrades(BaseTestPreprocessor):
def test_grade_correct_code(self, preprocessors, gradebook, resources):
"""Is a passing code cell correctly graded?"""
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 1
assert grade_cell.max_score == 1
assert grade_cell.auto_score == 1
assert grade_cell.manual_score == None
assert not grade_cell.needs_manual_grade
def test_grade_incorrect_code(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_cell("hello", "code", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])]
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score == 0
assert grade_cell.manual_score == None
assert not grade_cell.needs_manual_grade
def test_grade_unchanged_markdown(self, preprocessors, gradebook, resources):
"""Is an unchanged markdown cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score == 0
assert grade_cell.manual_score == None
assert not grade_cell.needs_manual_grade
def test_grade_changed_markdown(self, preprocessors, gradebook, resources):
"""Is a changed markdown cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score == None
assert grade_cell.manual_score == None
assert grade_cell.needs_manual_grade
def test_comment_unchanged_code(self, preprocessors, gradebook, resources):
"""Is an unchanged code cell given the correct comment?"""
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment == "No response."
def test_comment_changed_code(self, preprocessors, gradebook, resources):
"""Is a changed code cell given the correct comment?"""
cell = create_solution_cell("hello", "code", "foo")
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
        cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment is None
def test_comment_unchanged_markdown(self, preprocessors, gradebook, resources):
"""Is an unchanged markdown cell given the correct comment?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
        cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment == "No response."
def test_comment_changed_markdown(self, preprocessors, gradebook, resources):
"""Is a changed markdown cell given the correct comment?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
comment = gradebook.find_comment("foo", "test", "ps0", "bar")
assert comment.auto_comment is None
def test_grade_existing_manual_grade(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
gradebook.add_submission("ps0", "bar")
cell.source = "hello!"
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 0
assert grade_cell.max_score == 1
assert grade_cell.auto_score == None
assert grade_cell.manual_score == None
assert grade_cell.needs_manual_grade
grade_cell.manual_score = 1
grade_cell.needs_manual_grade = False
gradebook.db.commit()
preprocessors[1].preprocess(nb, resources)
grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar")
assert grade_cell.score == 1
assert grade_cell.max_score == 1
assert grade_cell.auto_score == None
assert grade_cell.manual_score == 1
assert grade_cell.needs_manual_grade
def test_grade_existing_auto_comment(self, preprocessors, gradebook, resources):
"""Is a failing code cell correctly graded?"""
cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1)
cell.metadata.nbgrader['checksum'] = compute_checksum(cell)
nb = new_notebook()
nb.cells.append(cell)
preprocessors[0].preprocess(nb, resources)
|
baallezx/rsp
|
devtest/q3/3.py
|
Python
|
apache-2.0
| 990
| 0.061616
|
def create_graph(contents):
"""
if you are given a -1 then it does not point to anything.
"""
d = {}
    for i in xrange(len(contents)):
d[i] = int(contents[i])
return d
def _cycle(graph, key, stack, stacks):
# print graph, key, stack
if ( key , graph[key] ) in stack: # you have found a cycle
stacks.append(stack)
return # True
elif graph[key] == -1: # dead end
return None # False
else:
stack.append( ( key , graph[key] ) )
_cycle( graph, graph[key], stack, stacks )
# print stack
def find_cycles(graph):
"""
    find all the cycles in a directed graph.
"""
stacks = []
for k,v in graph.items():
stack = []
_cycle(graph, k, stack, stacks)
results = []
for i in stacks:
i.sort()
if i not in results:
results.append(i)
#print i
print len(results)
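# Illustrative usage with a hypothetical input: create_graph(['1', '2', '0'])
# encodes 0 -> 1 -> 2 -> 0, so find_cycles prints 1 (a single distinct cycle);
# any node mapped to -1 is a dead end and contributes no cycle.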
if __name__ == "__main__":
import sys
l = sys.argv[1:]
r = open(l[0],'r').read().split('\n')
r.remove('')
# print r
N = r[0]
graph = create_graph(r[1:])
# print graph
find_cycles(graph)
|
lyw07/kolibri
|
kolibri/core/content/utils/transfer.py
|
Python
|
mit
| 6,229
| 0.001284
|
import logging
import os
import shutil
import requests
from requests.exceptions import ConnectionError
logger = logging.getLogger(__name__)
class ExistingTransferInProgress(Exception):
pass
class TransferNotYetCompleted(Exception):
pass
class TransferCanceled(Exception):
pass
class TransferNotYetClosed(Exception):
pass
class Transfer(object):
def __init__(
self,
source,
dest,
block_size=2097152,
remove_existing_temp_file=True,
timeout=20,
):
self.source = source
self.dest = dest
self.dest_tmp = dest + ".transfer"
self.block_size = block_size
self.timeout = timeout
self.started = False
self.completed = False
self.finalized = False
self.closed = False
# TODO (aron): Instead of using signals, have bbq/iceqube add
# hooks that the app calls every so often to determine whether it
# should shut down or not.
# signal.signal(signal.SIGINT, self._kill_gracefully)
# signal.signal(signal.SIGTERM, self._kill_gracefully)
assert not os.path.isdir(
dest
), "dest must include the target filename, not just directory path"
# ensure the directories in the destination path exist
try:
filedir = os.path.dirname(self.dest)
os.makedirs(filedir)
except OSError as e:
if e.errno == 17: # File exists (folder already created)
logger.debug(
"Not creating directory '{}' as it already exists.".format(filedir)
)
else:
raise
if os.path.isfile(self.dest_tmp):
if remove_existing_temp_file:
os.remove(self.dest_tmp)
else:
raise ExistingTransferInProgress(
"Temporary transfer destination '{}' already exists!".format(
self.dest_tmp
)
)
# record whether the destination file already exists, so it can be checked, but don't error out
self.dest_exists = os.path.isfile(dest)
# open the destination file for writing
self.dest_file_obj = open(self.dest_tmp, "wb")
def __next__(self): # proxy this method to fully support Python 3
return self.next()
def next(self):
try:
chunk = next(self._content_iterator)
except StopIteration:
self.completed = True
self.close()
self.finalize()
raise
self.dest_file_obj.write(chunk)
return chunk
def _move_tmp_to_dest(self):
shutil.move(self.dest_tmp, self.dest)
def __enter__(self):
self.start()
return self
def __exit__(self, *exc_details):
if not self.closed:
self.close()
if not self.completed:
self.cancel()
def _kill_gracefully(self, *args, **kwargs):
self.cancel()
raise TransferCanceled("The transfer was canceled.")
def cancel(self):
self.close()
try:
os.remove(self.dest_tmp)
except OSError:
pass
self.canceled = True
def finalize(self):
if not self.completed:
raise TransferNotYetCompleted(
"Transfer must have completed before it can be finalized."
)
if not self.closed:
raise TransferNotYetClosed(
"Transfer must be closed before it can be finalized."
)
if self.finalized:
return
self._move_tmp_to_dest()
self.finalized = True
def close(self):
self.dest_file_obj.close()
self.closed = True
class FileDownload(Transfer):
    def __init__(self, *args, **kwargs):
# allow an existing requests.Session instance to be passed in, so it can be reused for speed
if "session" in kwargs:
self.session = kwargs.pop("session")
else:
# initialize a fresh requests session, if one wasn't provided
self.session = requests.Session()
super(FileDownload, self).__init__(*args, **kwargs)
def start(self):
# If a file download was stopped by Internet connection error,
# then open the temp file again.
if self.started:
self.dest_file_obj = open(self.dest_tmp, "wb")
# initiate the download, check for status errors, and calculate download size
self.response = self.session.get(self.source, stream=True, timeout=self.timeout)
self.response.raise_for_status()
try:
self.total_size = int(self.response.headers["content-length"])
except Exception:
# HACK: set the total_size very large so downloads are not considered "corrupted"
# in importcontent._start_file_transfer
self.total_size = 1e100
self.started = True
def __iter__(self):
assert self.started, "File download must be started before it can be iterated."
self._content_iterator = self.response.iter_content(self.block_size)
return self
def next(self):
try:
return super(FileDownload, self).next()
except ConnectionError as e:
logger.error("Error reading download stream: {}".format(e))
raise
def close(self):
self.response.close()
super(FileDownload, self).close()
class FileCopy(Transfer):
def start(self):
assert (
not self.started
), "File copy has already been started, and cannot be started again"
self.total_size = os.path.getsize(self.source)
self.source_file_obj = open(self.source, "rb")
self.started = True
def _read_block_iterator(self):
while True:
block = self.source_file_obj.read(self.block_size)
if not block:
break
yield block
def __iter__(self):
self._content_iterator = self._read_block_iterator()
return self
def close(self):
self.source_file_obj.close()
super(FileCopy, self).close()
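# Minimal usage sketch with hypothetical paths (not part of the original module):
# entering the context manager calls start(), iterating streams blocks into
# "<dest>.transfer", and exhausting the iterator closes and finalizes the
# transfer, which moves the temp file onto the real destination.
#
#   copy = FileCopy("/tmp/source.bin", "/tmp/dest.bin")
#   with copy:
#       for _block in copy:
#           pass
#   assert copy.finalized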
|
draperlaboratory/stout
|
op_tasks/models.py
|
Python
|
apache-2.0
| 5,396
| 0.004818
|
from django.db import models
#from django.contrib.auth.models import User
from django.conf import settings
import hashlib
import time, datetime
def _createHash():
hash = hashlib.sha1()
hash.update(str(time.time()))
return hash.hexdigest()[:-10]
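# Note: a sha1 hexdigest is 40 characters long, so dropping the last 10 leaves
# a 30-character value, matching UserProfile.user_hash (max_length=30) below.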
# the dataset class stores parameters about the
class Dataset(models.Model):
name = models.CharField(max_length=255) # name of dataset
version = models.CharField(max_length=10)
is_active = models.BooleanField(default=True)
def __unicode__(self): # Python 3: def __str__(self):
return '%s - %s' % (self.name, self.version)
class Meta:
unique_together = ("name", "version")
class Product(models.Model): # product = tool + dataset
dataset = models.ForeignKey(Dataset, null=True, blank=True) # data for tool
url = models.CharField(max_length=255, unique=False) # path to product
#url = models.CharField(max_length=255, unique=False) # path to product
team = models.CharField(max_length=255) # developer team
name = models.CharField(max_length=255) # name of
version = models.CharField(max_length=10)
is_active = models.BooleanField(default=True)
instructions = models.CharField(max_length=255)
def __unicode__(self): # Python 3: def __str__(self):
return '%s:%s:%s:%s' % (self.team, self.name, self.dataset, self.version)
class OpTask(models.Model):
dataset = models.ForeignKey(Dataset, null=True, blank=True)
name = models.CharField(max_length=200)
survey_url = models.CharField(max_length=255, unique=False)
is_active = models.BooleanField(default=True)
exit_url = models.CharField(max_length=255, unique=False)
instructions = models.CharField(max_length=255)
def __unicode__(self): # Python 3: def __str__(self):
return '%s-%s' % (self.name, self.dataset)
class Experiment(models.Model):
name = models.CharField(max_length=250) # name of the experiment
task_count = models.IntegerField(default=0)
task_length = models.IntegerField(default=30) # minutes
has_achievements = models.BooleanField(default=False)
has_intake = models.BooleanField(default=False)
intake_url = models.CharField(max_length=255, unique=False, blank=True, default='')
has_followup = models.BooleanField(default=False)
consent = models.BooleanField(default=True)
sequential_tasks = models.BooleanField(default=True)
show_progress = models.BooleanField(default=True)
timed = models.BooleanField(default=True)
# auto tasking with user registration. If FALSE then tasks must be
# assigned manually by admin
auto_tasking = models.BooleanField(default=False)
def __unicode__(self):
return '%s' % (self.name)
class UserProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL)
user_hash = models.CharField(max_length=30, default=_createHash, unique=True, editable=False)
progress = models.IntegerField(default=0)
# additional user parameters
exp_inst_complete = models.BooleanField(default=False)
portal_inst_complete = models.BooleanField(default=False)
task_inst_complete = models.BooleanField(default=False)
intake_complete = models.BooleanField(default=False)
experiment = models.ForeignKey(Experiment, null=True, blank=True)
referrals = models.IntegerField(default=0)
bestGenAccuracy = models.IntegerField(default=0)
bestDevAccuracy = models.IntegerField(default=0)
def __unicode__(self):
return self.user.email
def read_instructions(self):
return self.exp_inst_complete and self.portal_inst_complete and self.task_inst_complete
# The TaskListItem model is used to manage user navigation through the experiment
class TaskListItem(models.Model):
# knows which user it is assigned to
userprofile = models.ForeignKey(UserProfile)
# knows which operational task
op_task = models.ForeignKey(OpTask)
product = models.ForeignKey(Product)
# is assigned an index in a list
index = models.IntegerField()
# mark if this operational task is the current task in the sequence
task_active = models.BooleanField(default=False)
# mark if operation task is completed
task_complete = models.BooleanField(default=False)
date_complete = models.DateTimeField(default=None, blank=True, null=True)
exit_active = models.BooleanField(default=False)
exit_complete = models.BooleanField(default=False)
activity_count = models.IntegerField(default=0)
    def _both_complete(self):
"returns whether both task and survey are complete"
return self.exit_complete and self.task_complete
    both_complete = property(_both_complete)
def __unicode__(self): # Python 3: def __str__(self):
return '%s, %s, %s' % (self.userprofile.user.email, self.op_task, self.index)
class Meta:
ordering = ('userprofile', 'index')
# index = models.IntegerField()
class Achievement(models.Model):
name = models.CharField(max_length=50)
desc = models.CharField(max_length=1000)
def __unicode__(self):
return '%s' % (self.name)
class UserAchievement(models.Model):
userprofile = models.ForeignKey(UserProfile)
achievement = models.ForeignKey(Achievement)
def __unicode__(self):
return '%s - %s' % (self.userprofile.user.email, self.achievement.name)
|
masschallenge/impact-api
|
web/impact/impact/v1/events/user_became_desired_mentor_event.py
|
Python
|
mit
| 362
| 0
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from accelerator.models import UserRole
from impact.v1.events.base_user_became_mentor_event import (
BaseUserBecameMentorEvent,
)
class UserBecameDesiredMentorEvent(BaseUserBecameMentorEvent):
EVENT_TYPE = "became desired mentor"
USER_ROLE = UserRole.DESIRED_MENTOR
ROLE_NAME = USER_ROLE
|
MadManRises/Madgine
|
shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/minitaur/actuatornet/proto2csv.py
|
Python
|
mit
| 1,333
| 0.020255
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#python proto2csv.py --proto_file=/tmp/logs/minitaur_log_2019-01-27-12-59-31 --csv_file=/tmp/logs/out.csv
#each line in csv contains: angle, velocity, action, torque
import tensorflow as tf
import argparse
import numpy
from pybullet_envs.minitaur.envs import minitaur_logging
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string("proto_file", "logs", "path to protobuf input file")
flags.DEFINE_string("csv_file", "file.csv", "path to csv output file")
def main(argv):
del argv
  logging = minitaur_logging.MinitaurLogging()
  episode = logging.restore_episode(FLAGS.proto_file)
#print(dir (episode))
#print("episode=",episode)
fields = episode.ListFields()
recs = []
for rec in fields[0][1]:
#print(rec.time)
for motorState in rec.motor_states:
#print("motorState.angle=",motorState.angle)
#print("motorState.velocity=",motorState.velocity)
#print("motorState.action=",motorState.action)
#print("motorState.torque=",motorState.torque)
recs.append([motorState.angle, motorState.velocity, motorState.action, motorState.torque])
a = numpy.array(recs)
numpy.savetxt(FLAGS.csv_file, a, delimiter=",")
if __name__ == "__main__":
tf.app.run(main)
|
egtaonline/quiesce
|
test/test_eosched.py
|
Python
|
apache-2.0
| 6,888
| 0.003049
|
"""Tests for egta online scheduler"""
import asyncio
import contextlib
import random
import numpy as np
import pytest
from egtaonline import api
from egtaonline import mockserver
from gameanalysis import rsgame
from egta import countsched
from egta import eosched
# TODO general setup may be done with a fixture in python 3.6
@pytest.fixture(name="game")
def fix_game():
"""Fixture to create a standard game"""
return rsgame.empty([4, 4], [11, 11])
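# The fixture above builds an empty two-role game: 4 players and 11 strategies
# per role (rsgame.empty takes per-role player counts, then strategy counts).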
@pytest.mark.asyncio
async def test_basic_profile(game):
"""Test scheduling a standard profile"""
async with mockserver.server() as server, api.api() as egta:
sim = await egta.get_simulator(
server.create_simulator("sim", "1", delay_dist=lambda: random.random() / 10)
)
strats = dict(zip(game.role_names, game.strat_names))
symgrps = list(zip(game.role_names, game.num_role_players, game.strat_names))
await sim.add_strategies(strats)
egame = await egta.get_canon_game(sim["id"], symgrps)
profs = game.random_profiles(20)
        # Schedule all new profiles and verify it works
        async with eosched.eosched(game, egta, egame["id"], 0.1, 1, 10, 0, 0) as sched:
assert str(sched) == str(egame["id"])
assert game == rsgame.empty_copy(sched)
awaited = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(awaited)
assert np.allclose(pays[profs == 0], 0)
# Schedule old profiles and verify it still works
async with eosched.eosched(game, egta, egame["id"], 0.1, 1, 10, 0, 0) as sched:
awaited = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(awaited)
assert np.allclose(pays[profs == 0], 0)
# Schedule two at a time, in two batches
async with eosched.eosched(
game, egta, egame["id"], 0.1, 2, 10, 0, 0
) as base_sched:
sched = countsched.countsched(base_sched, 2)
awaited = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(awaited)
assert np.allclose(pays[profs == 0], 0)
# Try again now that everything should be scheduled
async with eosched.eosched(
game, egta, egame["id"], 0.1, 2, 10, 0, 0
) as base_sched:
sched = countsched.countsched(base_sched, 2)
awaited = await asyncio.gather(*[sched.sample_payoffs(p) for p in profs])
pays = np.stack(awaited)
assert np.allclose(pays[profs == 0], 0)
def _raise(ex):
"""Exception as function"""
raise ex
@pytest.mark.asyncio
async def test_exception_in_create(game):
"""Test exception creating eo scheduler"""
async with mockserver.server() as server, api.api() as egta:
sim = await egta.get_simulator(
server.create_simulator( # pragma: no branch pylint: disable=line-too-long
"sim", "1", delay_dist=lambda: random.random() / 10
)
)
strats = dict(zip(game.role_names, game.strat_names))
symgrps = list(zip(game.role_names, game.num_role_players, game.strat_names))
await sim.add_strategies(strats)
egame = await egta.get_canon_game(sim["id"], symgrps)
server.custom_response(lambda: _raise(TimeoutError))
with pytest.raises(TimeoutError):
async with eosched.eosched(game, egta, egame["id"], 0.1, 1, 25, 0, 0):
pass # pragma: no cover
@pytest.mark.asyncio
async def test_exception_in_get(game):
"""Test exception in await"""
async with mockserver.server() as server, api.api() as egta:
sim = await egta.get_simulator(
server.create_simulator("sim", "1", delay_dist=lambda: random.random() / 10)
)
strats = dict(zip(game.role_names, game.strat_names))
symgrps = list(zip(game.role_names, game.num_role_players, game.strat_names))
await sim.add_strategies(strats)
egame = await egta.get_canon_game(sim["id"], symgrps)
profs = game.random_profiles(20)
async with eosched.eosched(game, egta, egame["id"], 0.1, 1, 10, 0, 0) as sched:
tasks = [asyncio.ensure_future(sched.sample_payoffs(p)) for p in profs]
await asyncio.sleep(0.1)
server.custom_response(lambda: _raise(TimeoutError))
await asyncio.sleep(0.1)
with pytest.raises(TimeoutError):
await asyncio.gather(*tasks)
# tidy up
errors = asyncio.gather(*tasks, return_exceptions=True)
errors.cancel()
with contextlib.suppress(TimeoutError, asyncio.CancelledError):
await errors
@pytest.mark.asyncio
async def test_exception_in_schedule(game):
"""Test exception throwin in scheduler"""
async with mockserver.server() as server, api.api() as egta:
sim = await egta.get_simulator(
server.create_simulator( # pragma: no branch pylint: disable=line-too-long
"sim", "1", delay_dist=lambda: random.random() / 10
)
)
strats = dict(zip(game.role_names, game.strat_names))
symgrps = list(zip(game.role_names, game.num_role_players, game.strat_names))
await sim.add_strategies(strats)
egame = await egta.get_canon_game(sim["id"], symgrps)
prof = game.random_profile()
async with eosched.eosched(game, egta, egame["id"], 0.1, 1, 25, 0, 0) as sched:
# so that enough calls to get_requirements are made
server.custom_response(lambda: _raise(TimeoutError))
await asyncio.sleep(0.1)
with pytest.raises(TimeoutError):
await sched.sample_payoffs(prof)
@pytest.mark.asyncio
async def test_scheduler_deactivate(game):
"""Test that scheduler ends when deactivated"""
async with mockserver.server() as server, api.api() as egta:
sim = await egta.get_simulator(
server.create_simulator( # pragma: no branch pylint: disable=line-too-long
"sim", "1", delay_dist=lambda: random.random() / 10
)
)
strats = dict(zip(game.role_names, game.strat_names))
symgrps = list(zip(game.role_names, game.num_role_players, game.strat_names))
await sim.add_strategies(strats)
egame = await egta.get_canon_game(sim["id"], symgrps)
# Schedule all new profiles and verify it works
# This first time should have to wait to schedule more
async with eosched.eosched(game, egta, egame["id"], 0.1, 1, 10, 0, 0) as sched:
# Deactivate scheduler
for esched in await egta.get_generic_schedulers():
await esched.deactivate()
with pytest.raises(ValueError):
await sched.sample_payoffs(game.random_profile())
|
iamdork/dork
|
dork/matcher.py
|
Python
|
mit
| 7,851
| 0.001019
|
import config
from git import Repository
import os
import yaml
from fnmatch import fnmatch
class Role:
@classmethod
def tree(cls, repository):
return RoleFactory(repository).tree()
@classmethod
def clear(cls, repository):
return RoleFactory(repository).clear()
def __init__(self, name, meta, repository):
"""
:type name: str
:type meta: dict
        :type repository: Repository
:return:
"""
self.repo = repository
self.name = name
self.factory = RoleFactory(repository)
self.__meta = meta
if 'dork' not in self.__meta:
self.__meta['dork'] = {}
self.__dependencies = []
self.__services = meta['dork']['services'] if 'services' in meta['dork'] else {}
if 'dependencies' in self.__meta and isinstance(self.__meta['dependencies'], list):
for dep in self.__meta['dependencies']:
if isinstance(dep, str):
self.__dependencies.append(dep)
if isinstance(dep, dict) and 'role' in dep:
self.__dependencies.append(dep['role'])
if 'build_triggers' in self.__meta['dork']:
self.__triggers = self.__meta['dork']['build_triggers']
# if matches is a simple list, create a default pattern
if not isinstance(self.__triggers, dict):
self.__triggers = {'default': self.__triggers}
else:
self.__triggers = {}
self.__matched_triggers = []
self.__enabled_triggers = []
self.__disabled_triggers = []
for trigger, patterns in self.__triggers.iteritems():
if isinstance(patterns, list):
# If filepatterns is a list, check them all.
fits = len(patterns) > 0
for pattern in patterns:
# If it's a dictionary, use key as filepattern and
# value as content regex.
if isinstance(pattern, dict):
fits = fits and all([repository.contains_file(gp, cp)
for gp, cp in pattern.iteritems()])
elif isinstance(pattern, str):
fits = fits and repository.contains_file(pattern)
if fits:
self.__matched_triggers.append(trigger)
elif isinstance(patterns, bool):
# If filepatterns is a boolean value, match the pattern accordingly.
if patterns and trigger == 'global':
self.__matched_triggers.append(trigger)
elif patterns:
self.__enabled_triggers.append(trigger)
else:
self.__disabled_triggers.append(trigger)
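    # Illustrative (hypothetical) meta dict accepted by the constructor above:
    #   {'dependencies': ['php', {'role': 'mysql'}],
    #    'dork': {'services': {'http': 80},
    #             'build_triggers': {'drupal': ['*/composer.json']}}}
    # With that meta, the 'drupal' trigger matches only when the repository
    # contains a file matching '*/composer.json'; a bare boolean True under the
    # 'global' key would instead mark the role as always triggered.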
@property
def dependencies(self):
"""
:rtype: list[str]
"""
return self.__dependencies
def includes(self, name):
"""
Check if this role somehow includes another role.
:type name:
:rtype: bool
"""
return name in self.__dependencies or any([self.factory.get(dep).includes(name) for dep in self.__dependencies])
@property
def services(self):
"""
Recursively get all fixed ports.
:return: dict
"""
ports = {}
for dep in self.__dependencies:
ports.update(self.factory.get(dep).services)
ports.update(self.__services)
return ports
def triggers(self):
"""
Recursively get all defined triggers.
:return:
"""
triggers = self.__triggers
for dep in self.__dependencies:
role = self.factory.get(dep)
triggers.update(role.triggers())
return triggers
@property
def triggered(self):
return len(self.__matched_triggers) > 0
@property
def active_triggers(self):
"""
Get a list of triggers that are active for this repository.
:rtype: list[str]
"""
if len(self.__matched_triggers) == 0:
triggers = []
else:
triggers = self.__matched_triggers + self.__enabled_triggers
for dep in self.__dependencies:
role = self.factory.get(dep)
triggers += role.active_triggers
return list(set(triggers) - set(self.__disabled_triggers))
def update_triggers(self, changeset):
"""
Get a list of required update triggers.
:type changeset: list[str]
:rtype: list[str]
"""
# skip if there are no tags patterns defined
tags = []
if 'update_triggers' not in self.__meta['dork']:
self.__meta['dork']['update_triggers'] = []
for tagpattern in self.__meta['dork']['update_triggers']:
for pattern, taglist in tagpattern.iteritems():
for changed_file in changeset:
if fnmatch(changed_file, pattern):
tags += taglist
for dep in self.__dependencies:
role = self.factory.get(dep)
tags += role.update_triggers(changeset)
return list(set(tags))
@property
def settings(self):
settings = {}
for dep in self.dependencies:
settings.update(self.factory.get(dep).settings)
if 'settings' in self.__meta['dork']:
settings.update(self.__meta['dork']['settings'])
return settings
class RoleFactory:
__roles = {}
def __init__(self, repository):
self.__repo = repository
self.__dir = repository.directory
def clear(self):
if self.__dir in RoleFactory.__roles:
del RoleFactory.__roles[self.__dir]
def list(self):
if self.__dir not in RoleFactory.__roles:
roles = {}
role_directories = config.config.ansible_roles_path
project_role_path = self.__dir + '/.dork'
if os.path.isdir(project_role_path):
role_directories.append(project_role_path)
for roles_dir in role_directories:
for role in os.listdir(roles_dir):
meta_file = "%s/%s/meta/main.yml" % (roles_dir, role)
dork_file = "%s/%s/meta/dork.yml" % (roles_dir, role)
# Skip if name starts with a . or meta file doesn't exist.
if role.startswith('.') or not os.path.isfile(meta_file):
continue
meta = yaml.load(open(meta_file, 'r'))
if os.path.isfile(dork_file):
meta['dork'] = yaml.load(open(dork_file, 'r'))
if roles_dir == project_role_path:
if 'dork' not in meta:
meta['dork'] = {}
if 'build_triggers' not in meta['dork']:
meta['dork']['build_triggers'] = {}
meta['dork']['build_triggers']['global'] = True
# Write metadata back into the cache
roles[role] = Role(role, meta, repository=self.__repo)
RoleFactory.__roles[self.__dir] = roles
return RoleFactory.__roles[self.__dir]
def get(self, name):
roles = self.list()
if name in roles:
return roles[name]
def tree(self):
matching_roles = []
for name, role in self.list().iteritems():
if role.triggered:
matching_roles.append(role)
included_roles = []
for role in matching_roles:
if any([r.includes(role.name) for r in matching_roles]):
included_roles.append(role.name)
return [r for r in matching_roles if r.name not in included_roles]
|
wscullin/spack
|
var/spack/repos/builtin.mock/packages/extendee/package.py
|
Python
|
lgpl-2.1
| 1,547
| 0.000646
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Extendee(Package):
"""A package with extensions"""
homepage = "http://www.example.com"
url = "http://www.example.com/extendee-1.0.tar.gz"
extendable = True
version('1.0', 'hash-extendee-1.0')
def install(self, spec, prefix):
mkdirp(prefix.bin)
|
firebase/grpc
|
tools/github/pr_latency.py
|
Python
|
apache-2.0
| 6,848
| 0.001606
|
#!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measure the time between PR creation and completion of all tests.
You'll need a github API token to avoid being rate-limited. See
https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/
This script goes over the most recent 100 pull requests. For PRs with a single
commit, it uses the PR's creation as the initial time; otherwise, it uses the
date of the last commit. This is somewhat fragile, and imposed by the fact that
GitHub reports a PR's updated timestamp for any event that modifies the PR (e.g.
comments), not just the addition of new commits.
In addition, it ignores latencies greater than five hours, as that's likely due
to a manual re-run of tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import pprint
import urllib2
from datetime import datetime, timedelta
logging.basicConfig(format='%(asctime)s %(message)s')
PRS = 'https://api.github.com/repos/grpc/grpc/pulls?state=open&per_page=100'
COMMITS = 'https://api.github.com/repos/grpc/grpc/pulls/{pr_number}/commits'
def gh(url):
request = urllib2.Request(url)
if TOKEN:
        request.add_header('Authorization', 'token {}'.format(TOKEN))
    response = urllib2.urlopen(request)
return response.read()
def print_csv_header():
print('pr,base_time,test_time,latency_seconds,successes,failures,errors')
def output(pr,
base_time,
test_time,
diff_time,
successes,
failures,
errors,
mode='human'):
if mode == 'human':
print(
"PR #{} base time: {} UTC, Tests completed at: {} UTC. Latency: {}."
"\n\tSuccesses: {}, Failures: {}, Errors: {}".format(
pr, base_time, test_time, diff_time, successes, failures,
errors))
elif mode == 'csv':
print(','.join([
str(pr),
str(base_time),
str(test_time),
str(int((test_time - base_time).total_seconds())),
str(successes),
str(failures),
str(errors)
]))
def parse_timestamp(datetime_str):
return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%SZ')
def to_posix_timestamp(dt):
return str((dt - datetime(1970, 1, 1)).total_seconds())
def get_pr_data():
latest_prs = json.loads(gh(PRS))
res = [{
'number': pr['number'],
'created_at': parse_timestamp(pr['created_at']),
'updated_at': parse_timestamp(pr['updated_at']),
'statuses_url': pr['statuses_url']
} for pr in latest_prs]
return res
def get_commits_data(pr_number):
commits = json.loads(gh(COMMITS.format(pr_number=pr_number)))
return {
'num_commits':
len(commits),
'most_recent_date':
parse_timestamp(commits[-1]['commit']['author']['date'])
}
def get_status_data(statuses_url, system):
status_url = statuses_url.replace('statuses', 'status')
statuses = json.loads(gh(status_url + '?per_page=100'))
successes = 0
failures = 0
errors = 0
latest_datetime = None
if not statuses: return None
if system == 'kokoro': string_in_target_url = 'kokoro'
elif system == 'jenkins': string_in_target_url = 'grpc-testing'
for status in statuses['statuses']:
if not status['target_url'] or string_in_target_url not in status[
'target_url']:
continue # Ignore jenkins
if status['state'] == 'pending': return None
elif status['state'] == 'success': successes += 1
elif status['state'] == 'failure': failures += 1
elif status['state'] == 'error': errors += 1
if not latest_datetime:
latest_datetime = parse_timestamp(status['updated_at'])
else:
latest_datetime = max(latest_datetime,
parse_timestamp(status['updated_at']))
# First status is the most recent one.
if any([successes, failures, errors
]) and sum([successes, failures, errors]) > 15:
return {
'latest_datetime': latest_datetime,
'successes': successes,
'failures': failures,
'errors': errors
}
else:
return None
def build_args_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--format',
type=str,
choices=['human', 'csv'],
default='human',
help='Output format: are you a human or a machine?')
parser.add_argument('--system',
type=str,
choices=['jenkins', 'kokoro'],
required=True,
help='Consider only the given CI system')
parser.add_argument(
'--token',
type=str,
default='',
help='GitHub token to use its API with a higher rate limit')
return parser
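# Example invocation (the token value is a placeholder, not a real credential):
#   python pr_latency.py --system=kokoro --format=csv --token=<github-token>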
def main():
import sys
global TOKEN
args_parser = build_args_parser()
args = args_parser.parse_args()
TOKEN = args.token
if args.format == 'csv': print_csv_header()
for pr_data in get_pr_data():
commit_data = get_commits_data(pr_data['number'])
# PR with a single commit -> use the PRs creation time.
# else -> use the latest commit's date.
base_timestamp = pr_data['updated_at']
if commit_data['num_commits'] > 1:
base_timestamp = commit_data['most_recent_date']
else:
base_timestamp = pr_data['created_at']
last_status = get_status_data(pr_data['statuses_url'], args.system)
if last_status:
diff = last_status['latest_datetime'] - base_timestamp
if diff < timedelta(hours=5):
output(pr_data['number'],
base_timestamp,
last_status['latest_datetime'],
diff,
last_status['successes'],
last_status['failures'],
last_status['errors'],
mode=args.format)
if __name__ == '__main__':
main()
|
hoaibang07/Webscrap
|
sources/xmldemo/xmlcreate.py
|
Python
|
gpl-2.0
| 731
| 0.005472
|
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
from xml.dom import minidom
import io
"""
using xml.etree.ElementTree
"""
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = etree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t")
root = Element('person')
tree = ElementTree(root)
name = Element('name')
root.append(name)
name.text = 'Julie'
root.set('id', '123')
# print etree.tostring(root)
print(prettify(root))
tree.write(open('person.xml', 'w'))
f2 = io.open('person2.xml', 'w', encoding = 'utf-8')
f2.write(prettify(root))
|
themoken/canto-next
|
plugins/sync-rsync.py
|
Python
|
gpl-2.0
| 11,388
| 0.005444
|
# Canto rsync Plugin
# by Jack Miller
# v1.1
# This implements a lightweight remote sync based around rsync to a remote
# server, or copying to mounted filesystem, etc.
ENABLED = False
#ENABLED = True
# SSH
# For ssh based rsync (remote hosts) you should have key authentication setup
# so it runs without prompting for a password.
#SYNC_LOCATION = "user@host:"
# Dropbox, assuming you have dropbox running
#SYNC_LOCATION = "~/Dropbox/"
# Mount / NFS / sshfs etc.
#SYNC_LOCATION = "/mnt/wherever/"
# Synchronization interval in seconds
INTERVAL = 5 * 60
# How long, in seconds, we should wait for the initial sync. Setting to 0 will
# cause a sync to occur before any other items can be read from disk, which
# ensures you won't see any old items, but also means a full sync has to occur
# before any items make it to the client and causes a long delay on startup.
INITIAL_SYNC = 30
#============================================
# Probably won't need to change these.
# rsync
# -a (archive mode) to preserve times / perms
# -v (verbose) to output interesting log info
# -z (compress) to save bandwidth
CMD = [ "rsync", "-avz"]
targets = { "db" : ".cantofeeds",
"conf" : ".cantoconf"
}
from canto_next.plugins import check_program
check_program("canto-daemon", "canto-remote")
if not ENABLED:
raise Exception("Plugin disabled.")
from canto_next.hooks import on_hook, call_hook
from canto_next.canto_backend import DaemonBackendPlugin
from canto_next.remote import DaemonRemotePlugin
from canto_next.config import parse_locks, parse_unlocks, config
from canto_next.locks import config_lock, feed_lock
from canto_next.feed import wlock_all, wunlock_all, rlock_all, runlock_all, allfeeds
from canto_next.tag import alltags
from tempfile import mkstemp
import subprocess
import logging
import shutil
import gzip
import json
import time
import os
log = logging.getLogger("SYNC-RSYNC")
class CantoFileSync(DaemonBackendPlugin):
def __init__(self, backend):
self.plugin_attrs = {
"cmd_sync" : self.cmd_sync,
"cmd_syncto" : self.cmd_syncto
}
self.backend = backend
# Plugin __init__ happens extremely early so that plugin types can be
# used in validating configuration, etc. We use the daemon_serving hook
# to do our work after the config and storage is setup.
on_hook("daemon_serving", self.setup)
def setup(self):
# Use setattributes and setconfigs commands to determine that we are the fresh
# copy that should be synchronized.
on_hook("daemon_end_loop", self.loop)
on_hook("daemon_pre_setconfigs", self.pre_setconfigs)
on_hook("daemon_pre_setattributes", self.pre_setattributes)
on_hook("daemon_exit", self.cmd_syncto)
self.reset()
# sync will grab files, check the timediff on the file if the file is
# actually newer (like we failed to sync last time) then it will set
# fresh_config and do a syncto.
self.sync_ts = 0
if (INITIAL_SYNC == 0):
self.cmd_sync()
elif (INITIAL_SYNC < INTERVAL):
self.sync_ts = time.time() - (INTERVAL - INITIAL_SYNC)
def reset(self):
self.fresh_config = False
self.sent_config = False
self.fresh_content = False
self.sent_content = False
# Use hooks to determine when we need to copy stuff.
def pre_setattributes(self, socket, args):
self.fresh_content = True
def pre_setconfigs(self, socket, args):
self.fresh_config = True
# Open a shelf at path, determine if it's been changed more recently than
# our current shelf.
def time_diff(self, path):
log.debug("Checking if %s is older than our shelf.", path)
try:
fp = gzip.open(path, "rt", 9, "UTF-8")
s = json.load(fp)
fp.close()
except:
# If something messed up, assume that the sync failed and
# pretend that we're newer anyway.
return -1
if "control" in s and "canto-user-modified" in s["control"]:
remote_stamp = s["control"]["canto-user-modified"]
else:
log.debug("Remote has no timestamp")
return -1
rlock_all()
if "control" in self.backend.shelf and "canto-user-modified" in self.backend.shelf["control"]:
local_stamp = self.backend.shelf["control"]["canto-user-modified"]
runlock_all()
else:
log.debug("We have no timestamp")
runlock_all()
return 1
if remote_stamp > local_stamp:
log.debug("db: We are older")
elif remote_stamp == local_stamp:
log.debug("db: We are equal")
else:
log.debug("db: We are newer")
return remote_stamp - local_stamp
def cmd_syncto(self, socket = None, args = None):
if self.fresh_content:
f, fname = mkstemp()
os.close(f)
# Lock feeds to make sure nothing's in flight
wlock_all()
# Sync the shelf so it's all on disk
self.backend.shelf.sync()
shutil.copyfile(self.backend.feed_path, fname)
# Let everything else continue
wunlock_all()
call_hook("daemon_syncto", [ "db", fname ])
# Cleanup temp file
os.unlink(fname)
self.fresh_content = False
self.sent_content = True
if self.fresh_config:
f, fname = mkstemp()
os.close(f)
config_lock.acquire_read()
shutil.copyfile(self.backend.conf_path, fname)
config_lock.release_read()
call_hook("daemon_syncto", [ "conf", fname ])
os.unlink(fname)
self.fresh_config = False
self.sent_config = True
def cmd_sync(self, socket = None, args = None):
needs_syncto = False
if not self.sent_config:
f, fname = mkstemp()
os.close(f)
call_hook("daemon_syncfrom", [ "conf", fname ])
conf_stat = os.stat(self.backend.conf_path)
sync_stat = os.stat(fname)
log.debug('conf: %s sync: %s' % (conf_stat.st_mtime, sync_stat.st_mtime))
diff = sync_stat.st_mtime - conf_stat.st_mtime
# Will be empty tempfile if syncfrom failed.
if sync_stat.st_size != 0:
if diff > 0:
log.debug("conf: We are older")
parse_locks()
shutil.move(fname, self.backend.conf_path)
config.parse()
parse_unlocks()
# Echo these changes to all connected sockets that care
for socket in self.backend.watches["config"]:
self.backend.in_configs({}, socket)
elif diff == 0:
log.debug("conf: We are equal")
os.unlink(fname)
else:
log.debug("conf: We are newer")
os.unlink(fname)
self.fresh_config = True
needs_syncto = True
else:
os.unlink(fname)
if not self.sent_content:
f, fname = mkstemp()
os.close(f)
call_hook("daemon_syncfrom", [ "db", fname ])
diff = self.time_diff(fname)
if diff > 0:
# Lock feeds to make sure nothing's in flight
wlock_all()
# Close the file so we can replace it.
self.backend.shelf.close()
shutil.move(fname, self.backend.feed_path)
self.backend.shelf.open()
# Clear out all of the currently tagged items. Usually on
# update, we're able to discard items that we have in old
# content, but aren't in new. But since we just replaced all of
# our old content with a totally fresh copy, we might not know
# they exist. Can't use reset
|
bebox/lhp
|
source/lhpFunctions.py
|
Python
|
unlicense
| 6,564
| 0.043419
|
import operator
def pozicijaSprite(broj, x_velicina):
    # returns the pixel at which the sprite is located
    pixel = broj * (x_velicina + 1)  # 1 is an empty row between sprites
return(pixel)
#spriteSlova = ["A", "B", "C", "D", "E", "F", "G", "H", "i", "s", "e"]
spriteSlova = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "s", ",", "'", "1", "2", "4", "8", "6", "3",
               ".", "5", "7", "9", "0", "M", "B", "I", "N", "S", "E", "R", "T", " ", "-", "V", "U", "A", "L", "O", "D", ":", "m", "j", "n", "u", "C", "H", "k", "l", "o", "p", "r", "t", "v", "z", "K", "P", "%", "/"]
def pixel2Ton(pixel):
rezolucija = 90
    indent = -12  # extra pixels
height = 3
    broj = ( rezolucija - pixel - indent ) / height
return(int(broj))
predikati = {
0 : 0,
1 : -1,
2 : 1,
3 : 0
}
kljucevi = {
0 : ("d", ",,"),
1 : ("e", ",,"),
2 : ("f", ",,"),
3 : ("g", ",,"),
4 : ("a", ",,"),
5 : ("h", ",,"),
6 : ("c", ","),
7 : ("d", ","),
8 : ("e", ","),
9 : ("f", ","),
10 : ("g", ","),
11 : ("a", ","),
12 : ("h", ","),
13 : ("c", ""),
14 : ("d", ""),
15 : ("e", ""),
16 : ("f", ""),
17 : ("g", ""),
18 : ("a", ""),
19 : ("h", ""),
20 : ("c", "'"),
21 : ("d", "'"),
22 : ("e", "'"),
23 : ("f", "'"),
24 : ("g", "'"),
25 : ("a", "'"),
26 : ("h", "'"),
27 : ("c", "''"),
28 : ("d", "''"),
29 : ("e", "''"),
30 : ("f", "''"),
31 : ("g", "''"),
32 : ("a", "''"),
33 : ("h", "''"),
34 : ("c", "'''"),
35 : ("d", "'''"),
36 : ("e", "'''"),
37 : ("f", "'''"),
38 : ("g", "'''"),
39 : ("a", "'''"),
40 : ("h", "'''")
}
def removeLily(slovo):
return(slovo.replace(',', '').replace('\'', '').upper())
def slovoPozicija(slovo):
for i in [i for i,x in enumerate(spriteSlova) if x == slovo]:
return(i)
rijecnikNotnihVrijednosti = {
0 : "16",
1 : "8",
2 : "8.",
3 : "4",
4 : "416",
5 : "4.",
6 : "4.16",
7 : "2",
8 : "216",
9 : "28",
10 : "28.",
11 : "2.",
12 : "2.16",
13 : "2.8",
14 : "2.8.",
15 : "1"
}
def pixel2Pozicija(pixel):
rezolucija = 90
    indent = 19  # extra pixels
width = 6
broj = ( pixel - indent ) / width
return(int(broj))
def pixel2Trajanje(pixel):
indent = 4
width = 6
broj = ( pixel - indent ) / width
return(int(broj))
def ton2Pixel(ton):
rezolucija = 90
indent = -12
height = 3
pixel = rezolucija - indent - ( ton * height )
return(pixel)
def pozicija2Pixel(pozicija):
rezolucija = 90
    indent = 19  # extra pixels
width = 6
pixel = pozicija * width + indent
return(pixel)
def trajanje2Pixel(trajanje):
indent = 4
width = 6
pixel = trajanje * width + indent
return(pixel)
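# Illustrative sketch (added for clarity, not part of the original module): the
# pixel<->value helpers above are exact inverses whenever the pixel falls on the
# grid defined by the indent/height/width constants. The function name below is
# hypothetical.
def _primjer_konverzija():
    assert ton2Pixel(pixel2Ton(45)) == 45            # tone grid, step 3
    assert pozicija2Pixel(pixel2Pozicija(31)) == 31  # position grid, step 6
    assert trajanje2Pixel(pixel2Trajanje(28)) == 28  # duration grid, step 6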
class dodaj_notu(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class add_chord(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class add_markup(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class cursor(object):
def __init__(self, pozicija, ton, trajanje):
self.pozicija = pozicija
self.ton = ton
self.trajanje = trajanje
self.sprite = 0
self.bg_scroll_x = 0
self.bg_scroll_y = 0
        self.bg_scroll_x_offset = 0  # used for cursor follow effect
        self.bg_scroll_y_offset = 0  # used for cursor follow effect
        self.apsolute_x = 0  # used for cursor follow effect
        self.apsolute_y = 0  # used for cursor follow effect
def checkXColision(nota, cursorLeft, trajanje):
if ( nota.pozicija == cursorLeft):
print("kolizija na pocetku note s CL")
return(True)
elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
print("kolizija na sredini note s CL")
return(True)
elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
print("kolizija na kraju note s CL")
return(True)
elif ( nota.pozicija == ( cursorLeft + trajanje)):
print("kolizija na pocetku note s CR")
return(True)
elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
print("kolizija na sredini note sa CR")
return(True)
elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
print("kolizija na kraju note s CR")
return(True)
elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
print("kolizija note unutar Cursora")
return(True)
else:
return(False)
# sort a list of note objects by an attribute, e.g.:
# lista.sort(key=operator.attrgetter('broj'))
def findNote(nota, cursorLeft, trajanje):
if ( nota.pozicija == cursorLeft):
print("na pocetku note s CL")
return(1)
elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
print("na sredini note s CL")
return(2)
elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
print("na kraju note s CL")
return(3)
elif ( nota.pozicija == ( cursorLeft + trajanje)):
print("na pocetku note s CR")
return(4)
elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
print("na sredini note sa CR")
return(5)
elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
print("na kraju note s CR")
return(6)
elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
print("note unutar Cursora")
return(7)
else:
return(False)
letter2MidiNumberPrefix = {
"c" : "0",
"d" : "2",
"e" : "4",
"f" : "5",
"g" : "7",
"a" : "9",
"h" : "11",
}
letter2MidiOctave = {
",," : "24",
"," : "36",
"" : "48",
"'" : "60",
"''" : "72",
"'''" : "84",
}
predikat2Midi = {
0 : 0,
1 : 1,
2 : -1,
}
def nota2MidiNumber(nota):
return(int(letter2MidiNumberPrefix[kljucevi[nota.ton][0]]) + int(letter2MidiOctave[kljucevi[nota.ton][1]]) + int(predikat2Midi[nota.predikat]))
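# Illustrative example (added for clarity): a note with ton=13 and predikat=0 maps
# through kljucevi[13] == ("c", "") to MIDI number 0 + 48 + 0 = 48.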
def get_git_revision_short_hash():
import subprocess
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
|
linglung/ytdl
|
youtube_dl/extractor/spankbang.py
|
Python
|
unlicense
| 2,167
| 0.001384
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SpankBangIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|[a-z]{2})\.)?spankbang\.com/(?P<id>[\da-z]+)/video'
_TESTS = [{
'url': 'http://spankbang.com/3vvn/video/fantasy+solo',
'md5': '1cc433e1d6aa14bc376535b8679302f7',
'info_dict': {
'id': '3vvn',
'ext': 'mp4',
'title': 'fantasy solo',
'description': 'Watch fantasy solo free HD porn video - 05 minutes - dillion harper masturbates on a bed free adult movies.',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'silly2587',
'age_limit': 18,
}
}, {
# 480p only
'url': 'http://spankbang.com/1vt0/video/solvane+gangbang',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
stream_key = self._html_search_regex(
r'''var\s+stream_key\s*=\s*['"](.+?)['"]''',
webpage, 'stream key')
formats = [{
            'url': 'http://spankbang.com/_%s/%s/title/%sp__mp4' % (video_id, stream_key, height),
'ext': 'mp4',
'format_id': '%sp' % height,
'height': int(height),
} for height in re.findall(r'<(?:span|li|p)[^>]+[qb]_(\d+)p', webpage)]
self._check_formats(formats, video_id)
self._sort_formats(formats)
title = self._html_search_regex(
r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
uploader = self._search_regex(
r'class="user"[^>]*><img[^>]+>([^<]+)',
webpage, 'uploader', fatal=False)
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'formats': formats,
'age_limit': age_limit,
}
|
BigRoy/lucidity
|
test/unit/test_template.py
|
Python
|
apache-2.0
| 12,561
| 0.001672
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from lucidity import Template, Resolver
from lucidity.error import ParseError, FormatError, ResolveError
class ResolverFixture(Resolver):
'''Example resolver.'''
def __init__(self, templates=None):
'''Initialise resolver with templates.'''
super(ResolverFixture, self).__init__()
self.templates = templates or []
def get(self, template_name, default=None):
'''Return template with *template_name*.
If no template matches then return *default*.
'''
for template in self.templates:
if template.name == template_name:
return template
return default
@pytest.fixture(scope='session')
def template_resolver():
'''Return template resolver instance.'''
resolver = ResolverFixture()
resolver.templates.extend([
Template('reference', '{variable}', template_resolver=resolver),
Template('nested', '/root/{@reference}', template_resolver=resolver)
])
return resolver
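# Illustrative test (added for clarity, not part of the original suite): the
# '{@name}' placeholders used below embed one template inside another via the
# resolver defined above.
def test_reference_example(template_resolver):
    '''Parse and format a path through the nested reference template.'''
    template = Template('test', '{@nested}/file', template_resolver=template_resolver)
    assert template.parse('/root/shot01/file') == {'variable': 'shot01'}
    assert template.format({'variable': 'shot01'}) == '/root/shot01/file'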
@pytest.mark.parametrize('pattern', [
'',
'{variable}',
'{dotted.variable}',
'{variable}/{variable}',
'{variable:\w+?}'
], ids=[
'empty',
'single variable',
'dotted variable',
'duplicate variable',
'custom expression'
])
def test_valid_pattern(pattern):
'''Construct template with valid pattern.'''
Template('test', pattern)
@pytest.mark.parametrize('pattern', [
'{}',
'{variable-dashed}',
'{variable:(?P<missing_closing_angle_bracket)}'
], ids=[
'empty placeholder',
'invalid placeholder character',
'invalid placeholder expression'
])
def test_invalid_pattern(pattern):
'''Construct template with invalid pattern.'''
with pytest.raises(ValueError):
Template('test', pattern)
@pytest.mark.parametrize(('pattern', 'path', 'expected'), [
('/static/string', '/static/string', {}),
('/single/{variable}', '/single/value', {'variable': 'value'}),
('/{variable}/{variable}', '/first/second', {'variable': 'second'}),
('/static/{variable:\d\{4\}}', '/static/1234', {'variable': '1234'}),
('/{a}/static/{b}', '/first/static/second', {'a': 'first', 'b': 'second'}),
('/{a.b.c}/static/{a.b.d}', '/first/static/second',
{'a': {'b': {'c': 'first', 'd': 'second'}}}),
('/{a}_{b}', '/first_second', {'a': 'first', 'b': 'second'}),
('/single/{@reference}', '/single/value', {'variable': 'value'}),
('{@nested}/reference', '/root/value/reference', {'variable': 'value'})
], ids=[
'static string',
'single variable',
'duplicate variable',
'custom variable expression',
'mix of static and variables',
'structured placeholders',
'neighbouring variables',
'single reference',
'nested reference'
])
def test_matching_parse(pattern, path, expected, template_resolver):
'''Extract data from matching path.'''
template = Template('test', pattern, template_resolver=template_resolver)
data = template.parse(path)
assert data == expected
@pytest.mark.parametrize(('pattern', 'path'), [
('/static/string', '/static/'),
('/single/{variable}', '/static/'),
('/static/{variable:\d+}', '/static/foo'),
('/single/{variable}/{@reference}', '/single/value/'),
('{@nested}/reference', '/root/value')
], ids=[
'string too short',
'missing variable',
'mismatching custom expression',
'string not accounting for reference',
'string not accounting for nested reference'
])
def test_non_matching_parse(pattern, path, template_resolver):
'''Extract data from non-matching path.'''
template = Template('test', pattern, template_resolver=template_resolver)
with pytest.raises(ParseError):
data = template.parse(path)
@pytest.mark.parametrize(('pattern', 'path', 'expected'), [
('/{variable}/{variable}', '/value/value', {'variable': 'value'}),
('/static/{variable:\d\{4\}}/other/{variable}', '/static/1234/other/1234',
{'variable': '1234'}),
('/{a.b.c}/static/{a.b.c}', '/value/static/value',
{'a': {'b': {'c': 'value'}}}),
('/{a}/{b}/other/{a}_{b}', '/a/b/other/a_b', {'a': 'a', 'b': 'b'}),
('{@nested}/{variable}', '/root/value/value', {'variable': 'value'})
], ids=[
'simple duplicate',
'duplicate with one specialised expression',
'structured duplicate',
'multiple duplicates',
'duplicate from reference'
])
def test_valid_parse_in_strict_mode(pattern, path, expected, template_resolver):
'''Extract data in strict mode when no invalid duplicates detected.'''
template = Template(
'test', pattern, duplicate_placeholder_mode=Template.STRICT,
template_resolver=template_resolver
)
data = template.parse(path)
assert data == expected
@pytest.mark.parametrize(('pattern', 'path'), [
('/{variable}/{variable}', '/a/b'),
('/static/{variable:\d\{4\}}/other/{variable}', '/static/1234/other/2345'),
('/{a.b.c}/static/{a.b.c}', '/c1/static/c2'),
    ('/{a}/{b}/other/{a}_{b}', '/a/b/other/c_d'),
('{@nested}/{variable}', '/root/different/value')
], ids=[
'simple duplicate',
'duplicate with one specialised expression',
'structured duplicate',
'multiple duplicates',
'duplicate from reference'
])
def test_invalid_parse_in_strict_mode(pattern, path, template_resolver):
'''Fail to extract data in strict mode when invalid duplicates detected.'''
template = Template(
'test', pattern, duplicate_placeholder_mode=Template.STRICT,
template_resolver=template_resolver
)
with pytest.raises(ParseError) as exception:
template.parse(path)
assert 'Different extracted values' in str(exception.value)
@pytest.mark.parametrize(('path', 'anchor', 'expected'), [
('/static/value/extra', Template.ANCHOR_START, True),
('/static/', Template.ANCHOR_START, False),
('/extra/static/value', Template.ANCHOR_END, True),
('/static/value/extra', Template.ANCHOR_END, False),
('/static/value', Template.ANCHOR_BOTH, True),
('extra/static/value', Template.ANCHOR_BOTH, False),
('/static/value/extra', Template.ANCHOR_BOTH, False),
('extra/static/value/extra', None, True),
('extra/non/matching/extra', None, False)
], ids=[
'anchor_start:matching string',
'anchor_start:non-matching string',
'anchor_end:matching string',
'anchor_end:non-matching string',
'anchor_both:matching string',
'anchor_both:non-matching string prefix',
'anchor_both:non-matching string suffix',
'anchor_none:matching string',
'anchor_none:non-matching string'
])
def test_anchor(path, anchor, expected):
'''Parse path with specific anchor setting.'''
pattern = '/static/{variable}'
template = Template('test', pattern, anchor=anchor)
if not expected:
with pytest.raises(ParseError):
template.parse(path)
else:
data = template.parse(path)
assert data == {'variable': 'value'}
@pytest.mark.parametrize(('pattern', 'data', 'expected'), [
('/static/string', {}, '/static/string'),
('/single/{variable}', {'variable': 'value'}, '/single/value'),
('/{variable}/{variable}', {'variable': 'value'}, '/value/value'),
('/static/{variable:\d\{4\}}', {'variable': '1234'}, '/static/1234'),
('/{a}/static/{b}', {'a': 'first', 'b': 'second'}, '/first/static/second'),
('/{a.b.c}/static/{a.b.d}', {'a': {'b': {'c': 'first', 'd': 'second'}}},
'/first/static/second'),
('/single/{@reference}', {'variable': 'value'}, '/single/value'),
('{@nested}/reference', {'variable': 'value'}, '/root/value/reference'),
], ids=[
'static string',
'single variable',
'duplicate variable',
'custom variable expression',
'mix of static and variables',
'structured placeholders',
'reference',
'nested reference'
])
def test_format(pattern, data, expected, template_resolver):
'''Format data against pattern.'''
template = Template('test', pattern, template_resolver=template_resolver)
formatted = template.format(data)
    assert formatted == expected
|
fishpepper/OpenSky
|
stylecheck/cpplint_unittest.py
|
Python
|
gpl-3.0
| 228,992
| 0.002559
|
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for cpplint.py."""
# TODO(unknown): Add a good test that tests UpdateIncludeState.
import codecs
import os
import random
import re
import sys
import unittest
import cpplint
# This class works as an error collector and replaces cpplint.Error
# function for the unit tests. We also verify each category we see
# is in cpplint._ERROR_CATEGORIES, to help keep that list up to date.
class ErrorCollector(object):
# These are a global list, covering all categories seen ever.
_ERROR_CATEGORIES = cpplint._ERROR_CATEGORIES
_SEEN_ERROR_CATEGORIES = {}
def __init__(self, assert_fn):
"""assert_fn: a function to call when we notice a problem."""
self._assert_fn = assert_fn
self._errors = []
cpplint.ResetNolintSuppressions()
def __call__(self, unused_filename, linenum,
category, confidence, message):
self._assert_fn(category in self._ERROR_CATEGORIES,
'Message "%s" has category "%s",'
' which is not in _ERROR_CATEGORIES' % (message, category))
self._SEEN_ERROR_CATEGORIES[category] = 1
if cpplint._ShouldPrintError(category, confidence, linenum):
self._errors.append('%s [%s] [%d]' % (message, category, confidence))
def Results(self):
if len(self._errors) < 2:
return ''.join(self._errors) # Most tests expect to have a string.
else:
return self._errors # Let's give a list if there is more than one.
def ResultList(self):
return self._errors
def VerifyAllCategoriesAreSeen(self):
"""Fails if there's a category in _ERROR_CATEGORIES~_SEEN_ERROR_CATEGORIES.
This should only be called after all tests are run, so
_SEEN_ERROR_CATEGORIES has had a chance to fully populate. Since
    this isn't called from within the normal unittest framework, we
    can't use the normal unittest assert macros. Instead we just exit
    when we see an error. Good thing this test is always run last!
"""
for category in self._ERROR_CATEGORIES:
if category not in self._SEEN_ERROR_CATEGORIES:
sys.exit('FATAL ERROR: There are no tests for category "%s"' % category)
def RemoveIfPresent(self, substr):
for (index, error) in enumerate(self._errors):
if error.find(substr) != -1:
self._errors = self._errors[0:index] + self._errors[(index + 1):]
break
# This class is a lame mock of codecs. We do not verify filename, mode, or
# encoding, but for the current use case it is not needed.
class MockIo(object):
def __init__(self, mock_file):
self.mock_file = mock_file
def open(self, # pylint: disable-msg=C6409
unused_filename, unused_mode, unused_encoding, _):
return self.mock_file
class CpplintTestBase(unittest.TestCase):
"""Provides some useful helper functions for cpplint tests."""
def setUp(self):
# Allow subclasses to cheat os.path.abspath called in FileInfo class.
self.os_path_abspath_orig = os.path.abspath
def tearDown(self):
os.path.abspath = self.os_path_abspath_orig
# Perform lint on single line of input and return the error message.
def PerformSingleLineLint(self, code):
error_collector = ErrorCollector(self.assert_)
lines = code.split('\n')
cpplint.RemoveMultiLineComments('foo.h', lines, error_collector)
clean_lines = cpplint.CleansedLines(lines)
include_state = cpplint._IncludeState()
function_state = cpplint._FunctionState()
nesting_state = cpplint.NestingState()
cpplint.ProcessLine('foo.cc', 'cc', clean_lines, 0,
include_state, function_state,
nesting_state, error_collector)
# Single-line lint tests are allowed to fail the 'unlintable function'
# check.
error_collector.RemoveIfPresent(
'Lint failed to find start of function body.')
return error_collector.Results()
# Perform lint over multiple lines and return the error message.
def PerformMultiLineLint(self, code):
error_collector = ErrorCollector(self.assert_)
lines = code.split('\n')
cpplint.RemoveMultiLineComments('foo.h', lines, error_collector)
lines = cpplint.CleansedLines(lines)
nesting_state = cpplint.NestingState()
for i in xrange(lines.NumLines()):
nesting_state.Update('foo.h', lines, i, error_collector)
cpplint.CheckStyle('foo.h', lines, i, 'h', nesting_state,
error_collector)
cpplint.CheckForNonStandardConstructs('foo.h', lines, i,
nesting_state, error_collector)
nesting_state.CheckCompletedBlocks('foo.h', error_collector)
return error_collector.Results()
# Similar to PerformMultiLineLint, but calls CheckLanguage instead of
# CheckForNonStandardConstructs
def PerformLanguageRulesCheck(self, file_name, code):
error_collector = ErrorCollector(self.assert_)
include_state = cpplint._IncludeState()
nesting_state = cpplint.NestingState()
lines = code.split('\n')
cpplint.RemoveMultiLineComments(file_name, lines, error_collector)
lines = cpplint.CleansedLines(lines)
ext = file_name[file_name.rfind('.') + 1:]
for i in xrange(lines.NumLines()):
cpplint.CheckLanguage(file_name, lines, i, ext, include_state,
nesting_state, error_collector)
return error_collector.Results()
def PerformFunctionLengthsCheck(self, code):
"""Perform Lint function length check on block of code and return warnings.
Builds up an array of lines corresponding to the code and strips comments
using cpplint functions.
Establishes an error collector and invokes the function length checking
function following cpplint's pattern.
Args:
code: C++ source code expected to generate a warning message.
Returns:
The accumulated errors.
"""
file_name = 'foo.cc'
error_collector = ErrorCollector(self.assert_)
function_state = cpplint._FunctionState()
lines = code.split('\n')
cpplint.RemoveMultiLineComments(file_name, lines, error_collector)
lines = cpplint.CleansedLines(lines)
for i in xrange(lines.NumLines()):
cpplint.CheckForFunctionLengths(file_name, lines, i,
function_state, error_collector)
return error_collector.Results()
def PerformIncludeWhatYouUse(self, code, filename='foo.h', io=codecs):
# First, build up the include state.
error_collector = ErrorCollector(self.assert_)
include_state = cpplint._IncludeState()
nesting_state = cpplint.NestingState()
lines = code.split('\n')
    cpplint.RemoveMultiLineComments(filename, lines, error_collector)
|
kevin2314/TextGame
|
game/main.py
|
Python
|
mit
| 4,805
| 0.019771
|
from inventory import Inventory
import cmd
from room import get_room
from player import Player
import textwrap
import time
import random
class Controls(cmd.Cmd):
prompt = '> '
def __init__(self):
#-----------------------------------------------------------------------
#Here the game is initialized asking for commands via the Cmd module,
#the variables given are the first room you start in and prints out the
#location
cmd.Cmd.__init__(self)
self.loc = get_room('intro')
self.look()
self.pos()
self.event = Events()
self.inventory = Inventory()
self.Player = Player()
#------------------------------------------------------------------------
#This checks which room you are in if you can go the way for the command
#given and prints out your location
def emptyline(self):
pass
def objects(self, args):
objects = self.loc._objects(args)
if objects is None:
            print(('There are no ' + repr(args) + ' in the area'))
self.look()
else:
self.look()
def move(self, dir):
newroom = self.loc._neighbor(dir)
if newroom is None:
            print('''You cannot go this way''')
self.look()
else:
self.loc = get_room(newroom)
self.look()
# event.spawnAtPos()
def pos(self):
position = self.loc.name
def look(self):
# print((self.loc.name))
for line in textwrap.wrap(self.loc.description, 72):
print(line)
print('')
#-----------------------------------------------------------------------
#commands
#movement
def do_n(self, args):
'''goes north'''
self.move('n')
def do_s(self, args):
'''goes south'''
self.move('s')
def do_e(self, args):
'''goes east'''
self.move('e')
self.move('east')
def do_w(self, args):
'''goes west'''
self.move('w')
def do_climb(self, args):
'''Climbs where possible'''
self.move('climb')
def do_get(self, args):
'''Gets items from an area or from your bag'''
if self.inventory.slots[args] > 0:
            self.Player.right_hand(args)
else:
print('You do not have this item')
def do_enter(self, args):
'''Enters rooms, Villages, and caves where possible'''
self.move('enter')
def do_leave(self, args):
'''Exits the current room'''
self.move('leave')
def help_get(self):
for i in (textwrap.wrap(''' If you are trying to grab an item out from
your bag type get followed by the item in your bag, this applys to
items in an area as well''', 72)):
print(('', i))
#prompts
def do_sky(self, args):
self.event.sky()
def do_time(self, args):
self.event.timeOfDay()
def do_chop(self, args):
self.objects('trees')
def do_name(self, args):
'''Prints the users name if there is one'''
        self.Player.player_name()
def do_hand(self, args):
'''Prints what is in hand'''
if self.Player.hand() == ' ':
print("You are not holding anything")
else:
            print(self.Player.hand())
def do_next(self, args):
'''Gets the next event'''
self.move('next')
def do_look(self, args):
'''Prints the current area you are in'''
self.look()
def do_inventory(self, args):
'''Checks Inventory'''
self.inventory.bag()
self.look()
def do_quit(self, args):
'''Quits the game'''
print("thank you for playing")
return True
''' def do_pos(self, args):
print(self.loc.name) '''
class Events(object):
# In this events class we will handle all game events such as time,
# spawning of monsters, and possibly special event occurenses based on date, time of day
# I'm thinking of making this games time as the same as the system time.
def __init__(self):
self.room = Controls.pos
self.time = time
def timeOfDay(self):
print('The time is ' + time.strftime('%I:%M %p'))
def sky(self):
timeInInt = int(time.strftime("%I"))
timeInAmPm = time.strftime("%p")
if timeInAmPm == 'AM':
print("It is morning")
elif timeInAmPm == 'PM':
if timeInInt <= 5:
print("It is afternoon")
            elif timeInInt > 5 and timeInInt <= 11:
print("It is night")
#-------------------------------------------------
# creature spawning
    def spawnAtPos(self):
        chance = random.randrange(100)
        if chance <= 49:
            print("There is a monster in the area")
        else:
            print("The area seems safe for now")
if __name__ == '__main__':
c = Controls()
c.cmdloop()
|
openaid-IATI/OIPA
|
OIPA/iati/migrations/0031_remove_resultindicatorperiodtargetdimension_result_indicator_period.py
|
Python
|
agpl-3.0
| 372
| 0
|
# Generated by Django 2.0.6 on 2018-08-31 10:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('iati', '0030_auto_20180831_1001'),
]
operations = [
migrations.RemoveField(
model_name='resultindicatorperiodtargetdimension',
name='result_indicator_period',
),
]
|
darogan/ParticleStats
|
setup.py
|
Python
|
gpl-3.0
| 3,803
| 0.012885
|
###############################################################################
# ____ _ _ _ ____ _ _ #
# | _ \ __ _ _ __| |_(_) ___| | ___/ ___|| |_ __ _| |_ ___ #
# | |_) / _` | '__| __| |/ __| |/ _ \___ \| __/ _` | __/ __| #
# | __/ (_| | | | |_| | (__| | __/___) | || (_| | |_\__ \ #
# |_| \__,_|_| \__|_|\___|_|\___|____/ \__\__,_|\__|___/ #
# #
###############################################################################
# ParticleStats: Open source software for the analysis of particle #
# motility and cytoskelteal polarity #
# #
# Contact: Russell.Hamilton@bioch.ox.ac.uk #
# http://www.ParticleStats.com #
# Department of Biochemistry, South Parks Road, #
# University of Oxford OX1 3QU #
# Copyright (C) 2009 Russell S. Hamilton #
###############################################################################
# GNU Licence Details: #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from distutils.core import setup, Extension
module1 = Extension('ParticleStats_linRegressFit',
sources = ['src/ParticleStats_linRegressFit.c'])
setup(
name = 'ParticleStats',
version = '2.0',
author = 'Russell Hamilton',
author_email = 'darogan@gmail.com',
url = 'http://www.ParticleStats.com',
description = 'ParticleStats: open source software for the analysis of particle motility',
platforms = ["platform independent"],
    license = 'GPLv3',
packages = ['ParticleStats'],
#py_modules=[ "ParticleStats_Inputs.py", "ParticleStats_Maths.py", "ParticleStats_Outputs.py", "ParticleStats_Plots.py", "ParticleStats_RandomTrailGenerator.py", "ParticleStats_Vectors.py", "Test_Interactive_OSX_DivideUpLines.py", "Test_Interactive_OSX.py", "Test_Interactive.py" ],
scripts=["scripts/ParticleStats_Compare.py", "scripts/ParticleStats_Directionality.py", "scripts/ParticleStats_Kymographs.py", "scripts/ParticleStats_ROI.py", "scripts/ParticleStats_Trackmate.py", "scripts/ParticleStats_Vibtest.py", "scripts/TrackAlign.py", "scripts/ParticleStats_Behavioral.py"],
ext_modules = [module1],
package_data={
'ParticleStats': [
"examples/*.xls",
"examples/*.txt",
"examples/*.tif", ],
}
)
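# Illustrative usage (added for clarity; these are the standard distutils commands,
# nothing specific to this project):
#   python setup.py build     # also compiles the ParticleStats_linRegressFit extension
#   python setup.py install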
|
harshilasu/GraphicMelon
|
y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/dynamodb/types.py
|
Python
|
gpl-3.0
| 10,121
| 0
|
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Some utility functions to deal with mapping Amazon DynamoDB types to
Python types and vice-versa.
"""
import base64
from decimal import (Decimal, DecimalException, Context,
Clamped, Overflow, Inexact, Underflow, Rounded)
from boto.dynamodb.exceptions import DynamoDBNumberError
DYNAMODB_CONTEXT = Context(
Emin=-128, Emax=126, rounding=None, prec=38,
traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
# python2.6 cannot convert floats directly to
# Decimals. This is taken from:
# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq
def float_to_decimal(f):
n, d = f.as_integer_ratio()
numerator, denominator = Decimal(n), Decimal(d)
ctx = DYNAMODB_CONTEXT
result = ctx.divide(numerator, denominator)
while ctx.flags[Inexact]:
ctx.flags[Inexact] = False
ctx.prec *= 2
result = ctx.divide(numerator, denominator)
return result
def is_num(n):
types = (int, long, float, bool, Decimal)
return isinstance(n, types) or n in types
def is_str(n):
return isinstance(n, basestring) or (isinstance(n, type) and
issubclass(n, basestring))
def is_binary(n):
return isinstance(n, Binary)
def serialize_num(val):
"""Cast a number to a string and perform
validation to ensure no loss of precision.
"""
if isinstance(val, bool):
return str(int(val))
return str(val)
def convert_num(s):
if '.' in s:
n = float(s)
else:
n = int(s)
return n
def convert_binary(n):
return Binary(base64.b64decode(n))
def get_dynamodb_type(val):
"""
Take a scalar Python value and return a string representing
the corresponding Amazon DynamoDB type. If the value passed in is
not a supported type, raise a TypeError.
"""
dynamodb_type = None
if is_num(val):
dynamodb_type = 'N'
elif is_str(val):
dynamodb_type = 'S'
elif isinstance(val, (set, frozenset)):
if False not in map(is_num, val):
dynamodb_type = 'NS'
elif False not in map(is_str, val):
dynamodb_type = 'SS'
elif False not in map(is_binary, val):
dynamodb_type = 'BS'
elif isinstance(val, Binary):
dynamodb_type = 'B'
if dynamodb_type is None:
msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
raise TypeError(msg)
return dynamodb_type
def dynamize_value(val):
"""
Take a scalar Python value and return a dict consisting
of the Amazon DynamoDB type specification and the value that
needs to be sent to Amazon DynamoDB. If the type of the value
is not supported, raise a TypeError
"""
dynamodb_type = get_dynamodb_type(val)
if dynamodb_type == 'N':
val = {dynamodb_type: serialize_num(val)}
elif dynamodb_type == 'S':
val = {dynamodb_type: val}
elif dynamodb_type == 'NS':
val = {dynamodb_type: map(serialize_num, val)}
elif dynamodb_type == 'SS':
val = {dynamodb_type: [n for n in val]}
elif dynamodb_type == 'B':
val = {dynamodb_type: val.encode()}
elif dynamodb_type == 'BS':
val = {dynamodb_type: [n.encode() for n in val]}
return val
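# Illustrative examples (added for clarity) of the mappings implemented above:
#   get_dynamodb_type(42)          -> 'N'
#   get_dynamodb_type(u'foo')      -> 'S'
#   dynamize_value(42)             -> {'N': '42'}
#   dynamize_value(set([u'foo']))  -> {'SS': [u'foo']}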
class Binary(object):
    def __init__(self, value):
if not isinstance(value, basestring):
raise TypeError('Value must be a string of binary data!')
self.value = value
def encode(self):
        return base64.b64encode(self.value)
def __eq__(self, other):
if isinstance(other, Binary):
return self.value == other.value
else:
return self.value == other
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Binary(%s)' % self.value
def __str__(self):
return self.value
def __hash__(self):
return hash(self.value)
def item_object_hook(dct):
"""
A custom object hook for use when decoding JSON item bodys.
This hook will transform Amazon DynamoDB JSON responses to something
that maps directly to native Python types.
"""
if len(dct.keys()) > 1:
return dct
if 'S' in dct:
return dct['S']
if 'N' in dct:
return convert_num(dct['N'])
if 'SS' in dct:
return set(dct['SS'])
if 'NS' in dct:
return set(map(convert_num, dct['NS']))
if 'B' in dct:
return convert_binary(dct['B'])
if 'BS' in dct:
return set(map(convert_binary, dct['BS']))
return dct
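# Illustrative example (added for clarity): when passed as object_hook to
# json.loads, the innermost type dicts collapse into plain Python values, e.g.
#   json.loads('{"Price": {"N": "19"}}', object_hook=item_object_hook)
#   -> {u'Price': 19}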
class Dynamizer(object):
"""Control serialization/deserialization of types.
This class controls the encoding of python types to the
format that is expected by the DynamoDB API, as well as
taking DynamoDB types and constructing the appropriate
python types.
If you want to customize this process, you can subclass
this class and override the encoding/decoding of
specific types. For example::
'foo' (Python type)
|
v
encode('foo')
|
v
_encode_s('foo')
|
v
{'S': 'foo'} (Encoding sent to/received from DynamoDB)
|
V
decode({'S': 'foo'})
|
v
_decode_s({'S': 'foo'})
|
v
'foo' (Python type)
"""
def _get_dynamodb_type(self, attr):
return get_dynamodb_type(attr)
def encode(self, attr):
"""
Encodes a python type to the format expected
by DynamoDB.
"""
dynamodb_type = self._get_dynamodb_type(attr)
try:
encoder = getattr(self, '_encode_%s' % dynamodb_type.lower())
except AttributeError:
raise ValueError("Unable to encode dynamodb type: %s" %
dynamodb_type)
return {dynamodb_type: encoder(attr)}
def _encode_n(self, attr):
try:
if isinstance(attr, float) and not hasattr(Decimal, 'from_float'):
# python2.6 does not support creating Decimals directly
# from floats so we have to do this ourself.
n = str(float_to_decimal(attr))
else:
n = str(DYNAMODB_CONTEXT.create_decimal(attr))
if filter(lambda x: x in n, ('Infinity', 'NaN')):
raise TypeError('Infinity and NaN not supported')
return n
except (TypeError, DecimalException), e:
msg = '{0} numeric for `{1}`\n{2}'.format(
e.__class__.__name__, attr, str(e) or '')
raise DynamoDBNumberError(msg)
def _encode_s(self, attr):
if isinstance(attr, unicode):
attr = attr.encode('utf-8')
elif not isinstance(attr, str):
attr = str(attr)
return attr
def _encode_ns(self, attr):
return map(self._encode_n, attr)
def _encode_ss(self, attr):
|
google-research/language
|
language/bert_extraction/steal_bert_classifier/utils/preprocess_distill_input_watermark.py
|
Python
|
apache-2.0
| 5,277
| 0.006822
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Combine victim model's outputs (adding watermarks at random) with queries to form a new training dataset for extraction.
This script also outputs the watermark details for subsequent verification.
"""
import random
import numpy as np
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("task_name", "sst2", "Name of task to preprocess")
flags.DEFINE_string("sents_path", None, "Path containing sentence data")
flags.DEFINE_string("probs_path", None, "Path containing probability data")
flags.DEFINE_float("watermark_fraction", 0.001,
"Fraction of points that need to be watermarked")
flags.DEFINE_string("output_path", None, "Output path for preprocessing")
flags.DEFINE_string("watermark_path", None, "Output path for watermark")
flags.DEFINE_string("split_type", "train", "Type of preprocessing")
FLAGS = flags.FLAGS
num_labels = {"sst-2": 2, "mnli": 3}
mnli_map = {
"contradiction": "\t1\t0\t0",
"entailment": "\t0\t1\t0",
"neutral": "\t0\t0\t1"
}
def main(_):
task_name = FLAGS.task_name.lower()
with gfile.Open(FLAGS.sents_path, "r") as f:
sents_data = f.read().strip().split("\n")
header = sents_data[0] + "".join(
["\tlabel%d_prob" % i for i in range(num_labels[task_name])])
watermark_prob_str = "".join(
["\twatermark%d_prob" % i for i in range(num_labels[task_name])])
original_prob_str = "".join(
["\toriginal%d_prob" % i for i in range(num_labels[task_name])])
watermark_header = sents_data[0] + watermark_prob_str + original_prob_str
sents_data = sents_data[1:]
with gfile.Open(FLAGS.probs_path, "r") as f:
probs_data = f.read().strip().split("\n")
number_watermarks = int(FLAGS.watermark_fraction * len(sents_data))
watermark_ids = list(range(len(sents_data)))
random.shuffle(watermark_ids)
watermark_ids = {x: 1 for x in watermark_ids[:number_watermarks]}
if FLAGS.split_type == "train":
assert len(sents_data) == len(probs_data)
output_data = []
watermark_data = []
for i, (x, y) in enumerate(zip(sents_data, probs_data)):
if i in watermark_ids:
orig_prob_vector = np.array([float(yy) for yy in y.split("\t")])
new_prob_vector = np.array([float(yy) for yy in y.split("\t")])
while np.argmax(new_prob_vector) == np.argmax(orig_prob_vector):
np.random.shuffle(new_prob_vector)
        # use watermarked input for the new string
new_prob_str = "\t".join([str(yy) for yy in new_prob_vector])
output_data.append(x.strip() + "\t" + new_prob_str.strip())
# add the watermarked data for future checks
watermark_data.append(x.strip() + "\t" + new_prob_str.strip() + "\t" +
y.strip())
else:
output_data.append(x.strip() + "\t" + y.strip())
elif FLAGS.split_type == "train_argmax":
assert len(sents_data) == len(probs_data)
# Round the probability vectors before adding them to file
output_data = []
watermark_data = []
for i, (x, y) in enumerate(zip(sents_data, probs_data)):
# Convert tsv probability vector to numpy style array
prob_vector = np.array([float(yy) for yy in y.split("\t")])
# initialize a vector with zeros
argmax_prob_vector = np.zeros_like(prob_vector)
# keep only the argmax prediction
argmax_prob_vector[np.argmax(prob_vector)] = 1.0
argmax_prob_str = "\t".join([str(yy) for yy in argmax_prob_vector])
if i in watermark_ids:
new_prob_vector = np.copy(argmax_prob_vector)
while np.argmax(new_prob_vector) == np.argmax(argmax_prob_vector):
np.random.shuffle(new_prob_vector)
# use watermarked input for the new string
new_prob_str = "\t".join([str(yy) for yy in new_prob_vector])
output_data.append(x.strip() + "\t" + new_prob_str.strip())
# add the watermarked data for future checks
watermark_data.append(x.strip() + "\t" + new_prob_str.strip() + "\t" +
argmax_prob_str.strip())
else:
output_data.append(x.strip() + "\t" + argmax_prob_str.strip())
logging.info("Total dataset size = %d", len(output_data))
logging.info("Total watermarked instances = %d", len(watermark_data))
output_data = [header] + output_data
watermark_data = [watermark_header] + watermark_data
with gfile.Open(FLAGS.output_path, "w") as f:
f.write("\n".join(output_data) + "\n")
with gfile.Open(FLAGS.watermark_path, "w") as f:
f.write("\n".join(watermark_data) + "\n")
return
if __name__ == "__main__":
app.run(main)
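# Illustrative invocation (added for clarity; the file paths below are hypothetical):
#   python preprocess_distill_input_watermark.py \
#     --task_name=sst-2 \
#     --sents_path=/tmp/queries.tsv \
#     --probs_path=/tmp/victim_probs.tsv \
#     --watermark_fraction=0.001 \
#     --output_path=/tmp/distill_train.tsv \
#     --watermark_path=/tmp/watermarks.tsv \
#     --split_type=train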
|
fraricci/pymatgen
|
pymatgen/analysis/cost/__init__.py
|
Python
|
mit
| 300
| 0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
__author__ = 'Anubhav Jain'
__copyright__ = 'Copyright 2014, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Oct 03, 2014'
|
buret/pylmflib
|
pylmflib/morphosyntax/paradigm.py
|
Python
|
gpl-2.0
| 3,401
| 0.00147
|
#! /usr/bin/env python
"""! @package morphosyntax
"""
from utils.attr import check_attr_type, check_attr_range
from common.range import paradigmLabel_range
from config.mdf import pdl_paradigmLabel
class Paradigm():
"""! Paradigm is a class representing a morphological paradigm.
"""
def __init__(self):
"""! @brief Constructor.
Paradigm instances are owned by Sense.
@return A Paradigm instance.
"""
self.paradigmLabel = None
        self.paradigm = None
self.language = None
self.morphology = None
# LexicalEntry lexeme
self.targets = None
## Pointer to an existing LexicalEntry
# There is zero or one LexicalEntry pointer per Paradigm instance
self.__lexical_entry = None
def __del__(self):
"""! @brief Destructor.
"""
# Decrement the reference count on pointed objects
self.__lexical_entry = None
def set_paradigmLabel(self, paradigm_label):
"""! @brief Set paradigm label.
@param paradigm_label The paradigm label to set.
@return Paradigm instance.
"""
error_msg = "Paradigm label value '%s' is not defined" % str(paradigm_label)
# Check paradigm label type
check_attr_type(paradigm_label, [str, unicode], error_msg)
# Check range of paradigm label value (also try with converted value from MDF to LMF)
value = check_attr_range(str(paradigm_label), paradigmLabel_range, error_msg, mapping=pdl_paradigmLabel)
# Do not restrict range of paradigm label value
if value is None:
value = paradigm_label
self.paradigmLabel = value
return self
def get_paradigmLabel(self):
"""! @brief Get paradigm label.
@return Paradigm attribute 'paradigmLabel'.
"""
return self.paradigmLabel
def set_paradigm(self, paradigm):
"""! @brief Set paradigm.
@param paradigm The paradigm to set.
@return Paradigm instance.
"""
self.paradigm = paradigm
return self
def get_paradigm(self, language=None):
"""! @brief Get paradigm.
@param language Language filter.
@return Paradigm attribute 'paradigm'.
"""
if language is None:
return self.paradigm
if language == self.get_language():
return self.paradigm
def set_language(self, language):
"""! @brief Set language of the paradigm.
@param language The paradigm language to set.
@return Paradigm instance.
"""
self.language = language
return self
def get_language(self):
"""! @brief Get paradigm language.
@return Paradigm attribute 'language'.
"""
return self.language
def set_morphology(self, morphology):
"""! @brief Set morphology.
@param morphology The morphology to set.
@return Paradigm instance.
"""
self.morphology = morphology
return self
def get_morphology(self):
"""! @brief Get morphology.
@return Paradigm attribute 'morphology'.
"""
return self.morphology
def get_lexical_entry(self):
"""! @brief Get pointed lexical entry.
@return Paradigm private attribute '__lexical_entry'.
"""
return self.__lexical_entry
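# Illustrative usage (added for clarity, not part of the original module): the
# setters return the Paradigm instance, so calls can be chained, e.g.
#   p = Paradigm().set_language("eng").set_paradigm("ride/rode/ridden")
#   p.get_paradigm("eng")  # -> "ride/rode/ridden"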
|
ChinaQuants/zipline
|
tests/pipeline/test_pipeline_algo.py
|
Python
|
apache-2.0
| 19,784
| 0.000051
|
"""
Tests for Algorithms using the Pipeline API.
"""
from unittest import TestCase
from os.path import (
dirname,
join,
realpath,
)
from nose_parameterized import parameterized
from numpy import (
array,
arange,
full_like,
float64,
nan,
uint32,
)
from numpy.testing import assert_almost_equal
from pandas import (
concat,
DataFrame,
date_range,
DatetimeIndex,
Panel,
read_csv,
Series,
Timestamp,
)
from six import iteritems, itervalues
from testfixtures import TempDirectory
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
attach_pipeline,
pipeline_output,
get_datetime,
)
from zipline.errors import (
AttachPipelineAfterInitialize,
PipelineOutputDuringInitialize,
NoSuchPipeline,
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
DailyBarWriterFromCSVs,
SQLiteAdjustmentWriter,
SQLiteAdjustmentReader,
)
from zipline.finance import trading
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import VWAP
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import DataFrameLoader, MULTIPLY
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.utils.test_utils import (
make_simple_asset_info,
str_to_seconds,
)
from zipline.utils.tradingcalendar import (
trading_day,
trading_days,
)
TEST_RESOURCE_PATH = join(
dirname(dirname(realpath(__file__))), # zipline_repo/tests
'resources',
'pipeline_inputs',
)
def rolling_vwap(df, length):
"Simple rolling vwap implementation for testing"
closes = df['close'].values
volumes = df['volume'].values
product = closes * volumes
out = full_like(closes, nan)
for upper_bound in range(length, len(closes) + 1):
bounds = slice(upper_bound - length, upper_bound)
out[upper_bound - 1] = product[bounds].sum() / volumes[bounds].sum()
return Series(out, index=df.index)
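# Illustrative example (added for clarity): for closes [1, 2, 3] and volumes
# [10, 10, 10] with length=2 the result is [nan, 1.5, 2.5]; each entry is
# sum(close * volume) / sum(volume) over the trailing `length` rows.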
class ClosesOnly(TestCase):
def setUp(self):
self.env = env = trading.TradingEnvironment()
self.dates = date_range(
'2014-01-01', '2014-02-01', freq=trading_day, tz='UTC'
)
asset_info = DataFrame.from_records([
{
'sid': 1,
'symbol': 'A',
'asset_type': 'equity',
'start_date': self.dates[10],
'end_date': self.dates[13],
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'B',
'asset_type': 'equity',
'start_date': self.dates[11],
'end_date': self.dates[14],
'exchange': 'TEST',
},
{
'sid': 3,
'symbol': 'C',
'asset_type': 'equity',
'start_date': self.dates[12],
'end_date': self.dates[15],
'exchange': 'TEST',
},
])
self.first_asset_start = min(asset_info.start_date)
self.last_asset_end = max(asset_info.end_date)
env.write_data(equities_df=asset_info)
self.asset_finder = finder = env.asset_finder
sids = (1, 2, 3)
self.assets = finder.retrieve_all(sids)
# View of the baseline data.
self.closes = DataFrame(
{sid: arange(1, len(self.dates) + 1) * sid for sid in sids},
index=self.dates,
dtype=float,
)
# Add a split for 'A' on its second date.
self.split_asset = self.assets[0]
self.split_date = self.split_asset.start_date + trading_day
self.split_ratio = 0.5
self.adjustments = DataFrame.from_records([
{
'sid': self.split_asset.sid,
'value': self.split_ratio,
'kind': MULTIPLY,
'start_date': Timestamp('NaT'),
'end_date': self.split_date,
'apply_date': self.split_date,
}
])
# View of the data on/after the split.
self.adj_closes = adj_closes = self.closes.copy()
adj_closes.ix[:self.split_date, self.split_asset] *= self.split_ratio
self.pipeline_loader = DataFrameLoader(
column=USEquityPricing.close,
baseline=self.closes,
adjustments=self.adjustments,
)
def expected_close(self, date, asset):
if date < self.split_date:
lookup = self.closes
else:
lookup = self.adj_closes
return lookup.loc[date, asset]
def exists(self, date, asset):
return asset.start_date <= date <= asset.end_date
def test_attach_pipeline_after_initialize(self):
"""
Assert that calling attach_pipeline after initialize raises correctly.
"""
def initialize(context):
pass
def late_attach(context, data):
attach_pipeline(Pipeline(), 'test')
raise AssertionError("Shouldn't make it past attach_pipeline!")
algo = TradingAlgorithm(
initialize=initialize,
handle_data=late_attach,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(AttachPipelineAfterInitialize):
algo.run(source=self.closes)
def barf(context, data):
raise AssertionError("Shouldn't make it past before_trading_start")
algo = TradingAlgorithm(
initialize=initialize,
before_trading_start=late_attach,
handle_data=barf,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(AttachPipelineAfterInitialize):
algo.run(source=self.closes)
def test_pipeline_output_after_initialize(self):
"""
Assert that calling pipeline_output after initialize raises correctly.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
pipeline_output('test')
raise AssertionError("Shouldn't make it past pipeline_output()")
def handle_data(context, data):
raise AssertionError("Shouldn't make it past initialize!")
def before_trading_start(context, data):
raise AssertionError("Shouldn't make it past initialize!")
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(PipelineOutputDuringInitialize):
algo.run(source=self.closes)
def test_get_output_nonexistent_pipeline(self):
"""
Assert that calling add_pipeline after initialize raises appropriately.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
def handle_data(context, data):
raise AssertionError("Shouldn't make it past before_trading_start")
def before_trading_start(context, data):
pipeline_output('not_test')
raise AssertionError("Shouldn't make it past pipeline_output!")
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
            end=self.last_asset_end + trading_day,
|
simplegeo/authorize
|
authorize/gen_xml.py
|
Python
|
mit
| 17,930
| 0.004964
|
# -*- encoding: utf-8 -*-
import re
import decimal
from xml.etree.cElementTree import fromstring, tostring
from xml.etree.cElementTree import Element, iselement
from authorize import responses
API_SCHEMA = 'https://api.authorize.net/xml/v1/schema/AnetApiSchema.xsd'
API_SCHEMA_NS = "AnetApi/xml/v1/schema/AnetApiSchema.xsd"
PREFIX = "{AnetApi/xml/v1/schema/AnetApiSchema.xsd}"
INDIVIDUAL = u"individual"
BUSINESS = u"business"
ECHECK_CCD = u"CCD"
ECHECK_PPD = u"PPD"
ECHECK_TEL = u"TEL"
ECHECK_WEB = u"WEB"
BANK = u"bank"
CREDIT_CARD = u"cc"
ECHECK = u"echeck"
DAYS_INTERVAL = u"days"
MONTHS_INTERVAL = u"months"
VALIDATION_NONE = u"none"
VALIDATION_TEST = u"testMode"
VALIDATION_LIVE = u"liveMode"
ACCOUNT_CHECKING = u"checking"
ACCOUNT_SAVINGS = u"savings"
ACCOUNT_BUSINESS_CHECKING = u"businessChecking"
AUTH_ONLY = u"auth_only"
CAPTURE_ONLY = u"capture_only"
AUTH_CAPTURE = u"auth_capture"
CREDIT = u"credit"
PRIOR_AUTH_CAPTURE = u"prior_auth_capture"
VOID = u"void"
class AuthorizeSystemError(Exception):
"""
I'm a serious kind of exception and I'm raised when something
went really bad at a lower level than the application level, like
    when Authorize is down or when they return an unparseable response
"""
def __init__(self, *args):
self.args = args
def __str__(self):
return "Exception: %s caused by %s" % self.args
def __repr__(self):
# Here we are printing a tuple, the , at the end is _required_
return "AuthorizeSystemError%s" % (self.args,)
c = re.compile(r'([A-Z]+[a-z_]+)')
def convert(arg):
"""
Convert an object to its xml representation
"""
if iselement(arg):
return arg # the element
if isinstance(arg, dict_accessor):
try:
return arg.text_
except:
raise Exception("Cannot serialize %s, missing text_ attribute" % (arg,))
if isinstance(arg, dict):
return arg # attributes of the element
if isinstance(arg, unicode):
return arg
if isinstance(arg, decimal.Decimal):
return unicode(arg)
if arg is True:
return 'true'
if arg is False:
return 'false'
if isinstance(arg, float):
return unicode(round(arg, 2)) # there's nothing less than cents anyway
if isinstance(arg, (int, long)):
return unicode(arg)
if isinstance(arg, str):
raise Exception("'%s' not unicode: can only accept unicode strings" % (arg,))
raise Exception("Cannot convert %s of type %s" % (arg, type(arg)))
def utf8convert(arg):
"""
Further extend L{convert} to return UTF-8 strings instead of unicode.
"""
value = convert(arg)
if isinstance(value, unicode):
return value.encode('utf-8')
return value
class XMLBuilder(object):
"""
XMLBuilder tries to be slightly clever in order to be easier for
the programmer. If you try to add arguments that are None they
won't be added to the output because empty XML tags are not worth
the bandwidth and actually mean something different than None.
"""
def __getattr__(self, key):
def _wrapper_func(*args):
converted = [convert(arg) for arg in args if arg is not None]
if not converted:
return None
el = Element(key)
settext = False
setatts = False
for arg in converted:
if iselement(arg):
el.append(arg)
elif isinstance(arg, basestring):
assert not settext, "cannot set text twice"
el.text = arg
settext = True
elif isinstance(arg, dict):
assert not setatts, "cannot set attributes twice"
for k, v in arg.iteritems():
el.set(k, v)
setatts = True
else:
raise TypeError("unhandled argument type: %s" % type(arg))
return el
return _wrapper_func
x = XMLBuilder()
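# Illustrative sketch (not part of the original module): any attribute access
# on the builder acts as an element factory, so the tag names below are
# arbitrary examples rather than a fixed API. Unicode arguments become text,
# dicts become attributes, Elements become children, and None arguments are
# silently dropped.
def _example_xml_builder():
    bill_to = x.billTo(
        x.firstName(u"John"),
        x.lastName(u"Doe"),
        x.company(None),  # evaluates to None, so no empty <company/> tag is added
        {u"customerType": INDIVIDUAL},
    )
    return tostring(bill_to)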
def flatten(tree):
"""
Return a flattened tree in string format encoded in utf-8
"""
return tostring(tree, "utf-8")
def purify(s):
"""
    s is an etree tag that may also carry namespace information;
    if that namespace prefix is present, remove it, then convert the
    camelCaseTag to the more Python-friendly underscore_notation.
"""
if s.startswith(PREFIX):
s = s[len(PREFIX):]
return '_'.join(atom.lower() for atom in c.split(s) if atom)
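# Worked example of purify (illustrative tag name, not taken from the schema):
# the namespace prefix is stripped and the camelCase remainder is rejoined
# with underscores.
def _example_purify():
    # "{AnetApi/...}getCustomerProfileResponse" -> "get_customer_profile_response"
    return purify(PREFIX + u"getCustomerProfileResponse")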
class dict_accessor(dict):
"""
Allow accessing a dictionary content also using dot-notation.
"""
def __getattr__(self, attr):
return super(dict_accessor, self).__getitem__(attr)
def __setattr__(self, attr, value):
super(dict_accessor, self).__setitem__(attr, value)
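# Small sketch (not in the original source): dict_accessor simply layers
# attribute access on top of a plain dict, so both spellings below read and
# write the same keys.
def _example_dict_accessor():
    d = dict_accessor({u'reason_text': u'Approved'})
    d.amount = u'12.50'                  # __setattr__ stores a normal dict item
    return d.reason_text, d[u'amount']   # (u'Approved', u'12.50')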
def parse_node(node):
"""
Return a dict_accessor representation of the node.
"""
new = dict_accessor({})
if node.text and node.text.strip():
t = node.text
if isinstance(t, unicode):
new['text_'] = t
else:
new['text_'] = t.decode('utf-8', "replace")
if node.attrib:
new['attrib_'] = dict_accessor(node.attrib)
for child in node.getchildren():
tag = purify(child.tag)
child = parse_node(child)
if tag not in new:
new[tag] = child
else:
old = new[tag]
if not isinstance(old, list):
new[tag] = [old]
new[tag].append(child)
return new
def to_dict(s, error_codes, do_raise=True, delimiter=u',', encapsulator=u'', uniform=False):
"""
    Return a dict_accessor representation of the given string; if do_raise
    is True an exception is raised when an error code is present.
"""
try:
t = fromstring(s)
except SyntaxError, e:
raise AuthorizeSystemError(e, s)
parsed = dict_accessor(parse_node(t)) # discard the root node which is useless
try:
        if isinstance(parsed.messages.message, list):  # there's more than one child
return parsed
code = parsed.messages.message.code.text_
if uniform:
parsed.messages.message = [parsed.messages.message]
except KeyError:
return parsed
if code in error_codes:
if do_raise:
raise error_codes[code]
dr = None
if parsed.get('direct_response') is not None:
dr = parsed.direct_response.text_
elif parsed.get('validation_direct_response') is not None:
dr = parsed.validation_direct_response.text_
if dr is not None:
parsed.direct_response = parse_direct_response(dr,
delimiter,
encapsulator)
return parsed
m = ['code', 'subcode', 'reason_code', 'reason_text', 'auth_code',
'avs', 'trans_id', 'invoice_number', 'description', 'amount', 'method',
'trans_type', 'customer_id', 'first_name', 'last_name', 'company',
'address', 'city', 'state', 'zip', 'country', 'phone', 'fax', 'email',
'ship_first_name', 'ship_last_name', 'ship_company', 'ship_address',
'ship_city', 'ship_state', 'ship_zip', 'ship_country', 'tax', 'duty',
'freight', 'tax_exempt', 'po_number', 'md5_hash', 'ccv',
'holder_verification']
def parse_direct_response(s, delimiter=u',', encapsulator=u''):
"""
    A very simple format, but made of many fields; the most complex ones
have the following meanings:
code:
see L{responses.aim_codes} for all the codes
avs:
see L{responses.avs_codes} for all the codes
method: CC or ECHECK
trans_type:
AUTH_CAPTURE
AUTH_ONLY
CAPTURE_ONLY
CREDIT
PRIOR_AUTH_CAPTURE
VOID
tax_exempt: true, false, T, F, YES, NO, Y, N, 1, 0
ccv:
see L{responses.ccv_codes} for all the codes
holder_verification:
see L{responses.holder_verification_codes} for all the codes
"""
if not isinstance(s, unicode):
        s = s.decode('utf-8')
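# A minimal sketch of the parsing idea, assuming the direct response is a
# single delimited record whose columns line up with the field names in ``m``
# (an approximation for illustration, not the module's own implementation):
def _sketch_parse_direct_response(s, delimiter=u',', encapsulator=u''):
    fields = [field.strip(encapsulator) for field in s.split(delimiter)]
    return dict_accessor(dict(zip(m, fields)))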
|
3324fr/spinalcordtoolbox
|
dev/sct_detect_spinalcord/sct_get_centerline_from_labels.py
|
Python
|
mit
| 5,974
| 0.01473
|
#!/usr/bin/env python
import commands, sys
# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
from msct_parser import Parser
from nibabel import load, save, Nifti1Image
import os
import time
import sct_utils as sct
from sct_process_segmentation import extract_centerline
from sct_orientation import get_orientation
# DEFAULT PARAMETERS
class Param:
## The constructor
def __init__(self):
self.debug = 0
self.verbose = 1 # verbose
self.remove_temp_files = 1
self.type_window = 'hanning' # for smooth_centerline @sct_straighten_spinalcord
self.window_length = 80 # for smooth_centerline @sct_straighten_spinalcord
self.algo_fitting = 'nurbs'
# self.parameter = "binary_centerline"
self.list_file = []
self.output_file_name = ''
def main(list_file, param, output_file_name=None, remove_temp_files = 1, verbose = 0):
path, file, ext = sct.extract_fname(list_file[0])
# create temporary folder
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp)
# copy files into tmp folder
sct.printv('\nCopy files into tmp folder...', verbose)
for i in range(len(list_file)):
file_temp = os.path.abspath(list_file[i])
sct.run('cp '+file_temp+' '+path_tmp)
# go to tmp folder
os.chdir(path_tmp)
## Concatenation of the files
# Concatenation : sum of matrices
file_0 = load(file+ext)
data_concatenation = file_0.get_data()
hdr_0 = file_0.get_header()
orientation_file_0 = get_orientation(list_file[0])
if len(list_file)>0:
for i in range(1, len(list_file)):
orientation_file_temp = get_orientation(list_file[i])
if orientation_file_0 != orientation_file_temp :
print "ERROR: The files ", list_file[0], " and ", list_file[i], " are not in the same orientation. Use sct_orientation to change the orientation of a file."
sys.exit(2)
file_temp = load(list_file[i])
data_temp = file_temp.get_data()
data_concatenation = data_concatenation + data_temp
# Save concatenation as a file
print '\nWrite NIFTI volumes...'
img = Nifti1Image(data_concatenation, None, hdr_0)
save(img,'concatenation_file.nii.gz')
# Applying nurbs to the concatenation and save file as binary file
    fname_output = extract_centerline('concatenation_file.nii.gz', remove_temp_files = remove_temp_files, verbose = verbose, algo_fitting=param.algo_fitting, type_window=param.type_window, window_length=param.window_length)
# Rename files after processing
    if output_file_name is None:
        output_file_name = "generated_centerline.nii.gz"
    os.rename(fname_output, output_file_name)
path_binary, file_binary, ext_binary = sct.extract_fname(output_file_name)
os.rename('concatenation_file_centerline.txt', file_binary+'.txt')
# Process for a binary file as output:
sct.run('cp '+output_file_name+' ../')
# Process for a text file as output:
sct.run('cp '+file_binary+ '.txt'+ ' ../')
os.chdir('../')
# Remove temporary files
if remove_temp_files:
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp)
# Display results
    # The concatenated centerline and its fitted curve are displayed within extract_centerline
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# initialize parameters
# Initialize the parser
parser = Parser(__file__)
    parser.usage.set_description('Compute a centerline from a list of segmentation and label files. It concatenates the parts, then extracts the centerline. The output is a NIFTI image and a text file with the float coordinates (z, x, y) of the centerline.')
parser.add_option(name="-i",
type_value=[[','],'file'],
description="List containing segmentation NIFTI file and label NIFTI files. They must be 3D. Names must be separated by commas without spaces.",
mandatory=True,
example= "data_seg.nii.gz,label1.nii.gz,label2.nii.gz")
parser.add_option(name="-o",
type_value="file_output",
description="Name of the output NIFTI image with the centerline and of the output text file with the coordinates (z, x, y) (but text file will have '.txt' extension).",
mandatory=False,
default_value='generated_centerline.nii.gz')
parser.add_option(name="-r",
type_value="multiple_choice",
description="Remove temporary files. Specify 0 to get access to temporary files.",
mandatory=False,
example=['0','1'],
default_value="1")
parser.add_option(name="-v",
type_value="multiple_choice",
description="Verbose. 0: nothing. 1: basic. 2: extended.",
mandatory=False,
default_value='0',
example=['0', '1', '2'])
arguments = parser.parse(sys.argv[1:])
remove_temp_files = int(arguments["-r"])
verbose = int(arguments["-v"])
if "-i" in arguments:
list_file = arguments["-i"]
else: list_file = None
if "-o" in arguments:
output_file_name = arguments["-o"]
else: output_file_name = None
param = Param()
param.verbose = verbose
param.remove_temp_files =remove_temp_files
main(list_file, param, output_file_name, remove_temp_files, verbose)
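# Example invocation (hypothetical file names), matching the options declared above:
#   sct_get_centerline_from_labels.py -i data_seg.nii.gz,label1.nii.gz,label2.nii.gz -o centerline.nii.gz -r 1 -v 1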
|
zozo123/buildbot
|
master/buildbot/test/unit/test_db_schedulers.py
|
Python
|
gpl-3.0
| 14,846
| 0.000741
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.db import schedulers
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import connector_component
from buildbot.test.util import interfaces
from buildbot.test.util import validation
from twisted.internet import defer
from twisted.trial import unittest
class Tests(interfaces.InterfaceTests):
# test data
ss92 = fakedb.SourceStamp(id=92)
change3 = fakedb.Change(changeid=3)
change4 = fakedb.Change(changeid=4)
change5 = fakedb.Change(changeid=5)
change6 = fakedb.Change(changeid=6, branch='sql')
scheduler24 = fakedb.Scheduler(id=24, name='schname')
master13 = fakedb.Master(id=13, name='m1', active=1)
scheduler24master = fakedb.SchedulerMaster(schedulerid=24, masterid=13)
    scheduler25 = fakedb.Scheduler(id=25, name='schname2')
master14 = fakedb.Master(id=14, name='m2', active=0)
scheduler25master = fakedb.SchedulerMaster(schedulerid=25, masterid=14)
# tests
def test_signature_classifyChanges(self):
@self.assertArgSpecMatches(self.db.schedulers.classifyChanges)
def classifyChanges(self, schedulerid, classifications):
pass
@defer.inlineCallbacks
def test_classifyChanges(self):
yield self.insertTestData([self.ss92, self.change3, self.change4,
self.scheduler24])
yield self.db.schedulers.classifyChanges(24,
{3: False, 4: True})
res = yield self.db.schedulers.getChangeClassifications(24)
self.assertEqual(res, {3: False, 4: True})
@defer.inlineCallbacks
def test_classifyChanges_again(self):
# test reclassifying changes, which may happen during some timing
# conditions
yield self.insertTestData([
self.ss92,
self.change3,
self.scheduler24,
fakedb.SchedulerChange(schedulerid=24, changeid=3, important=0),
])
yield self.db.schedulers.classifyChanges(24, {3: True})
res = yield self.db.schedulers.getChangeClassifications(24)
self.assertEqual(res, {3: True})
def test_signature_flushChangeClassifications(self):
@self.assertArgSpecMatches(
self.db.schedulers.flushChangeClassifications)
def flushChangeClassifications(self, schedulerid, less_than=None):
pass
@defer.inlineCallbacks
def test_flushChangeClassifications(self):
yield self.insertTestData([self.ss92, self.change3, self.change4,
self.change5, self.scheduler24])
yield self.addClassifications(24,
(3, 1), (4, 0), (5, 1))
res = yield self.db.schedulers.getChangeClassifications(24)
self.assertEqual(res, {3: True, 4: False, 5: True})
yield self.db.schedulers.flushChangeClassifications(24)
res = yield self.db.schedulers.getChangeClassifications(24)
self.assertEqual(res, {})
@defer.inlineCallbacks
def test_flushChangeClassifications_less_than(self):
yield self.insertTestData([self.ss92, self.change3,
self.change4, self.change5, self.scheduler24])
yield self.addClassifications(24,
(3, 1), (4, 0), (5, 1))
yield self.db.schedulers.flushChangeClassifications(24, less_than=5)
res = yield self.db.schedulers.getChangeClassifications(24)
self.assertEqual(res, {5: True})
def test_signature_getChangeClassifications(self):
@self.assertArgSpecMatches(self.db.schedulers.getChangeClassifications)
def getChangeClassifications(self, schedulerid, branch=-1,
repository=-1, project=-1, codebase=-1):
pass
@defer.inlineCallbacks
def test_getChangeClassifications(self):
yield self.insertTestData([self.ss92, self.change3, self.change4,
self.change5, self.change6, self.scheduler24])
yield self.addClassifications(24,
(3, 1), (4, 0), (5, 1), (6, 1))
res = yield self.db.schedulers.getChangeClassifications(24)
self.assertEqual(res, {3: True, 4: False, 5: True, 6: True})
@defer.inlineCallbacks
def test_getChangeClassifications_branch(self):
yield self.insertTestData([self.ss92, self.change3, self.change4,
self.change5, self.change6, self.scheduler24])
yield self.addClassifications(24,
(3, 1), (4, 0), (5, 1), (6, 1))
res = yield self.db.schedulers.getChangeClassifications(24,
branch='sql')
self.assertEqual(res, {6: True})
def test_signature_findSchedulerId(self):
@self.assertArgSpecMatches(self.db.schedulers.findSchedulerId)
def findSchedulerId(self, name):
pass
@defer.inlineCallbacks
def test_findSchedulerId_new(self):
id = yield self.db.schedulers.findSchedulerId('schname')
sch = yield self.db.schedulers.getScheduler(id)
self.assertEqual(sch['name'], 'schname')
@defer.inlineCallbacks
def test_findSchedulerId_existing(self):
id = yield self.db.schedulers.findSchedulerId('schname')
id2 = yield self.db.schedulers.findSchedulerId('schname')
self.assertEqual(id, id2)
def test_signature_setSchedulerMaster(self):
@self.assertArgSpecMatches(self.db.schedulers.setSchedulerMaster)
def setSchedulerMaster(self, schedulerid, masterid):
pass
@defer.inlineCallbacks
def test_setSchedulerMaster_fresh(self):
yield self.insertTestData([self.scheduler24, self.master13])
yield self.db.schedulers.setSchedulerMaster(24, 13)
sch = yield self.db.schedulers.getScheduler(24)
self.assertEqual(sch['masterid'], 13)
def test_setSchedulerMaster_inactive_but_linked(self):
d = self.insertTestData([
self.master13,
self.scheduler25, self.master14, self.scheduler25master,
])
d.addCallback(lambda _:
self.db.schedulers.setSchedulerMaster(25, 13))
self.assertFailure(d, schedulers.SchedulerAlreadyClaimedError)
return d
def test_setSchedulerMaster_active(self):
d = self.insertTestData([
self.scheduler24, self.master13, self.scheduler24master,
])
d.addCallback(lambda _:
self.db.schedulers.setSchedulerMaster(24, 14))
self.assertFailure(d, schedulers.SchedulerAlreadyClaimedError)
return d
@defer.inlineCallbacks
def test_setSchedulerMaster_None(self):
yield self.insertTestData([
self.scheduler25, self.master14, self.scheduler25master,
])
yield self.db.schedulers.setSchedulerMaster(25, None)
sch = yield self.db.schedulers.getScheduler(25)
self.assertEqual(sch['masterid'], None)
@defer.inlineCallbacks
def test_setSchedulerMaster_None_unowned(self):
yield self.insertTestData([self.scheduler25])
yield self.db.schedulers.setSchedulerMaster(25, None)
sch = yield self.db.schedulers.getScheduler(25)
self.assertEqual(sch['masterid'], None)
def test_signature_getScheduler(self):
        @self.assertArgSpecMatches(self.db.schedulers.getScheduler)
        def getScheduler(self, schedulerid):
            pass
|
chase-qi/workload-automation
|
wlauto/instrumentation/misc/__init__.py
|
Python
|
apache-2.0
| 17,103
| 0.003859
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,no-member,attribute-defined-outside-init
"""
Some "standard" instruments to collect additional info about workload execution.
.. note:: The run() method of a Workload may perform some "boilerplate" as well as
the actual execution of the workload (e.g. it may contain UI automation
needed to start the workload). This "boilerplate" execution will also
          be measured by these instruments. As such, they are not suitable for collecting
          precise data about specific operations.
"""
import os
import re
import logging
import time
import tarfile
from itertools import izip, izip_longest
from subprocess import CalledProcessError
from wlauto import Instrument, Parameter
from wlauto.core import signal
from wlauto.exceptions import DeviceError, ConfigError
from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
from wlauto.utils.misc import ensure_file_directory_exists as _f
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.android import ApkInfo
from wlauto.utils.types import list_of_strings
logger = logging.getLogger(__name__)
class SysfsExtractor(Instrument):
name = 'sysfs_extractor'
description = """
    Collects the content of a set of directories before and after workload execution
    and diffs the result.
"""
mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
extract_timeout = 30
tarname = 'sysfs.tar'
DEVICE_PATH = 0
BEFORE_PATH = 1
AFTER_PATH = 2
DIFF_PATH = 3
parameters = [
Parameter('paths', kind=list_of_strings, mandatory=True,
description="""A list of paths to be pulled from the device. These could be directories
as well as files.""",
global_alias='sysfs_extract_dirs'),
Parameter('use_tmpfs', kind=bool, default=None,
description="""
Specifies whether tmpfs should be used to cache sysfile trees and then pull them down
                  as a tarball. This is significantly faster than just copying the directory trees from
                  the device directly, but requires root and may not work on all devices. Defaults to
``True`` if the device is rooted and ``False`` if it is not.
"""),
Parameter('tmpfs_mount_point', default=None,
description="""Mount point for tmpfs partition used to store snapshots of paths."""),
Parameter('tmpfs_size', default='32m',
description="""Size of the tempfs partition."""),
]
def initialize(self, context):
if not self.device.is_rooted and self.use_tmpfs: # pylint: disable=access-member-before-definition
            raise ConfigError('use_tmpfs must be False for an unrooted device.')
elif self.use_tmpfs is None: # pylint: disable=access-member-before-definition
self.use_tmpfs = self.device.is_rooted
if self.use_tmpfs:
self.on_device_before = self.device.path.join(self.tmpfs_mount_point, 'before')
self.on_device_after = self.device.path.join(self.tmpfs_mount_point, 'after')
if not self.device.file_exists(self.tmpfs_mount_point):
self.device.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
self.device.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
as_root=True)
def setup(self, context):
before_dirs = [
_d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
for d in self.paths
]
after_dirs = [
_d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
for d in self.paths
]
diff_dirs = [
_d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
for d in self.paths
]
self.device_and_host_paths = zip(self.paths, before_dirs, after_dirs, diff_dirs)
if self.use_tmpfs:
for d in self.paths:
before_dir = self.device.path.join(self.on_device_before,
self.device.path.dirname(as_relative(d)))
after_dir = self.device.path.join(self.on_device_after,
self.device.path.dirname(as_relative(d)))
if self.device.file_exists(before_dir):
self.device.execute('rm -rf {}'.format(before_dir), as_root=True)
self.device.execute('mkdir -p {}'.format(before_dir), as_root=True)
if self.device.file_exists(after_dir):
self.device.execute('rm -rf {}'.format(after_dir), as_root=True)
self.device.execute('mkdir -p {}'.format(after_dir), as_root=True)
def slow_start(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.device.path.join(self.on_device_before, as_relative(d))
if '*' in dest_dir:
dest_dir = self.device.path.dirname(dest_dir)
self.device.execute('{} cp -Hr {} {}'.format(self.device.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not rooted
for dev_dir, before_dir, _, _ in self.device_and_host_paths:
self.device.pull_file(dev_dir, before_dir)
def slow_stop(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.device.path.join(self.on_device_after, as_relative(d))
if '*' in dest_dir:
dest_dir = self.device.path.dirname(dest_dir)
self.device.execute('{} cp -Hr {} {}'.format(self.device.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not using tmpfs
for dev_dir, _, after_dir, _ in self.device_and_host_paths:
self.device.pull_file(dev_dir, after_dir)
def update_result(self, context):
if self.use_tmpfs:
on_device_tarball = self.device.path.join(self.device.working_directory, self.tarname)
on_host_tarball = self.device.path.join(context.output_directory, self.tarname + ".gz")
self.device.execute('{} tar cf {} -C {} .'.format(self.device.busybox,
on_device_tarball,
self.tmpfs_mount_point),
as_root=True)
self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
self.device.execute('{} gzip {}'.format(self.device.busybox,
on_device_tarball))
self.device.pull_file(on_device_tarball + ".gz", on_host_tarball)
            with tarfile.open(on_host_tarball, 'r:gz') as tf:
tf.extractall(context.output_directory)
self.device.delete_file(on_device_tarball + ".gz")
os.remove(on_host_tarball)
        for paths in self.device_and_host_paths:
after_dir = paths[self.AFTER_PATH]
dev_dir = paths[self.DEVICE_PATH].strip('*') # remove potential trailing '*'
if (not os.listdir(after_dir) and
self.device.file_exists(dev_dir) and
self.device.listdir(dev_dir)):
|
zhaochao/fuel-web
|
network_checker/network_checker/net_check/api.py
|
Python
|
apache-2.0
| 25,801
| 0.000504
|
#!/usr/bin/env python
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Generate and send Ethernet packets to specified interfaces.
# Collect data from interfaces.
# Analyse dumps for packets with special cookie in UDP payload.
#
import argparse
import itertools
import json
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import time
import traceback
import logging.handlers
from scapy import config as scapy_config
scapy_config.logLevel = 40
scapy_config.use_pcap = True
import scapy.all as scapy
from scapy.utils import rdpcap
class ActorFabric(object):
@classmethod
def getInstance(cls, config):
if config.get('action') not in ('listen', 'generate'):
raise Exception(
'Wrong config, you need define '
'valid action instead of {0}'.format(config.get('action')))
if config['action'] in ('listen',):
return Listener(config)
elif config['action'] in ('generate',):
return Sender(config)
class ActorException(Exception):
    def __init__(self, logger, message='', level='error'):
getattr(logger, level, logger.error)(message)
super(ActorException, self).__init__(message)
class Actor(object):
def __init__(self, config=None):
self.config = {
'src_mac': None,
'src': '198.18.1.1',
'dst': '198.18.1.2',
'sport': 31337,
            'dport': 31337,
'cookie': "Nailgun:",
'pcap_dir': "/var/run/pcap_dir/",
'duration': 5,
'repeat': 1
}
if config:
self.config.update(config)
self.logger.debug("Running with config: %s", json.dumps(self.config))
self._execute(["modprobe", "8021q"])
self.iface_down_after = {}
self.viface_remove_after = {}
def _define_logger(self, filename=None,
appname='netprobe', level=logging.DEBUG):
logger = logging.getLogger(appname)
logger.setLevel(level)
syslog_formatter = logging.Formatter(
'{appname}: %(message)s'.format(appname=appname)
)
syslog_handler = logging.handlers.SysLogHandler('/dev/log')
syslog_handler.setFormatter(syslog_formatter)
logger.addHandler(syslog_handler)
# A syslog handler should be always. But a file handler is the option.
# If you don't want it you can keep 'filename' variable as None to skip
# this handler.
if filename:
file_formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'
)
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
return logger
def _execute(self, command, expected_exit_codes=(0,)):
self.logger.debug("Running command: %s" % " ".join(command))
env = os.environ
env["PATH"] = "/bin:/usr/bin:/sbin:/usr/sbin"
p = subprocess.Popen(command, shell=False,
env=env, stdout=subprocess.PIPE)
output, _ = p.communicate()
if p.returncode not in expected_exit_codes:
raise ActorException(
self.logger,
"Command exited with error: %s: %s" % (" ".join(command),
p.returncode)
)
return output.split('\n')
def _viface_by_iface_vid(self, iface, vid):
return (self._try_viface_create(iface, vid) or "%s.%d" % (iface, vid))
def _iface_name(self, iface, vid=None):
if vid:
return self._viface_by_iface_vid(iface, vid)
return iface
def _look_for_link(self, iface, vid=None):
viface = None
if vid:
viface = self._viface_by_iface_vid(iface, vid)
command = ['ip', 'link']
r = re.compile(ur"(\d+?):\s+((?P<viface>[^:@]+)@)?(?P<iface>[^:]+?):"
".+?(?P<state>UP|DOWN|UNKNOWN).*$")
for line in self._execute(command):
m = r.search(line)
if m:
md = m.groupdict()
if (iface == md.get('iface') and
viface == md.get('viface') and md.get('state')):
return (iface, viface, md.get('state'))
# If we are here we aren't able to say if iface with vid is up
raise ActorException(
self.logger,
"Cannot find interface %s with vid=%s" % (iface, vid)
)
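    # Illustrative ``ip link`` lines the regex above is meant to match (sample
    # output, not captured from a real run):
    #   2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ... state UP mode DEFAULT
    #   7: eth0.103@eth0: <BROADCAST,MULTICAST> mtu 1500 ... state DOWN mode DEFAULT
    # i.e. "index: [viface@]iface: ... state", which _look_for_link reduces to
    # the tuple (iface, viface, state).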
def _try_iface_up(self, iface, vid=None):
if vid and not self._try_viface_create(iface, vid):
# if viface does not exist we raise exception
raise ActorException(
self.logger,
"Vlan %s on interface %s does not exist" % (str(vid), iface)
)
self.logger.debug("Checking if interface %s with vid %s is up",
iface, str(vid))
_, _, state = self._look_for_link(iface, vid)
return (state == 'UP')
def _iface_up(self, iface, vid=None):
"""Brings interface with vid up
"""
if vid and not self._try_viface_create(iface, vid):
# if viface does not exist we raise exception
raise ActorException(
self.logger,
"Vlan %s on interface %s does not exist" % (str(vid), iface)
)
set_iface = self._iface_name(iface, vid)
self.logger.debug("Brining interface %s with vid %s up",
set_iface, str(vid))
self._execute([
"ip",
"link", "set",
"dev", set_iface,
"up"])
def _ensure_iface_up(self, iface, vid=None):
"""Ensures interface is with vid up.
"""
if not self._try_iface_up(iface, vid):
# if iface is not up we try to bring it up
self._iface_up(iface, vid)
if self._try_iface_up(iface, vid):
# if iface was down and we have brought it up
# we should mark it to be brought down after probing
self.iface_down_after[self._iface_name(iface, vid)] = True
else:
# if viface is still down we raise exception
raise ActorException(
self.logger,
"Can not bring interface %s with vid %s up" % (iface,
str(vid))
)
def _ensure_iface_down(self, iface, vid=None):
set_iface = self._iface_name(iface, vid)
if self.iface_down_after.get(set_iface, False):
# if iface with vid have been marked to be brought down
# after probing we try to bring it down
self.logger.debug("Brining down interface %s with vid %s",
iface, str(vid))
self._execute([
"ip",
"link", "set",
"dev", set_iface,
"down"])
self.iface_down_after.pop(set_iface)
def _try_viface_create(self, iface, vid):
"""Tries to find vlan interface on iface with VLAN_ID=vid and return it
:returns: name of vlan interface if it exists or None
"""
self.logger.debug("Checking if vlan %s on interface %s exists",
str(vid), iface)
with open("/proc/net/vlan/config", "r") as f:
for line in f:
m = r
|
ploneintranet/ploneintranet.workspace
|
src/ploneintranet/workspace/tests/test_sidebar.py
|
Python
|
gpl-2.0
| 4,580
| 0
|
# coding=utf-8
from plone import api
from plone.tiles.interfaces import IBasicTile
from ploneintranet.workspace.browser.tiles.sidebar import Sidebar
from ploneintranet.workspace.browser.tiles.sidebar import \
SidebarSettingsMembers
from ploneintranet.workspace.tests.base import BaseTestCase
from zope.component import getMultiAdapter
from zope.component import provideAdapter
from zope.interface import Interface
from collective.workspace.interfaces import IWorkspace
class TestSidebar(BaseTestCase):
def create_workspace(self):
""" returns adapted workspa
|
ce folder"""
workspace_folder = api.content.create(
self.portal,
            'ploneintranet.workspace.workspacefolder',
'example-workspace',
title='Welcome to my workspace'
)
return workspace_folder
# return IWorkspace(workspace_folder)
def test_sidebar_existing_users(self):
ws = self.create_workspace()
user = api.user.create(email="newuser@example.org", username="newuser")
user_id = user.getId()
self.assertNotIn(user_id, IWorkspace(ws).members, "Id already present")
IWorkspace(ws).add_to_team(user=user_id)
provideAdapter(
SidebarSettingsMembers,
(Interface, Interface),
IBasicTile,
name=u"sidebarSettingsMember.default",
)
# Commenting out because they aren't (yet?) being used.
# sidebarSettingsMembers = getMultiAdapter(
# (ws, ws.REQUEST), name=u"sidebarSettingsMember.default")
# existing_users = sidebarSettingsMembers.existing_users()
self.assertIn(
user_id,
IWorkspace(ws).members,
"Id not found in worskpace member Ids",
)
def test_sidebar_children(self):
""" Create some test content and test if children method works
"""
self.login_as_portal_owner()
ws = self.create_workspace()
api.content.create(
ws,
'Document',
'example-document',
title='Some example Rich Text'
)
api.content.create(
ws,
'Folder',
'myfolder',
title='An example Folder'
)
myfolder = getattr(ws, 'myfolder')
api.content.create(
myfolder,
'Document',
'example-subdocument',
title='Some example nested Rich Text'
)
provideAdapter(Sidebar, (Interface, Interface), IBasicTile,
name=u"sidebar.default")
sidebar = getMultiAdapter((ws, ws.REQUEST), name=u"sidebar.default")
children = sidebar.children()
titles = [x['title'] for x in children]
self.assertIn('Some example Rich Text',
titles,
"File with that title not found in sidebar navigation")
urls = [x['url'] for x in children]
self.assertIn('http://nohost/plone/example-workspace/myfolder/'
'@@sidebar.default#workspace-documents',
urls,
"Folder with that url not found in sidebar navigation")
classes = [x['cls'] for x in children]
self.assertIn('item group type-folder has-no-description',
classes,
"No such Classes found in sidebar navigation")
ids = [x['id'] for x in children]
self.assertNotIn('example-subdocument',
ids,
"No such IDs found in sidebar navigation")
subsidebar = getMultiAdapter((myfolder, myfolder.REQUEST),
name=u"sidebar.default")
subchildren = subsidebar.children()
ids = [x['id'] for x in subchildren]
self.assertIn('example-subdocument',
ids,
"No such IDs found in sidebar navigation")
# Check if search works
from zope.publisher.browser import TestRequest
TR = TestRequest(form={'sidebar-search': 'Folder'})
sidebar = getMultiAdapter((ws, TR), name=u"sidebar.default")
children = sidebar.children()
self.assertEqual(len(children), 1)
self.assertTrue(children[0]['id'] == 'myfolder')
# Assert that substr works and we find all
TR = TestRequest(form={'sidebar-search': 'exampl'})
sidebar = getMultiAdapter((ws, TR), name=u"sidebar.default")
children = sidebar.children()
self.assertEqual(len(children), 3)
|
guaka/trust-metrics
|
trustlet/unittest/testXDiGraph.py
|
Python
|
gpl-2.0
| 3,116
| 0.08344
|
#!/usr/bin/env python
"""
test cache functions.
- save/load
- mmerge
"""
import unittest
import trustlet.igraphXdigraphMatch as IXD
import networkx as nx
import igraph
import os
#import sys
#import random
#import time
class TestIXD(unittest.TestCase):
def setUp(self):
self.g = IXD.XDiGraph()
self.g.add_edge('dan','mas',{'level':'journeyer'})
self.g.add_edge('mart','mas',{'level':'journeyer'})
self.g.add_edge('luc','mas',{'level':'master'})
self.g.add_edge('dan','luc',{'level':'apprentice'})
def testDot(self):
self.assertEqual( self.g.number_of_edges(), 4 )
self.assertEqual( self.g.number_of_edges(), 4 )
nx.write_dot( self.g, './test.dot' )
g1 = nx.read_dot( './test.dot' )
self.assertEqual( g1.number_of_edges(), self.g.number_of_edges() )
self.assertEqual( g1.number_of_edges(), 4 )
#os.remove( './test.dot' )
#first method of loading
#g2 = igraph.load( 'test~', format='pajek' )
#g3 = IXD.XDiGraph()
#self.g3.succ.g = g2
#self.assertEqual( g3.number_of_edges(), self.g.number_of_edges() )
#second method
#g1 = nx.read_pajek( './test.net' )
#self.assertEqual( self.g.number_of_edges(), g1.number_of_edges() )
#self.assertEqual( g3.number_of_edges(), g1.number_of_edges() )
def testValuesOnEdges(self):
self.assertEqual( self.g.get_edge( 'dan','mas' ) , {'level':'journeyer'} )
self.assertEqual( self.g.get_edge( 'luc','mas' ) , {'level':'master'} )
self.assertEqual( self.g.get_edge( 'dan','luc' ) , {'level':'apprentice'} )
try: #this edge cannot exist
x = self.g.get_edge( 'luc', 'dan' )
print ""
print "unknown edg
|
e", ('luc','dan',x)
self.assert_(False)
        except nx.NetworkXError:
pass
def testEdges(self):
self.assertEqual( sorted( self.g.edges() ) ,
sorted( [('dan','mas',{'level':'journeyer'}),
('mart','mas',{'level':'journeyer'}),
('luc','mas',{'level':'master'}),
('dan','luc',{'level':'apprentice'})]
)
)
def testDelete(self):
self.assertEqual( self.g.number_of_edges() , len( self.g.succ.g.es ) )
self.assertEqual( self.g.number_of_edges() , 4 )
self.assertEqual( self.g.number_of_nodes() , 4 )
self.g.delete_edge('dan','mas')
try:
x=self.g.get_edge('dan','mas')
self.assert_(False)
except nx.NetworkXError:
pass
self.assertEqual( self.g.number_of_edges() , 3 )
self.g.delete_node( 'luc' )
self.assertEqual( self.g.number_of_edges() , 1 )
self.assertEqual( self.g.number_of_nodes() , 3 )
def testInOutEdges(self):
self.assertEqual(
sorted( self.g.in_edges( 'mas' ) ) ,
sorted(
[ ('dan','mas',{'level':'journeyer'}), ('mart','mas',{'level':'journeyer'}),('luc','mas',{'level':'master'}) ]
)
)
self.assertEqual(
sorted( self.g.out_edges( 'dan' ) ) ,
sorted(
[ ('dan','mas',{'level':'journeyer'}), ('dan','luc',{'level':'apprentice'})]
)
)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestIXD)
unittest.TextTestRunner(verbosity=2).run(suite)
|
jirikadlec2/rushvalley
|
python/clean_logs.py
|
Python
|
mit
| 964
| 0.030083
|
#! /usr/bin/env python
import os
import sys
from dateutil import parser
BACK_LOG_SIZE = 14
if len(sys.argv) > 1:
print "This script deletes all but the " + str(BACK_LOG_SIZE) +" ne
|
west logs generated by the uploader."
print "It prints this message when run with any parameters. None are required."
sys.exit()
strdates = []
for root, dirs, files in os.walk("logfiles/"):
if len(files) > BACK_LOG_SIZE:
for filename in files:
if len(filename) > 8:
filename = filename.strip()
strDate = filename[:-8]
                strDate = strDate.replace("_", " ")
strdates.append(strDate)
#sorts the array into reverse order (i.e. newest first)
strdates.sort( key=parser.parse, reverse=True)
i = -1
for date in strdates:
i = i + 1
    if i < BACK_LOG_SIZE:  #skips the newest BACK_LOG_SIZE entries
continue
else:
#have to parse the filename back into shape
filename = date.replace(" ", "_")
filename = "logfiles/" + filename + "_log.log"
print filename
os.remove(filename)
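# Worked example of the filename handling above (the exact log-name format is an
# assumption): a file named "2015-06-01_13:45:12_log.log" loses its trailing
# eight characters ("_log.log"), the remaining underscores become spaces, and
# "2015-06-01 13:45:12" is what dateutil's parser sorts on.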
|
CSIRTUK/TekDefense-Automater
|
outputs.py
|
Python
|
mit
| 39,375
| 0.007289
|
"""
The outputs.py module represents every form of output
from the Automater program, including all variations of
output files. Any addition to the Automater that brings
any other output requirement should be programmed in this module.
Class(es):
SiteDetailOutput -- Wrapper class around all functions that print output
from Automater, to include standard output and file system output.
Function(s):
No global exportable functions are defined.
Exception(s):
No exceptions exported.
"""
import csv
import socket
import re
from datetime import datetime
from operator import attrgetter
class SiteDetailOutput(object):
"""
SiteDetailOutput provides the capability to output information
    to the screen, a text file, a comma-separated value file, or
a file formatted with html markup (readable by web browsers).
Public Method(s):
createOutputInfo
Instance variable(s):
_listofsites - list storing the list of site results stored.
"""
def __init__(self,sitelist):
"""
Class constructor. Stores the incoming list of sites in the _listofsites list.
Argument(s):
sitelist -- list containing site result information to be printed.
Return value(s):
Nothing is returned from this Method.
"""
self._listofsites = []
self._listofsites = sitelist
@property
def ListOfSites(self):
"""
Checks instance variable _listofsites for content.
Returns _listofsites if it has content or None if it does not.
Argument(s):
No arguments are required.
Return value(s):
_listofsites -- list containing list of site results if variable contains data.
None -- if _listofsites is empty or not assigned.
Restriction(s):
This Method is tagged as a Property.
"""
if self._listofsites is None or len(self._listofsites) == 0:
return None
return self._listofsites
def createOutputInfo(self,parser):
"""
        Checks parser information and calls the correct print methods based on parser requirements.
Returns nothing.
Argument(s):
parser -- Parser object storing program input parameters used when program was run.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
self.PrintToScreen(parser.hasBotOut())
if parser.hasCEFOutFile():
self.PrintToCEFFile(parser.CEFOutFile)
if parser.hasTextOutFile():
self.PrintToTextFile(parser.TextOutFile)
if parser.hasHTMLOutFile():
self.PrintToHTMLFile(parser.HTMLOutFile)
if parser.hasCSVOutSet():
self.PrintToCSVFile(parser.CSVOutFile)
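    # Typical call sequence (a sketch; ``sites`` and ``parser`` are produced by
    # the rest of Automater and are assumed here):
    #   SiteDetailOutput(sites).createOutputInfo(parser)
    # This always prints to the screen and additionally writes CEF, text, HTML
    # and CSV files when the corresponding parser options are set.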
def PrintToScreen(self, printinbotformat):
"""
Calls correct function to ensure site information is printed to the user's standard output correctly.
Returns nothing.
Argument(s):
printinbotformat -- True or False argument representing minimized output. True if minimized requested.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
if printinbotformat:
self.PrintToScreenBot()
else:
self.PrintToScreenNormal()
def PrintToScreenBot(self):
"""
Formats site information minimized and prints it to the user's standard output.
Returns nothing.
Argument(s):
No arguments are required.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): # this is a multisite
for index in range(len(site.RegEx)): # the regexs will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if target != site.Target:
print "\n**_ Results found for: " + site.Target + " _**"
target = site.Target
# Check for them ALL to be None or 0 length
sourceurlhasnoreturn = True
for answer in siteimpprop:
if answer is not None:
if len(answer) > 0:
sourceurlhasnoreturn = False
if sourceurlhasnoreturn:
print '[+] ' + site.SourceURL + ' No results found'
break
else:
if siteimpprop is None or len(siteimpprop) == 0:
print "No results in the " + site.FriendlyName[index] + " category"
else:
if siteimpprop[index] is None or len(siteimpprop[index]) == 0:
print site.ReportStringForResult[index] + ' No results found'
else:
laststring = ""
# if it's just a string we don't want it output like a list
if isinstance(siteimpprop[index], basestring):
if "" + site.ReportStringForResult[index] + " " + str(siteimpprop) != laststring:
print "" + site.ReportStringForResult[index] + " " + str(siteimpprop).replace('www.', 'www[.]').replace('http', 'hxxp')
laststring = "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
# must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
if "" + site.ReportStringForResult[index] + " " + str(siteresult) != laststring:
print "" + site.ReportStringForResult[index] + " " + str(siteresult).replace('www.', 'www[.]').replace('http', 'hxxp')
laststring = "" + site.ReportStringForResult[index] + " " + str(siteresult)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if target != site.Target:
print "\n**_ Results found for: " + site.Target + " _**"
target = site.Target
if siteimpprop is None or len(siteimpprop)==0:
print '[+] ' + site.FriendlyName + ' No results found'
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
if "" + site.ReportStringForResult + " " + str(siteimpprop) != laststring:
print "" + site.ReportStringForResult + " " + str(siteimpprop).replace('www.', 'www[.]
|
').replace('http', 'hxxp')
laststring = "" + site.ReportStringForResult + " " + str(siteimpprop)
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop:
if "" + site.ReportStringForResult + " " + str(siteresult) != laststring:
print "" + site.ReportStringForResult + " " + str(siteresult).replace('www.', 'www[.]').replace('http', 'hxxp')
                                    laststring = "" + site.ReportStringForResult + " " + str(siteresult)
|
rsepassi/tensor2tensor
|
tensor2tensor/models/xception.py
|
Python
|
apache-2.0
| 5,805
| 0.010336
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
# Dependency imports
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def residual_block(x, hparams):
"""A stack of convolution blocks with residual connection."""
k = (hparams.kernel_height, hparams.kernel_width)
  dilations_and_kernels = [((1, 1), k) for _ in xrange(3)]
y = common_layers.subseparable_conv_block(
x,
hparams.hidden_size,
dilations_and_kernels,
padding="SAME",
separability=0,
name="residual_block")
x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
return tf.nn.dropout(x, 1.0 - hparams.dropout)
def xception_internal(inputs, hparams):
"""Xception body."""
with tf.variable_scope("xception"):
cur = inputs
if cur.get_shape().as_list()[1] > 200:
# Large image, Xception entry flow
cur = xception_entry(cur, hparams.hidden_size)
else:
# Small image, conv
cur = common_layers.conv_block(
cur,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True,
name="small_image_conv")
for i in xrange(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % i):
cur = residual_block(cur, hparams)
return xception_exit(cur)
def xception_entry(inputs, hidden_dim):
with tf.variable_scope("xception_entry"):
def xnet_resblock(x, filters, res_relu, name):
with tf.variable_scope(name):
y = common_layers.separable_conv_block(
x,
filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
first_relu=True,
padding="SAME",
force2d=True,
name="sep_conv_block")
y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
return y + common_layers.conv_block(
x,
filters, [((1, 1), (1, 1))],
padding="SAME",
strides=(2, 2),
first_relu=res_relu,
force2d=True,
name="res_conv0")
tf.summary.image("inputs", inputs, max_outputs=2)
x = common_layers.conv_block(
inputs,
32, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
strides=(2, 2),
force2d=True,
name="conv0")
x = common_layers.conv_block(
x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1")
x = xnet_resblock(x, min(128, hidden_dim), True, "block0")
x = xnet_resblock(x, min(256, hidden_dim), False, "block1")
return xnet_resblock(x, hidden_dim, False, "block2")
def xception_exit(inputs):
with tf.variable_scope("xception_exit"):
x = inputs
x_shape = x.get_shape().as_list()
if x_shape[1] is None or x_shape[2] is None:
length_float = tf.to_float(tf.shape(x)[1])
length_float *= tf.to_float(tf.shape(x)[2])
spatial_dim_float = tf.sqrt(length_float)
spatial_dim = tf.to_int32(spatial_dim_float)
x_depth = x_shape[3]
x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
elif x_shape[1] != x_shape[2]:
spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
raise ValueError("Assumed inputs were square-able but they were "
"not. Shape: %s" % x_shape)
x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
return tf.nn.relu(x)
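# Worked example of the reshape in xception_exit (illustrative shapes): an input
# of shape [batch, 64, 16, depth] has 64 * 16 = 1024 spatial positions, so
# spatial_dim = sqrt(1024) = 32 and the tensor is reshaped to
# [batch, 32, 32, depth] before the strided downsampling convolution.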
@registry.register_model
class Xception(t2t_model.T2TModel):
def body(self, features):
return xception_internal(features["inputs"], self._hparams)
@registry.register_hparams
def xception_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 128
hparams.hidden_size = 768
hparams.dropout = 0.2
hparams.symbol_dropout = 0.2
hparams.label_smoothing = 0.1
hparams.clip_grad_norm = 2.0
hparams.num_hidden_layers = 8
hparams.kernel_height = 3
hparams.kernel_width = 3
hparams.learning_rate_decay_scheme = "exp"
hparams.learning_rate = 0.05
hparams.learning_rate_warmup_steps = 3000
hparams.initializer_gain = 1.0
hparams.weight_decay = 3.0
hparams.num_sampled_classes = 0
hparams.sampling_method = "argmax"
hparams.optimizer_adam_epsilon = 1e-6
hparams.optimizer_adam_beta1 = 0.85
hparams.optimizer_adam_beta2 = 0.997
return hparams
@registry.register_hparams
def xception_tiny():
hparams = xception_base()
hparams.batch_size = 2
hparams.hidden_size = 64
hparams.num_hidden_layers = 2
hparams.learning_rate_decay_scheme = "none"
return hparams
@registry.register_hparams
def xception_tiny_tpu():
hparams = xception_base()
hparams.batch_size = 2
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.optimizer = "TrueAdam"
return hparams
|
yangjiePro/cutout
|
example.py
|
Python
|
mit
| 4,408
| 0.030525
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os, time
from cutout import cutout
datastr = '''
<html>
<head>
<title>html网页标题</title>
</head>
<body>
<ul id="img">
<li> <p>pic1</p> <img src="/img/pic1.jpg" /> </li>
<li> <p>pic2</p> <img src="/img/pic2.jpg" /> </li>
<li> <p>pic3</p> <img src="/img/pic3.jpg" /> </li>
</ul>
</body>
</html>
'''
# Get the page title
title = cutout(
data=datastr,
start="<title>",
end="</title>"
)
print(title) # html网页标题
# Get the image URLs
href = cutout(
data=datastr,
start="<ul id=\"img\">",
end="</ul>",
split="<li>", #分割
dealwith=({
"start":"<p>", #获取名称
"end":"</p>"
},{
"start":'<img src="', #获取网址
"rid":'"',
"end":'"'
})
)
print(href) # [['', None], ['pic1', '/img/pic1.jpg'], ['pic2', '/img/pic2.jpg'], ['pic3', '/img/pic3.jpg']]
# The first element of the result is ['', None] because the first segment is empty when splitting on <li>
exit(0);
from cutout.cache import FileCache
print('\n\n######## cache缓存测试\n')
print("\n## FileCache 文件缓存测试\n")
key = '缓存键 hash key'
c = FileCache('./cache') #specify the cache directory
c.set(key, ['2w3w','agafd'],10)
g = c.get(key)
print('取回设置的缓存值:'+g[1])
import cutout.util as util
print('\n\n######## util工具类测试')
print('\n## rangable 限定数值范围\n')
print('128 (50-100) => '+str(util.rangable(128,50,100)))
print('32.5 (50.4-100) => '+str(util.rangable(32.5,50.4,100)))
print('32 (50.4-无上限) => '+str(util.rangable(32.5,50.4)))
print('128 (无下限-100) => '+str(util.rangable(128,top=100)))
print('\n## parse_argv 解析命令行参数\n')
print(
"['test.py','-k','key','-n','num'] => "
+str(util.parse_argv(['test.py','-k','key','-n','num']))
)
print('\n## sec2time time2sec 时间格式转化\n')
print('1324 => '+util.sec2time(1324))
print('22:04 => '+str(util.time2sec('22:04')))
print('\n## urlencode urldncode url编码解码\n')
print('中文汉字 => '+util.urlencode('中文汉字'))
print(
'%E4%B8%AD%E6%96%87%E6%B1%89%E5%AD%97 => '
+util.urldecode('%E4%B8%AD%E6%96%87%E6%B1%89%E5%AD%97')
)
print("{'name':'汉字','title':'标题'} => "
+util.urlencode({'name':'汉字','title':'标题'}))
from cutout import cutout, cutouts, download
from cutout.common import ProgressBar
print('\n\n######## cutout抓取函数')
print('\n## cutout 抓取百度音乐PC版软件下载地址 http://music.baidu.com\n')
exe_href = cutout(
    url='http://music.baidu.com', #step 1: cut out the target section
start='<a class="downloadlink-pc"',
end='>下载PC版</a>',
dealwith={
'start':'href="', #第二部抓取 href 链接
'rid':'"',
'end':'"'
}
)
print(exe_href)
print('\n## url_download 下载 '+exe_href+' 显示下载进度条\n')
#custom download progress bar
bar = ProgressBar(piece_total=1);
bar.face(
    sh_piece_division=1024, #piece divisor
    sh_piece_unit='KB' #piece unit
)
download(exe_href,showBar=bar)
print('\n\n######## 多线程数据抓取\n')
urls = []
for i in range(18):
'''
urls.append('http://www.tvmao.com/query.jsp?keys=%E6%B9%96%E5%8C%97%E4%BD%93%E8%82%B2')
urls.append('http://jojoin.com/')
urls.append('http://codekart.jojoin.com/')
urls.append('http://docs.codekart.jojoin.com/')
urls.append('http://v.duole.com/')
urls.append('http://www.taobao.com/')
urls.append('http://www.baidu.com/')
urls.append('http://blog.csdn.net/vah101/article/details/6175406')
urls.append('http://www.cnblogs.com/wxw0813/archive/2012/09/18/2690694.html')
urls.append('http://woodpecker.org.cn/diveintopython3/')
urls.append('http://www.pythonclub.org/python-basic/threading')
urls.append('http://developer.51cto.com/art/201003/185569.htm')
'''
urls.append('http://v.baidu.com/tv/21331.htm')
#urls.append('')
sti = time.time()
bdata = cutouts(urls=urls)
eti = time.time()
#print(bdata)
print('并发抓取%d个页面:'%len(urls)+'%.2f'%(eti-sti)+'秒\n')
bdata = []
sti = time.time()
for u in urls:
bdata.append(cutout(u))
eti = time.time()
#print(bdata)
print('顺序抓取%d个页面:'%len(urls)+'%.2f'%(eti-sti)+'秒\n')
print('\n\n#### cutout已完成所有测试!!!')
|
Azure/azure-sdk-for-python
|
sdk/batchai/azure-mgmt-batchai/azure/mgmt/batchai/aio/operations/_jobs_operations.py
|
Python
|
mit
| 44,498
| 0.005371
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class JobsOperations:
"""JobsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~batch_ai.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_experiment(
self,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
jobs_list_by_experiment_options: Optional["_models.JobsListByExperimentOptions"] = None,
**kwargs: Any
) -> AsyncIterable["_models.JobListResult"]:
"""Gets a list of Jobs within the specified Experiment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param experiment_name: The name of the experiment. Experiment names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type experiment_name: str
:param jobs_list_by_experiment_options: Parameter group.
:type jobs_list_by_experiment_options: ~batch_ai.models.JobsListByExperimentOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~batch_ai.models.JobListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_max_results = None
if jobs_list_by_experiment_options is not None:
_max_results = jobs_list_by_experiment_options.max_results
api_version = "2018-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_experiment.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
                    'experimentName': self._serialize.url("experiment_name", experiment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if _max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", _max_results, 'int', maximum=1000, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('JobListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_experiment.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/experiments/{experimentName}/jobs'} # type: ignore
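# --- Editorial usage sketch (not part of the generated SDK source). ---
# Assuming an authenticated async management client that exposes this
# operation group (the attribute name `jobs` is an assumption), the pager
# returned above is consumed with `async for`:
#
#     async for job in client.jobs.list_by_experiment(
#             resource_group_name="my-rg",          # hypothetical values
#             workspace_name="my-workspace",
#             experiment_name="my-experiment"):
#         print(job.name)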
async def _create_initial(
self,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
job_name: str,
parameters: "_models.JobCreateParameters",
**kwargs: Any
) -> Optional["_models.Job"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Job"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'experimentName': self._serialize.url("experiment_name", experiment_name, 'str',
|
hvanwyk/quadmesh | src/mesh.py | Python | mit | 263,904 | 0.010803
|
#import matplotlib.pyplot as plt
import numpy as np
from collections import deque
import numbers
"""
Created on Jun 29, 2016
@author: hans-werner
"""
def convert_to_array(x, dim=None, return_is_singleton=False):
"""
Convert point or list of points to a numpy array.
Inputs:
x: (list of) point(s) to be converted to an array. Allowable inputs are
1. a list of Vertices,
2. a list of tuples,
3. a list of numbers or (2,) arrays
4. a numpy array of the appropriate size
dim: int, (1 or 2) optional number used to adjudicate ambiguous cases.
return_is_singleton: bool, if True, return whether the input x is a
singleton.
Outputs:
x: double, numpy array containing the points in x.
If x is one-dimensional (i.e. a list of 1d Vertices, 1-tuples, or
a 1d vector), convert to an (n,1) array.
If x is two-dimensional (i.e. a list of 2d Vertices, 2-tuples, or
a 2d array), return an (n,2) array.
"""
is_singleton = False
if type(x) is list:
#
# Points in list
#
if all(isinstance(xi, Vertex) for xi in x):
#
# All points are of type vertex
#
x = [xi.coordinates() for xi in x]
x = np.array(x)
elif all(type(xi) is tuple for xi in x):
#
# All points are tuples
#
x = np.array(x)
elif all(isinstance(xi, numbers.Real) for xi in x):
#
# List of real numbers -> turn into (n,1) array
#
x = np.array(x)
x = x[:,np.newaxis]
elif all(type(xi) is np.ndarray for xi in x):
#
# list of (2,) arrays
#
x = np.array(x)
else:
raise Exception('For x, use arrays or lists of tuples or vertices.')
elif isinstance(x, Vertex):
#
# A single vertex
#
x = np.array([x.coordinates()])
is_singleton = True
elif isinstance(x, numbers.Real):
if dim is not None:
assert dim==1, 'Dimension should be 1.'
x = np.array([[x]])
is_singleton = True
elif type(x) is tuple:
#
# A tuple
#
if len(x)==1:
#
# A 1-tuple
#
x, = x
x = np.array([[x]])
is_singleton = True
elif len(x)==2:
#
# A tuple
#
x,y = x
x = np.array([[x,y]])
is_singleton = True
elif type(x) is np.ndarray:
#
# Points in numpy array
#
if len(x.shape)==1:
#
# x is a one-dimensional vector
if len(x)==1:
#
# x is a vector with one entry
#
if dim is not None:
assert dim==1, 'Incompatible dimensions'
x = x[:,np.newaxis]
elif len(x) == 2:
#
# x is a vector 2 entries: ambiguous
#
if dim == 2:
#
# Turn 2-vector into a (1,2) array
#
x = x[np.newaxis,:]
else:
#
# Turn vector into (2,1) array
#
x = x[:,np.newaxis]
else:
#
# Turn vector into (n,1) array
#
x = x[:,np.newaxis]
elif len(x.shape)==2:
assert x.shape[1]<=2,\
'Dimension of array should be at most 2'
else:
raise Exception('Only 1- or 2 dimensional arrays allowed.')
if return_is_singleton:
# Specify whether x is a singleton
return x, is_singleton
else:
return x
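# --- Editorial usage sketch for convert_to_array (not in the original module). ---
# The `dim` argument disambiguates a length-2 vector, which could be either one
# 2d point or two 1d points:
#
#     import numpy as np
#     convert_to_array([(0, 0), (1, 1)]).shape              # (2, 2): two 2d points
#     convert_to_array(np.array([0.5, 1.5]), dim=2).shape   # (1, 2): one 2d point
#     convert_to_array(np.array([0.5, 1.5]), dim=1).shape   # (2, 1): two 1d points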
class Markable(object):
"""
Description: Any object that can be assigned a flag
"""
def __init__(self):
"""
Constructor
"""
self.__flag = None
def mark(self, flag):
"""
"""
pass
def unmark(self, flag):
"""
Remove flag
"""
pass
def is_marked(self, flag):
"""
Determine whether the given flag has been set
"""
pass
class Tree(object):
"""
Description: Tree object for storing and manipulating adaptively
refined quadtree meshes.
Attributes:
node_type: str, specifying node's relation to parents and/or children
'ROOT' (no parent node),
'BRANCH' (parent & children), or
'LEAF' (parent but no children)
address: int, list allowing access to node's location within the tree
General form [k0, k1, ..., kd], d=depth, ki in [0,...,n_children_i]
address = [] if ROOT node.
depth: int, depth within the tree (ROOT nodes are at depth 0).
parent: Tree/Mesh whose child this is
children: list of child nodes.
flag: set, of str/int/bool allowing tree nodes to be marked.
"""
def __init__(self, n_children=None, regular=True, flag=None,
parent=None, position=None, forest=None):
"""
Constructor
"""
#
# Set some attributes
#
self._is_regular = regular
self._parent = parent
self._forest = None
self._in_forest = False
self._node_position = position
#
# Set flags
#
self._flags = set()
if flag is not None:
if type(flag) is set:
# Add all flags in set
for f in flag:
self.mark(f)
else:
# Add single flag
self.mark(flag)
if parent is None:
#
# ROOT Tree
#
self._node_type = 'ROOT'
self._node_depth = 0
self._node_address = []
if self.is_regular():
# Ensure that the number of ROOT children is specified
assert n_children is not None, \
'ROOT node: Specify number of children.'
else:
# Not a regular tree: number of children 0 initially
n_children = 0
if forest is not None:
#
# Tree contained in a Forest
#
assert isinstance(forest, Forest), \
'Input forest must be an instance of the Forest class.'
#
# Add tree to forest
#
forest.add_tree(self)
self._in_forest = True
self._forest = forest
self._node_address = [self.get_node_position()]
else:
#
# Free standing ROOT cell
#
|
assert self.get_node_position() is None, \
'Unattached ROOT cell has no position.'
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
else:
#
# LEAF Node
#
position_missing = 'Position within parent cell must be specified.'
assert self.get_node_position() is not None, position_missing
self._node_type = 'LEAF'
# Determine cell's depth and address
self._node_depth = parent.get_depth() + 1
self._node_address = parent.get_
|
OCA/vertical-medical | medical_practitioner/models/medical_practitioner.py | Python | gpl-3.0 | 1,734 | 0
|
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# Copyright 2017 Creu Blanca
# Copyright 2017 Eficent Business and IT Consulting Services, S.L.
# License GPL-3.0 or later (http://www.gnu.org/licenses/gpl.html).
from odoo import api, fields, models, modules
class MedicalPractitioner(models.Model):
_name = 'medical.practitioner'
_description = 'Medical Practitioner'
_inherit = 'medical.abstract.entity'
_sql_constraints = [(
'medical_practitioner_unique_code',
'UNIQUE (code)',
'Internal ID must be unique',
)]
role_ids = fields.Many2many(
string='Roles',
comodel_name='medical.role',
)
practitioner_type = fields.Selection(
string='Entity Type',
selection=[('internal', 'Internal Entity'),
('external', 'External Entity')],
readonly=False,
)
code = fields.Char(
string='Internal ID',
|
help='Unique ID for this physician',
required=True,
default=lambda s: s.env['ir.sequence'].next_by_code(s._name + '.code'),
)
specialty_ids = fields.Many2many(
string='Specialties',
comodel_name='medical.specialty',
)
@api.model
def _get_default_image_path(self, vals):
res = super(MedicalPractitioner, self)._get_default_image_path(vals)
if res:
return res
practitioner_gender = vals.get('gender', 'male')
if practitioner_gender == 'other':
practitioner_gender = 'male'
image_path = modules.get_module_resource(
'medical_practitioner',
'static/src/img',
'practitioner-%s-avatar.png' % practitioner_gender,
)
return image_path
|
gridsync/gridsync | scripts/make_appimage.py | Python | gpl-3.0 | 4,585 | 0.000218
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
from configparser import RawConfigParser
except ImportError:
from ConfigParser import RawConfigParser
import glob
import os
import shutil
import subprocess
import sys
config = RawConfigParser(allow_no_value=True)
config.read(os.path.join('gridsync', 'resources', 'config.txt'))
settings = {}
for section in config.sections():
if section not in settings:
settings[section] = {}
for option, value in config.items(section):
settings[section][option] = value
name = settings['application']['name']
name_lower = name.lower()
linux_icon = settings['build']['linux_icon']
appdir_usr = os.path.join('build', 'AppDir', 'usr')
appdir_bin = os.path.join(appdir_usr, 'bin')
try:
os.makedirs(appdir_usr)
except OSError:
pass
try:
shutil.copytree(os.path.join('dist', name), appdir_bin)
except OSError:
pass
_, ext = os.path.splitext(linux_icon)
icon_filepath = os.path.abspath(os.path.join('build', name_lower + ext))
shutil.copy2(linux_icon, icon_filepath)
desktop_filepath = os.path.join('build', '{}.desktop'.format(name))
with open(desktop_filepath, 'w') as f:
f.write('''[Desktop Entry]
Categories=Utility;
Type=Application
Name={0}
Exec={1}
Icon={1}
'''.format(name, name_lower)
)
os.environ['LD_LIBRARY_PATH'] = appdir_bin
os.environ['APPIMAGE_EXTRACT_AND_RUN'] = '1'
linuxdeploy_args = [
'linuxdeploy',
'--appdir=build/AppDir',
'--executable={}'.format(os.path.join(appdir_usr, 'bin', name_lower)),
'--icon-file={}'.format(icon_filepath),
'--desktop-file={}'.format(desktop_filepath),
]
try:
returncode = subprocess.call(linuxdeploy_args)
except OSError:
sys.exit(
'ERROR: `linuxdeploy` utility not found. Please ensure that it is '
'on your $PATH and executable as `linuxdeploy` and try again.\n'
'`linuxdeploy` can be downloaded from https://github.com/linuxdeploy/'
'linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage'
)
if returncode:
# XXX Ugly hack/workaround for "ERROR: Strip call failed: /tmp/.mount_linuxdns8a8k/usr/bin/strip: unable to copy file 'build/AppDir/usr/lib/libpython3.7m.so.1.0'; reason: Permission denied" observed on Travis-CI
os.chmod(glob.glob('build/AppDir/usr/lib/libpython*.so.*')[0], 0o755)
subprocess.call(linuxdeploy_args)
for file in sorted(os.listdir(appdir_bin)):
# The `linuxdeploy` utility adds a copy of each library to AppDir/usr/lib,
# however, the main PyInstaller-generated ("gridsync") executable expects
# these libraries to be located in the same directory as the ("gridsync")
# executable itself (resulting in *two* copies of each library and thus
# wasted disk-space); removing the copies inserted by `linuxdeploy` -- and
# and replacing them with symlinks to the originals -- saves disk-space.
dst = 'build/AppDir/usr/lib/{}'.format(file)
if os.path.exists(dst):
try:
os.remove(dst)
except OSError:
print('WARNING: Could not remove file {}'.format(dst))
continue
src = '../bin/{}'.format(file)
print('Creating symlink: {} -> {}'.format(dst, src))
try:
os.symlink(src, dst)
except OSError:
print('WARNING: Could not create symlink for {}'.format(dst))
os.remove('build/AppDir/AppRun')
with open('build/AppDir/AppRun', 'w') as f:
f.write('''#!/bin/sh
exec "$(dirnam
|
e "$(readlink -e "$0")")/usr/bin/{}" "$@"
'''.format(name_lower)
)
os.chmod('build/AppDir/AppRun', 0o755)
# Create the .DirIcon symlink here/now to prevent appimagetool from
# doing it later, thereby allowing the atime and mtime of the symlink
# to be overriden along with all of the other files in the AppDir.
try:
os.symlink(os.path.basename(icon_filepath), "build/AppDir/.DirIcon")
except OSError:
pass
subprocess.call(["python3", "scripts/update_permissions.py", "build/AppDir"])
subprocess.call(["python3", "scripts/update_timestamps.py", "build/AppDir"])
try:
os.mkdir('dist')
except OSError:
pass
try:
subprocess.call([
'appimagetool', 'build/AppDir', 'dist/{}.AppImage'.format(name)
])
except OSError:
sys.exit(
'ERROR: `appimagetool` utility not found. Please ensure that it is '
'on your $PATH and executable as `appimagetool` and try again.\n'
'`appimagetool` can be downloaded from https://github.com/AppImage/A'
'ppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage'
)
|
VillanCh/vscanner | vplugin/nmap/nmap.py | Python | apache-2.0 | 41,045 | 0.004629
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nmap.py - version and date, see below
Source code : https://bitbucket.org/xael/python-nmap
Author :
* Alexandre Norman - norman at xael.org
Contributors:
* Steve 'Ashcrow' Milner - steve at gnulinux.net
* Brian Bustin - brian at bustin.us
* old.schepperhand
* Johan Lundberg
* Thomas D. maaaaz
* Robert Bost
* David Peltier
Licence: GPL v3 or any later version for python-nmap
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
**************
IMPORTANT NOTE
**************
The Nmap Security Scanner used by python-nmap is distributed
under its own licence that you can find at https://svn.nmap.org/nmap/COPYING
Any redistribution of python-nmap along with the Nmap Security Scanner
must conform to the Nmap Security Scanner licence
"""
__author__ = 'Alexandre Norman (norman@xael.org)'
__version__ = '0.6.0'
__last_modification__ = '2016.03.15'
import csv
import io
import os
import re
import shlex
import subprocess
import sys
from xml.etree import ElementTree as ET
try:
from multiprocessing import Process
except ImportError:
# For pre 2.6 releases
from threading import Thread as Process
############################################################################
class PortScanner(object):
"""
PortScanner class allows using nmap from Python
"""
def __init__(self, nmap_search_path=('nmap', '/usr/bin/nmap', '/usr/local/bin/nmap', '/sw/bin/nmap', '/opt/local/bin/nmap')):
"""
Initialize PortScanner module
* detects nmap on the system and nmap version
* may raise PortScannerError exception if nmap is not found in the path
:param nmap_search_path: tuple of strings where to search for the nmap executable. Change this if you want to use a specific version of nmap.
:returns: nothing
"""
self._nmap_path = '' # nmap path
self._scan_result = {}
self._nmap_version_number = 0 # nmap version number
self._nmap_subversion_number = 0 # nmap subversion number
self._nmap_last_output = '' # last full ascii nmap output
is_nmap_found = False # true if we have found nmap
self.__process = None
# regex used to detect nmap (http or https)
regex = re.compile(
'Nmap version [0-9]*\.[0-9]*[^ ]* \( http(|s)://.* \)'
)
# launch 'nmap -V' and look for a version line such as
#'Nmap version 5.0 ( http://nmap.org )'
# This is for Mac OSX. When idle3 is launched from the finder, PATH is not set so nmap was not found
for nmap_path in nmap_search_path:
try:
if sys.platform.startswith('freebsd') \
or sys.platform.startswith('linux') \
or sys.platform.startswith('darwin'):
p = subprocess.Popen([nmap_path, '-V'],
bufsize=10000,
stdout=subprocess.PIPE,
close_fds=True)
else:
p = subprocess.Popen([nmap_path, '-V'],
bufsize=10000,
stdout=subprocess.PIPE)
except OSError:
pass
else:
self._nmap_path = nmap_path # save path
break
else:
raise PortScannerError(
'nmap program was not found in path. PATH is : {0}'.format(
os.getenv('PATH')
)
)
self._nmap_last_output = bytes.decode(p.communicate()[0]) # sav stdout
for line in self._nmap_last_output.split(os.linesep):
if regex.match(line) is not None:
is_nmap_found = True
# Search for version number
regex_version = re.compile('[0-9]+')
regex_subversion = re.compile('\.[0-9]+')
rv = regex_version.search(line)
rsv = regex_subversion.search(line)
if rv is not None and rsv is not None:
# extract version/subversion
self._nmap_version_number = int(line[rv.start():rv.end()])
self._nmap_subversion_number = int(
line[rsv.start()+1:rsv.end()]
)
break
if not is_nmap_found:
raise PortScannerError('nmap program was not found in path')
return
def get_nmap_last_output(self):
"""
Returns the last text output of nmap in raw text
this may be used for debugging purpose
:returns: string containing the last text output of nmap in raw text
"""
return self._nmap_last_output
def nmap_version(self):
"""
returns nmap version if detected (int version, int subversion)
or (0, 0) if unknown
:returns: (nmap_version_number, nmap_subversion_number)
"""
return (self._nmap_version_number, self._nmap_subversion_number)
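# --- Editorial usage sketch (not part of python-nmap itself). ---
# Assuming nmap is installed and discoverable on the PATH:
#
#     nm = PortScanner()
#     major, minor = nm.nmap_version()
#     print('Found nmap {0}.{1}'.format(major, minor))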
def listscan(self, hosts='127.0.0.1'):
"""
do not scan but interpret the target hosts and return a list of hosts
"""
assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts)) # noqa
output = self.scan(hosts, arguments='-sL')
# Test if host was IPV6
try:
if 'looks like an IPv6 target specification' in output['nmap']['scaninfo']['error'][0]: # noqa
self.scan(hosts, arguments='-sL -6')
except KeyError:
pass
return self.all_hosts()
def scan(self, hosts='127.0.0.1', ports=None, arguments='-sV', sudo=False):
"""
Scan given hosts
May raise PortScannerError exception if nmap output was not xml
Test existence of the following key to know
if something went wrong : ['nmap']['scaninfo']['error']
If not present, everything was ok.
:param hosts: string for hosts as nmap use it 'scanme.nmap.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
:param ports: string for ports as nmap use it '22,53,110,143-4564'
:param arguments: string of arguments for nmap '-sU -sX -sC'
:param sudo: launch nmap with sudo if True
:returns: scan_result as a dictionary
"""
if sys.version_info[0]==2:
assert type(hosts) in (str, unicode), 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts)) # noqa
assert type(ports) in (str, unicode, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports)) # noqa
assert type(arguments) in (str, unicode), 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments)) # noqa
else:
assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts)) # noqa
assert type(ports) in (str, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports)) # noqa
assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments)) # noqa
for redirecting_output in ['-oX', '-oA']:
assert redirecting_output not in arguments, 'Xml output can\'t be redirected from command line.\nYou can access it after a scan using:\nnmap.nm.get_nmap_last_output()' # noqa
h_args = shlex.split(hosts)
f_args = shlex.split(arguments)
# Launch sc
|
nkhare/rockstor-core | src/rockstor/storageadmin/urls/users.py | Python | gpl-3.0 | 1,027 | 0.000974
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls import patterns, url
from storageadmin.views import (UserListView, UserDetailView)
from django.conf import settings
urlpatterns = patterns(
'',
# User configuration
url(r'^$', UserListView.as_view()),
url(r'(?P<username>%s)$' % settings.USERNAME_REGEX, UserDetailView.as_view()),
)
|
QualiApps/obdlib | tests/test_utils.py | Python | mit | 6,692 | 0.000299
|
import unittest
import obdlib.utils as utils
class TestUtils(unittest.TestCase):
def setUp(self):
utils.unit_english = 0
def test_rpm(self):
assert utils.rpm('0000') == 0.0
assert utils.rpm('FFFF') == 16383.75
def test_speed(self):
# unit_english == 0
self.assertEqual(utils.speed('00'), 0.0)
self.assertEqual(utils.speed('FF'), 255)
def test_speed_english(self):
# unit_english == 1
utils.unit_english = 1
self.assertEqual(utils.speed('00'), 0.0)
self.assertEqual(utils.speed('FF'), 158.44965396)
def test_load_value(self):
self.assertEqual(utils.load_value('00'), 0)
self.assertEqual(utils.load_value('FF'), 100)
def test_term_fuel(self):
self.assertEqual(utils.term_fuel('00'), -100)
self.assertEqual(utils.term_fuel('FF'), 99.22)
def test_fuel_pressure(self):
# unit_english == 0
self.assertEqual(utils.fuel_pressure('00'), 0)
self.assertEqual(utils.fuel_pressure('FF'), 765)
# unit_english == 1
utils.unit_english = 1
self.assertEqual(utils.fuel_pressure('00'), 0)
self.assertEqual(utils.fuel_pressure('FF'), 110.95)
def test_absolute_pressure(self):
# unit_english == 0
self.assertEqual(utils.absolute_pressure('00'), 0)
self.assertEqual(utils.absolute_pressure('FF'), 255)
# unit_english == 1
utils.unit_english = 1
self.assertEqual(utils.absolute_pressure('00'), 0)
self.assertEqual(utils.absolute_pressure('FF'), 36.98)
def test_timing_advance(self):
self.assertEqual(utils.timing_advance('00'), -64)
self.assertEqual(utils.timing_advance('FF'), 63.5)
def test_air_flow_rate(self):
self.assertEqual(utils.air_flow_rate('0000'), 0)
self.assertEqual(utils.air_flow_rate('FFFF'), 655.35)
def test_throttle_pos(self):
self.assertEqual(utils.throttle_pos('00'), 0)
self.assertEqual(utils.throttle_pos('FF'), 100)
def test_air_status(self):
self.assertIsNone(utils.air_status('00'))
self.assertEqual(utils.air_status('01'), 'Upstream')
self.assertEqual(utils.air_status('02'),
'Downstream of catalytic converter')
self.assertEqual(utils.air_status('04'),
'From the outside atmosphere or off')
self.assertEqual(utils.air_status('08'),
'Pump commanded on for diagnostics')
def test_voltage(self):
self.assertEqual(utils.voltage('00'), 0)
self.assertEqual(utils.voltage('FF'), 1.275)
def test_coolant_temp(self):
self.assertEqual(utils.coolant_temp('00'), -40)
self.assertEqual(utils.coolant_temp('FF'), 215)
# unit_english == 1
utils.unit_english = 1
self.assertEqual(utils.coolant_temp('00'), -40)
self.assertEqual(utils.coolant_temp('FF'), 419.0)
def test_obd_standards(self):
self.assertEqual(utils.obd_standards('FF'), None)
self.assertEqual(utils.obd_standards('01'),
'OBD-II as defined by the CARB')
def test_time(self):
self.assertEqual(utils.time('0000'), 0)
self.assertEqual(utils.time('FFFF'), 65535)
def test_oil_temp(self):
self.assertEqual(utils.oil_temp('00'), -40)
self.assertEqual(utils.oil_temp('FF'), 215)
# unit_english == 1
utils.unit_english = 1
self.assertEqual(utils.oil_temp('00'), -40)
self.assertEqual(utils.oil_temp('FF'), 419.0)
def test_fuel_type(self):
self.assertEqual(utils.fuel_type('00'), 'Not Available')
self.assertEqual(utils.fuel_type('05'), 'Liquefied petroleum gas (LPG)')
self.assertEqual(utils.fuel_type('FF'), None)
def test_vin(self):
self.assertEqual(utils.vin(
'0000003144344750303052353542313233343536'),
'1D4GP00R55B123456')
def test_ecu_name(self):
self.assertEqual(utils.ecu_name(
'3144344750303052353542313233343536'),
'1D4GP00R55B123456'
)
def test_fuel_system_status(self):
self.assertEqual(
utils.fuel_system_status('0100'),
(
'Open loop due to insufficient engine temperature',
'No fuel system available'
)
)
def test_oxygen_sensors(self):
self.assertEqual(utils.oxygen_sensors(
'FC'),
[1, 1, 1, 1, 1, 1, 0, 0]
)
def test_aux_input_status(self):
aux = utils.aux_input_status('FF')
self.assertTrue(aux)
def test_dtc_statuses(self):
expected = {'base_tests': [(0, 0), (0, 1), (0, 1)],
'compression_tests': [(0, 0), (1, 0), (0, 0),
(1, 1), (0, 0), (1, 0)],
'dtc': 3,
'ignition_test': 0,
'mil': 1,
'spark_tests': [(0, 0), (1, 0), (1, 0), (0, 0),
(0, 0), (1, 1), (0, 0), (1, 0)]}
statuses = utils.dtc_statuses('83076504')
self.assertEqual(statuses, expected)
def test_trouble_codes(self):
dtc_s = utils.trouble_codes('0133D0161131')
self.assertEqual(dtc_s, ['P0133', 'U1016', 'P1131'])
dtc_s = utils.trouble_codes('013300000000')
self.assertEqual(dtc_s, ['P0133'])
def test_bitwise_pids(self):
"""
Verify we correctly parse information about supported PIDs on a 1999
Dodge Durango
"""
durango_supported_pids = 'BE3EB810'
supported_pids = utils.bitwise_pids(durango_supported_pids)
assert supported_pids == {
'01': True,
'02': False,
'03': True,
'04': True,
'05': True,
'06': True,
'07': True,
'08': False,
'09': False,
'0A': False,
'0B': True,
'0C': True,
'0D': True,
'0E': True,
'0F': True,
'10': False,
'11': True,
'12': False,
'13': True,
'14': True,
'15': True,
'16': False,
'17': False,
'18': False,
'19': False,
'1A': False,
'1B': False,
'1C': True,
'1D': False,
'1E': False,
'1F': False,
'20': False
}
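# Editorial worked example: 'BE3EB810' is the bitmask 0xBE3EB810 =
# 1011 1110 0011 1110 1011 1000 0001 0000 in binary; reading bits left to
# right, bit k set means PID k+1 is supported, which is exactly the mapping
# asserted above (leading 1 -> PID 01 supported, next 0 -> PID 02 not, etc.).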
suite = unittest.TestLoader().loadTestsFromTestCase(TestUtils)
unittest.TextTestRunner(verbosity=2).run(suite)
|
googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2_versions_delete_version_async.py | Python | apache-2.0 | 1,427 | 0.000701
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteVersion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Versions_DeleteVersion_async]
from google.cloud import dialogflow_v2
async def sample_delete_version():
# Create a client
client = dialogflow_v2.VersionsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteVersionRequest(
name="name_value",
)
# Make the request
await client.delete_version(request=request)
# [END dialogflow_generated_dialogflow_v2_Versions_DeleteVersion_async]
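# Editorial note: sample_delete_version is a coroutine, so a driver such as
# asyncio is needed to run it, e.g.:
#
#     import asyncio
#     asyncio.run(sample_delete_version())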
|
gems-uff/labsys | labsys/main/__init__.py | Python | mit | 206 | 0
|
import os
from .views import blueprint
@blueprint.app_context_processor
def inject_permissions():
|
show_labsys = not os.environ.get('SHOW_LABSYS') == 'False'
return dict(show_labsys=show_labsys)
|
xfxf/veyepar | dj/main/migrations/0007_auto_20160710_1833.py | Python | mit | 560 | 0.001786
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-10 18:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0006_auto_20160616_1640'),
]
operations = [
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(blank=True, default='41086227', help_text='key to allow unauthenticated users to edit this item.', max_length=32, null=True),
),
]
|
foursquare/commons-old | src/python/twitter/pants/base/hash_utils.py | Python | apache-2.0 | 192 | 0.026042
|
import hashlib
def hash_all(strs):
"""Returns a hash of the concatenation of all the strings in strs."""
sha = hashlib.sha1()
for s in strs:
sha.update(s)
return sha.hexdigest()
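# --- Editorial usage sketch (Python 2 era code, where str is bytes). ---
# Because the SHA-1 object is updated with each string in turn, the result is
# the digest of the concatenation:
#
#     hash_all(['foo', 'bar'])   # == hashlib.sha1('foobar').hexdigest()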
|
jicruz/heroku-bot | cogs/general.py | Python | gpl-3.0 | 17,226 | 0.002979
|
import discord
from discord.ext import commands
from .utils.chat_formatting import escape_mass_mentions, italics, pagify
from random import randint
from random import choice
from enum import Enum
from urllib.parse import quote_plus
import datetime
import time
import aiohttp
import asyncio
settings = {"POLL_DURATION" : 60}
class RPS(Enum):
rock = "\N{MOYAI}"
paper = "\N{PAGE FACING UP}"
scissors = "\N{BLACK SCISSORS}"
class RPSParser:
def __init__(self, argument):
argument = argument.lower()
if argument == "rock":
self.choice = RPS.rock
elif argument == "paper":
self.choice = RPS.paper
elif argument == "scissors":
self.choice = RPS.scissors
else:
raise
class General:
"""General commands."""
def __init__(self, bot):
self.bot = bot
self.stopwatches = {}
self.ball = ["As I see it, yes", "It is certain", "It is decidedly so", "Most likely", "Outlook good",
"Signs point to yes", "Without a doubt", "Yes", "Yes – definitely", "You may rely on it", "Reply hazy, try again",
"Ask again later", "Better not tell you now", "Cannot predict now", "Concentrate and ask again",
"Don't count on it", "My reply is no", "My sources say no", "Outlook not so good", "Very doubtful"]
self.poll_sessions = []
@commands.command(hidden=True)
async def ping(self):
"""Pong."""
await self.bot.say("Pong.")
@commands.command()
async def choose(self, *choices):
"""Chooses between multiple choices.
To denote multiple choices, you should use double quotes.
"""
choices = [escape_mass_mentions(c) for c in choices]
if len(choices) < 2:
await self.bot.say('Not enough choices to pick from.')
else:
await self.bot.say(choice(choices))
@commands.command(pass_context=True)
async def roll(self, ctx, number : int = 100):
"""Rolls random number (between 1 and user choice)
Defaults to 100.
"""
author = ctx.message.author
if number > 1:
n = randint(1, number)
await self.bot.say("{} :game_die: {} :game_die:".format(author.mention, n))
else:
await self.bot.say("{} Maybe higher than 1? ;P".format(author.mention))
@commands.command(pass_context=True)
async def flip(self, ctx, user : discord.Member=None):
"""Flips a coin... or a user.
Defaults to coin.
"""
if user != None:
msg = ""
if user.id == self.bot.user.id:
user = ctx.message.author
msg = "Nice try. You think this is funny? How about *this* instead:\n\n"
char = "abcdefghijklmnopqrstuvwxyz"
tran = "ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz"
table = str.maketrans(char, tran)
name = user.display_name.translate(table)
char = char.upper()
tran = "∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z"
table = str.maketrans(char, tran)
name = name.translate(table)
await self.bot.say(msg + "(╯°□°)╯︵ " + name[::-1])
else:
await self.bot.say("*flips a coin and... " + choice(["HEADS!*", "TAILS!*"]))
@commands.command(pass_context=True)
async def rps(self, ctx, your_choice : RPSParser):
"""Play rock paper scissors"""
author = ctx.message.author
player_choice = your_choice.choice
red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))
cond = {
(RPS.rock, RPS.paper) : False,
(RPS.rock, RPS.scissors) : True,
(RPS.paper, RPS.rock) : True,
(RPS.paper, RPS.scissors) : False,
(RPS.scissors, RPS.rock) : False,
(RPS.scissors, RPS.paper) : True
}
if red_choice == player_choice:
outcome = None # Tie
else:
outcome = cond[(player_choice, red_choice)]
if outcome is True:
await self.bot.say("{} You win {}!"
"".format(red_choice.value, author.mention))
elif outcome is False:
await self.bot.say("{} You lose {}!"
"".format(red_choice.value, author.mention))
else:
await self.bot.say("{} We're square {}!"
"".format(red_choice.value, author.mention))
@commands.command(name="8", aliases=["8ball"])
async def _8ball(self, *, question : str):
"""Ask 8 ball a question
Question must end with a question mark.
"""
if question.endswith("?") and question != "?":
await self.bot.say("`" + choice(self.ball) + "`")
else:
await self.bot.say("That doesn't look like a question.")
@commands.command(aliases=["sw"], pass_context=True)
async def stopwatch(self, ctx):
"""Starts/stops stopwatch"""
author = ctx.message.author
if not author.id in self.stopwatches:
self.stopwatches[author.id] = int(time.perf_counter())
await self.bot.say(author.mention + " Stopwatch started!")
else:
tmp = abs(self.stopwatches[author.id] - int(time.perf_counter()))
tmp = str(datetime.timedelta(seconds=tmp))
await self.bot.say(author.mention + " Stopwatch stopped! Time: **" + tmp + "**")
self.stopwatches.pop(author.id, None)
@commands.command()
async def lmgtfy(self, *, search_terms : str):
"""Creates a lmgtfy link"""
search_terms = escape_mass_mentions(search_terms.replace(" ", "+"))
await self.bot.say("https://lmgtfy.com/?q={}".format(search_terms))
@commands.command(no_pm=True, hidden=True)
async def hug(self, user : discord.Member, intensity : int=1):
"""Because everyone likes hugs
Up to 10 intensity levels."""
name = italics(user.display_name)
if intensity <= 0:
msg = "(っ˘̩╭╮˘̩)っ" + name
elif intensity <= 3:
msg = "(っ´▽`)っ" + name
elif intensity <= 6:
msg = "╰(*´︶`*)╯" + name
elif intensity <= 9:
msg = "(つ≧▽≦)つ" + name
elif intensity >= 10:
msg = "(づ ̄ ³ ̄)づ{} ⊂(´・ω・`⊂)".format(name)
await self.bot.say(msg)
@commands.command(pass_context=True, no_pm=True)
async def userinfo(self, ctx, *, user: discord.Member=None):
"""Shows users's informations"""
author = ctx.message.author
server = ctx.message.server
if not user:
user = author
roles = [x.name for x in user.roles if x.name != "@everyone"]
|
joined_at = self.fetch_joined_at(user, server)
since_created = (ctx.message.timestamp - user.created_at).days
since_joined = (ctx.message.timestamp - joined_at).days
user_joined = joined_at.strftime("%d %b %Y %H:%M")
user_created = user.created_at.strftime("%d %b %Y %H:%M")
member_number = sorted(server.members,
key=lambda m: m.joined_at).index(user) + 1
created_on = "{}\n({} days ago)".format(user_created, since_created)
joined_on = "{}\n({} days ago)".format(user_joined, since_joined)
game = "Chilling in {} status".format(user.status)
if user.game is None:
pass
elif user.game.url is None:
game = "Playing {}".format(user.game)
else:
game = "Streaming: [{}]({})".format(user.game, user.game.url)
if roles:
roles = sorted(roles, key=[x.name for x in server.role_hierarchy
if x.name != "@everyone"].index)
roles = ", ".join(roles)
else:
roles = "None"
data =
|
pferreir/indico | indico/modules/oauth/blueprint.py | Python | mit | 2,547 | 0.006675
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
|
from flask import request
from indico.modules.oauth.controllers import (RHOAuthAdmin, RHOAuthAdminApplication, RHOAuthAdminApplicationDelete,
RHOAuthAdminApplicationNew, RHOAuthAdminApplicationReset,
RHOAuthAdminApplicationRevoke, RHOAuthAuthorize, RHOAuthIntrospect,
RHOAuthMetadata, RHOAuthRevoke, RHOAuthToken, RHOAuthUserAppRevoke,
RHOAuthUserProfile)
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('oauth', __name__, template_folder='templates', virtual_template_folder='oauth')
# Application endpoints
_bp.add_url_rule('/.well-known/oauth-authorization-server', 'oauth_metadata', RHOAuthMetadata)
_bp.add_url_rule('/oauth/authorize', 'oauth_authorize', RHOAuthAuthorize, methods=('GET', 'POST'))
_bp.add_url_rule('/oauth/token', 'oauth_token', RHOAuthToken, methods=('POST',))
_bp.add_url_rule('/oauth/introspect', 'oauth_introspect', RHOAuthIntrospect, methods=('POST',))
_bp.add_url_rule('/oauth/revoke', 'oauth_revoke', RHOAuthRevoke, methods=('POST',))
# Server administration
_bp.add_url_rule('/admin/apps/', 'apps', RHOAuthAdmin)
_bp.add_url_rule('/admin/apps/new', 'app_new', RHOAuthAdminApplicationNew, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/apps/<int:id>/', 'app_details', RHOAuthAdminApplication, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/apps/<int:id>/delete', 'app_delete', RHOAuthAdminApplicationDelete, methods=('POST',))
_bp.add_url_rule('/admin/apps/<int:id>/reset', 'app_reset', RHOAuthAdminApplicationReset, methods=('POST',))
_bp.add_url_rule('/admin/apps/<int:id>/revoke', 'app_revoke', RHOAuthAdminApplicationRevoke, methods=('POST',))
# User profile
with _bp.add_prefixed_rules('/user/<int:user_id>', '/user'):
_bp.add_url_rule('/applications/', 'user_profile', RHOAuthUserProfile)
_bp.add_url_rule('/applications/<int:id>/revoke', 'user_app_revoke', RHOAuthUserAppRevoke, methods=('POST',))
@_bp.url_defaults
def _add_user_id(endpoint, values):
if endpoint in {'oauth.user_profile', 'oauth.user_token_revoke'} and 'user_id' not in values:
# Inject user id if it's present in the url
values['user_id'] = request.view_args.get('user_id')
|
mfem/PyMFEM | mfem/_par/eltrans.py | Python | bsd-3-clause | 26,581 | 0.007148
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _eltrans
else:
import _eltrans
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _eltrans.SWIG_PyInstanceMethod_New
_swig_new_static_method = _eltrans.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
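# Editorial note: in SWIG-generated wrappers this decorator is typically
# applied as `@_swig_add_metaclass(_SwigNonDynamicMeta)` (the metaclass is
# defined just below) above a proxy class to forbid adding new attributes.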
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.globals
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.vector
import mfem._par.densemat
import mfem._par.operators
import mfem._par.matrix
import mfem._par.fe
import mfem._par.geom
import mfem._par.intrules
import mfem._par.sparsemat
import mfem._par.fe_base
import mfem._par.fe_fixed_order
import mfem._par.element
import mfem._par.table
import mfem._par.hash
import mfem._par.fe_h1
import mfem._par.fe_nd
import mfem._par.fe_rt
import mfem._par.fe_l2
import mfem._par.fe_nurbs
import mfem._par.fe_pos
import mfem._par.fe_ser
class ElementTransformation(object):
r"""Proxy of C++ mfem::ElementTransformation class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
ELEMENT = _eltrans.ElementTransformation_ELEMENT
BDR_ELEMENT = _eltrans.ElementTransformation_BDR_ELEMENT
EDGE = _eltrans.ElementTransformation_EDGE
FACE = _eltrans.ElementTransformation_FACE
BDR_FACE = _eltrans.ElementTransformation_BDR_FACE
Attribute = property(_eltrans.ElementTransformation_Attribute_get, _eltrans.ElementTransformation_Attribute_set, doc=r"""Attribute : int""")
ElementNo = property(_eltrans.ElementTransformation_ElementNo_get, _eltrans.ElementTransformation_ElementNo_set, doc=r"""ElementNo : int""")
ElementType = property(_eltrans.ElementTransformation_ElementType_get, _eltrans.ElementTransformation_ElementType_set, doc=r"""ElementType : int""")
mesh = property(_eltrans.ElementTransformation_mesh_get, _eltrans.ElementTransformation_mesh_set, doc=r"""mesh : p.mfem::Mesh""")
def Reset(self):
r"""Reset(ElementTransformation self)"""
return _eltrans.ElementTransformation_Reset(self)
Reset = _swig_new_instance_method(_eltrans.ElementTransformation_Reset)
def SetIntPoint(self, ip):
r"""SetIntPoint(ElementTransformation self, IntegrationPoint ip)"""
return _eltrans.ElementTransformation_SetIntPoint(self, ip)
SetIntPoint = _swig_new_instance_method(_eltrans.ElementTransformation_SetIntPoint)
def GetIntPoint(self):
r"""GetIntPoint(ElementTransformation self) -> IntegrationPoint"""
return _eltrans.ElementTransformation_GetIntPoint(self)
GetIntPoint = _swig_new_instance_method(_eltrans.ElementTransformation_GetIntPoint)
def Transform(self, *args):
from .vector import Vector
from .intrules import IntegrationPoint
if isinstance(args[0], IntegrationPoint):
vec = Vector()
_eltrans.ElementTransformation_Transform(self, args[0], vec)
ret = vec.GetDataArray().copy()
return ret
else:
return _eltrans.ElementTransformation_Transform(self, *args)
def Jacobian(self):
r"""Jacobian(ElementTransformation self) -> DenseMatrix"""
return _eltrans.ElementTransformation_Jacobian(self)
Jacobian = _swig_new_instance_method(_eltrans.ElementTransformation_Jacobian)
def Hessian(self):
r"""Hessian(ElementTransformation self) -> DenseMatrix"""
return _eltrans.ElementTransformation_Hessian(self)
Hessian = _swig_new_instance_method(_eltrans.ElementTransformation_Hessian)
def Weight(self):
r"""Weight(ElementTransformation self) -> double"""
return _eltrans.ElementTransformation_Weight(self)
Weight = _swig_new_instance_method(_eltrans.ElementTransformation_Weight)
def AdjugateJacobian(self):
r"""AdjugateJacobian(ElementTransformation self) -> DenseMatrix"""
return _eltrans.ElementTransformation_AdjugateJacobian(self)
AdjugateJacobian = _swig_new_instance_method(_eltrans.ElementTransformation_AdjugateJacobian)
def InverseJacobian(self):
r"""InverseJacobian(ElementTransformation self) -> DenseMatrix"""
return _eltrans.ElementTransformation_InverseJacobian(self)
InverseJacobian = _swig_new_instance_method(_eltrans.ElementTransformation_InverseJacobian)
def Order(self):
r"""Order(ElementTransformation self) -> int"""
return _eltrans.ElementTransformation_Order(self)
Order = _swig_new_instance_method(_eltrans.ElementTransformation_Order)
def OrderJ(self):
r"""OrderJ(ElementTransformation self) -> int"""
return _eltrans.ElementTransformation_OrderJ(self)
OrderJ = _swig_new_instance_method(_eltrans.ElementTransformation_OrderJ)
def OrderW(self):
r"""OrderW(ElementTransformation self) -> int"""
return _eltrans.ElementTransformation_OrderW(self)
OrderW = _swig_new_instance_method(_eltrans.ElementTransformation_OrderW)
def OrderGrad(self, fe):
r"""OrderGrad(ElementTransformation self, FiniteElement fe) -> int"""
return _eltrans.ElementTransformation_OrderGrad(self, fe)
OrderGrad = _swig_new_instance_method(_eltrans.ElementTransformation_OrderGrad)
def GetGeometryType(self):
r"""GetGeometryType(ElementTransformation self) -> mfem::Geometry::Type"""
return _eltrans.ElementTransformation_GetGeometryType(self)
GetGeometryType = _swig_new_instance_method(_eltrans.ElementTransformation_GetGeometryType)
def GetDimension(self):
r"""GetDimension(ElementTransformation self) -> int"""
return _eltrans.ElementTransformation_GetDimension(self)
GetDimension = _swig_new_instance_method(_eltrans.ElementTransformation_GetDimension)
def GetSpaceDim(self):
r"""GetSpaceDim(ElementTransformation self) -> int"""
return _eltrans.ElementTransformation_GetSpaceDim(self)
GetSpaceDim = _swig_new_instance_method(_eltrans.ElementTransformation_GetSpaceDim)
def TransformBack(self, pt, ip):
r"""TransformBack(ElementTransformation self, Vector pt, IntegrationPoint ip) -> int"""
retur
|