| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 distinct value) | license (15 distinct values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|

Each record below is shown as repo_name | path | language | license | size | score on one line, followed by the file text with the prefix, middle, and suffix fields rejoined.

jonathansick/androcmd | scripts/dust_grid.py | Python | mit | size 2,095 | score 0

#!/usr/bin/env python
# encoding: utf-8
"""
Make a grid of synths for a set of attenuations.
2015-04-30 - Created by Jonathan Sick
"""
import argparse
import numpy as np
from starfisher.pipeline import PipelineBase
from androcmd.planes import BasicPhatPlanes
from androcmd.phatpipeline import (
SolarZIsocs, SolarLockfile,
PhatGaussianDust, PhatCrowding)
from androcmd.phatpipeline import PhatCatalog
def main():
args = parse_args()
av_grid = np.arange(0., args.max_av, args.delta_av)
if args.av is not None:
av = float(args.av)
run_pipeline(brick=args.brick, av=av, run_fit=args.fit)
else:
for av in av_grid:
run_pipeline(brick=args.brick, av=av, run_fit=args.fit)
def parse_args():
parser = argparse.ArgumentParser(
description="Grid of synths for a set of Av
|
")
parser.add_argument('brick', type=int)
parser.add_argument('--max-av', type=float, default=1.5)
parser.add_argument('--delta-av', type=float, default=0.1)
parser.add_argument('--fit', action='store_true', default=False)
parser.add_argument('--av', default=None)
return parser.parse_args()
def run_pipeline(brick=23, av=0., run_fit=False):
dataset = PhatCatalog(brick)
    pipeline = Pipeline(root_dir="b{0:d}_{1:.2f}".format(brick, av),
young_av=av, old_av=av, av_sigma_ratio=0.25,
isoc_args=dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang'))
print(pipeline)
print('av {0:.1f} done'.format(av))
if run_fit:
pipeline.fit('f475w_f160w', ['f475w_f160w'], dataset)
pipeline.fit('rgb', ['f475w_f814w_rgb'], dataset)
pipeline.fit('ms', ['f475w_f814w_ms'], dataset)
class Pipeline(BasicPhatPlanes, SolarZIsocs,
SolarLockfile, PhatGaussianDust, PhatCrowding, PipelineBase):
"""A pipeline for fitting PHAT bricks with solar metallicity isochrones."""
def __init__(self, **kwargs):
super(Pipeline, self).__init__(**kwargs)
if __name__ == '__main__':
main()
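
A hedged usage sketch for the script above (the brick number is illustrative; the flags mirror parse_args):

# python dust_grid.py 23                  # grid: av = 0.0, 0.1, ..., 1.4
# python dust_grid.py 23 --av 0.5 --fit   # single attenuation, with fitting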

jendrikseipp/rednotebook | tests/test_filesystem.py | Python | gpl-2.0 | size 420 | score 0

import os
from rednotebook.util.filesystem import get_journal_title
def test_journal_title():
root = os.path.abspath(os.sep)
dirs = [
("/home/my journal", "my journal"),
("/my journal/", "my journal"),
("/home/name/Journal", "Journal"),
("/home/name/jörnal",
|
"jörnal"),
(root, root),
]
    for path, title in dirs:
assert get_journal_title(path) == title
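
For context, a minimal sketch of the behavior these cases pin down, assuming get_journal_title is essentially a trailing-separator-tolerant basename (the real implementation lives in rednotebook.util.filesystem; `os` is already imported above):

def get_journal_title_sketch(path):
    # strip a trailing separator so "/my journal/" and "/my journal" agree
    trimmed = path.rstrip(os.sep)
    # the filesystem root strips to the empty string; return it unchanged
    return os.path.basename(trimmed) if trimmed else path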

google/tangent | tests/test_compile.py | Python | apache-2.0 | size 1,290 | score 0.007752

# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import gast
import pytest
from tangent import compile as compile_
from tangent import quoting
def test_compile():
def f(x):
return x * 2
    f = compile_.compile_function(quoting.parse_function(f))
assert f(2) == 4
assert inspect.getsource(f).split('\n')[0] == 'def f(x):'
def f(x):
return y * 2
f = compile_.compile_function(quoting.parse_function(f), {'y': 3})
assert f(2) == 6
def test_function_compile():
with pytest.raises(TypeError):
compile_.compile_function(quoting.quote('x = y'))
with pytest.raises(ValueError):
compile_.compile_function(gast.parse('x = y'))
if __name__ == '__main__':
assert not pytest.main([__file__])
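
For intuition, a stdlib-only sketch of compiling a function with an injected global, analogous to the {'y': 3} case above (names here are ours, not tangent's):

src = "def g(x):\n    return y * 2\n"
namespace = {'y': 3}
exec(compile(src, '<sketch>', 'exec'), namespace)
assert namespace['g'](2) == 6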

KDD-OpenSource/fexum | features/apps.py | Python | mit | size 145 | score 0

from django.apps import AppConfig
class FeaturesConfig(AppConfig):
name = 'features'
    def ready(self):
        import features.signals
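
For context, a hedged sketch of the features/signals.py module this ready() hook imports (the Feature model and receiver name are hypothetical, not taken from the repository):

from django.db.models.signals import post_save
from django.dispatch import receiver

from features.models import Feature  # hypothetical model


@receiver(post_save, sender=Feature)
def feature_saved(sender, instance, created, **kwargs):
    # react to Feature rows being created or updated
    ...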

priya-pp/Tacker | tacker/db/migration/alembic_migrations/versions/5246a6bd410f_multisite_vim.py | Python | apache-2.0 | size 2,711 | score 0.001475

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""multisite_vim
Revision ID: 5246a6bd410f
Revises: 24bec5f211c7
Create Date: 2016-03-22 14:05:15.129330
"""
# revision identifiers, used by Alembic.
revision = '5246a6bd410f'
down_revision = '24bec5f211c7'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
op.create_table('vims',
sa.Column('id', sa.String(length=255), nullable=False),
sa.Column('type', sa.String(length=255), nullable=False),
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
                    sa.Column('placement_attr', sa.PickleType(), nullable=True),
                    sa.Column('shared', sa.Boolean(), server_default=sa.text(u'true'),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB'
)
op.create_table('vimauths',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('vim_id', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=128), nullable=False),
sa.Column('auth_url', sa.String(length=255), nullable=False),
sa.Column('vim_project', sa.PickleType(), nullable=False),
sa.Column('auth_cred', sa.PickleType(), nullable=False),
sa.ForeignKeyConstraint(['vim_id'], ['vims.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('auth_url')
)
op.add_column(u'devices', sa.Column('placement_attr', sa.PickleType(),
nullable=True))
op.add_column(u'devices', sa.Column('vim_id', sa.String(length=36),
nullable=False))
op.create_foreign_key(None, 'devices', 'vims', ['vim_id'], ['id'])
def downgrade(active_plugins=None, options=None):
op.drop_constraint(None, 'devices', type_='foreignkey')
op.drop_column(u'devices', 'vim_id')
op.drop_column(u'devices', 'placement_attr')
op.drop_table('vimauths')
op.drop_table('vims')
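
These hooks are driven by Alembic; illustrative invocations (deployments typically wrap them in a project-specific db-manage command):

# alembic upgrade 5246a6bd410f      # runs upgrade()
# alembic downgrade 24bec5f211c7    # runs downgrade()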

kpj/PyWave | model.py | Python | mit | size 5,052 | score 0.001781

"""
Implementation of model
"""
import numpy as np
import numpy.random as npr
from scipy import ndimage
from configuration import get_config
config = get_config()
class LatticeState(object):
""" Treat 1D list as 2D lattice and handle coupled system
    This helps with simply passing this object to scipy's odeint
"""
def __init__(self, width, height, pacemakers=[]):
""" Initialize lattice
"""
self.width = width
self.height = height
self.pacemakers = pacemakers
        self.discrete_laplacian = np.ones((3, 3)) * 1/2
        self.discrete_laplacian[1, 1] = -4
self.state_matrix = np.zeros((width, height))
self.tau_matrix = np.ones((width, height)) * (-config.t_arp) # in ARP
def _update_state_matrix(self, camp, exci):
""" Compute state matrix value, with
quiescent/refractory cell -> 0
firing cell -> 1
"""
# this function gets executed once per timestep
for j in range(self.width):
for i in range(self.height):
if self.state_matrix[i, j] == 0: # not firing
self.handle_off_cell(i, j, camp, exci)
else: # firing
self.handle_on_cell(i, j)
def handle_on_cell(self, i, j):
""" Handle cell where state_matrix == 1
"""
self.tau_matrix[i, j] += config.dt
if self.tau_matrix[i, j] >= 0: # end of firing reached
self.state_matrix[i, j] = 0
self.tau_matrix[i, j] = -config.t_arp
def handle_off_cell(self, i, j, camp, exci):
""" Handle cell where state_matrix == 0
"""
tau = self.tau_matrix[i, j]
if tau >= 0: # in RRP
A = ((config.t_rrp + config.t_arp) \
* (config.c_max - config.c_min)) / config.t_rrp
t = (config.c_max - A * (tau / (tau + config.t_arp))) \
* (1 - exci[i, j])
# increase time up to t_rrp
if tau < config.t_rrp:
self.tau_matrix[i, j] += config.dt
# check threshold
if camp[i, j] > t:
self.fire_cell(i, j)
# handle pacemaker
if (i, j) in self.pacemakers and npr.random() < config.p:
self.fire_cell(i, j)
else: # in ARP
self.tau_matrix[i, j] += config.dt
def fire_cell(self, i, j):
""" Fire cell `i`x`j`
"""
self.state_matrix[i, j] = 1
self.tau_matrix[i, j] = -config.t_f
def get_size(self):
""" Return number of cells in underlying system
"""
return self.width * self.height
def _state_vec2camp_exci(self, state):
""" Convert ODE state vector to cAMP and excitability matrices
"""
flat_camp = state[:self.get_size()]
flat_exci = state[self.get_size():]
camp = np.reshape(flat_camp, (self.width, self.height))
exci = np.reshape(flat_exci, (self.width, self.height))
return camp, exci
def _camp_exci2state_vec(self, camp, exci):
""" Reverse of `_state_vec2camp_exci`
"""
flat_camp = np.reshape(camp, self.get_size())
flat_exci = np.reshape(exci, self.get_size())
return np.append(flat_camp, flat_exci)
def get_ode(self, state, t):
""" Return corresponding ODE
Structure:
[
camp00, camp01, .. ,camp0m, camp10, .., campnm
...
exci00, exci01, .. ,exci0m, exci10, .., excinm
]
"""
# parse ODE state
camp, exci = self._state_vec2camp_exci(state)
# compute next iteration
self._update_state_matrix(camp, exci)
next_camp = np.zeros((self.width, self.height))
next_exci = np.zeros((self.width, self.height))
laplacian_conv = ndimage.convolve(
camp, self.discrete_laplacian,
mode='constant', cval=0.0
)
for j in range(self.width):
for i in range(self.height):
next_camp[i, j] = -config.gamma * camp[i, j] \
+ config.r * self.state_matrix[i, j] \
+ config.D * laplacian_conv[i, j]
if exci[i, j] < config.e_max:
next_exci[i, j] = config.eta + config.beta * camp[i, j]
return self._camp_exci2state_vec(next_camp, next_exci)
def parse_result(self, orig_res):
""" Parse integration result
"""
t_range = len(orig_res)
res = orig_res.T
flat_camp = res[:self.get_size()].reshape(self.get_size() * t_range)
flat_exci = res[self.get_size():].reshape(self.get_size() * t_range)
camp = np.reshape(flat_camp, (self.width, self.height, t_range))
exci = np.reshape(flat_exci, (self.width, self.height, t_range))
return camp, exci
def __repr__(self):
""" Nice visual representation of lattice
"""
return '%dx%d' % (self.width, self.height)
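
A hedged usage sketch: integrating the coupled system above with scipy's odeint (lattice size, pacemaker position, and time span are illustrative):

from scipy.integrate import odeint

lattice = LatticeState(10, 10, pacemakers=[(5, 5)])
state0 = lattice._camp_exci2state_vec(
    np.zeros((10, 10)),   # initial cAMP field
    np.zeros((10, 10)))   # initial excitability field
ts = np.arange(0, 10, config.dt)
res = odeint(lattice.get_ode, state0, ts)
camp, exci = lattice.parse_result(res)   # each (width, height, timesteps)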

mwickert/scikit-dsp-comm | sk_dsp_comm/test/test_imports.py | Python | bsd-2-clause | size 1,438 | score 0.000695

from unittest import TestCase
class TestImports(TestCase):
_multiprocess_can_split_ = True
def test_coeff2header_import(self):
import sk_dsp_comm.coeff2header
def test_coeff2header_from(self):
from sk_dsp_comm import coeff2header
def test_digitalcom_import(self):
import sk_dsp_comm.digitalcom
def test_digitalcom_from(self):
from sk_dsp_comm import digitalcom
def test_fec_conv_import(self):
import sk_dsp_comm.fec_conv
    def test_fec_conv_from(self):
        from sk_dsp_comm import fec_conv
def test_fir_design_helper_import(self):
        from sk_dsp_comm import fir_design_helper
def test_fir_design_helper_from(self):
import sk_dsp_comm.fir_design_helper
def test_iir_design_helper_from(self):
        from sk_dsp_comm import iir_design_helper
def test_iir_design_helper_import(self):
import sk_dsp_comm.iir_design_helper
def test_multirate_helper_from(self):
from sk_dsp_comm import multirate_helper
def test_multirate_helper_import(self):
import sk_dsp_comm.multirate_helper
def test_sigsys_from(self):
from sk_dsp_comm import sigsys
def test_sigsys_import(self):
import sk_dsp_comm.sigsys
def test_synchronization_from(self):
from sk_dsp_comm import synchronization
def test_synchronization_import(self):
import sk_dsp_comm.synchronization

mlml/autovot | autovot/bin/auto_vot_append_files.py | Python | lgpl-3.0 | size 4,617 | score 0.003682

#! /usr/bin/env python3
#
# Copyright (c) 2014 Joseph Keshet, Morgan Sonderegger, Thea Knowles
#
# This file is part of Autovot, a package for automatic extraction of
# voice onset time (VOT) from audio files.
#
# Autovot is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Autovot is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Autovot. If not, see
# <http://www.gnu.org/licenses/>.
#
# auto_vot_append_files.py : Append set of features and labels
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import open
from builtins import int
from future import standard_library
standard_library.install_aliases()
import argparse
from helpers.utilities import *
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(description='Append set of features and labels')
parser.add_argument('features_filename', help="front end features filename")
parser.add_argument('labels_filename', help="front end labels filename")
parser.add_argument('appended_features_filename', help="front end features filename to be appended")
parser.add_argument('appended_labels_filename', help="front end labels filename to be appended")
parser.add_argument("--logging_level", help="Level of verbosity of information printed out by this program ("
"DEBUG, INFO, WARNING or ERROR), in order of increasing verbosity. "
"See http://docs.python.org/2/howto/logging for definitions. ("
"default: %(default)s)", default="INFO")
args = parser.parse_args()
logging_defaults(args.logging_level)
# open files
in_features = open(args.features_filename, 'r')
in_labels = open(args.labels_filename, 'r')
# read infra text header
header = in_labels.readline()
dims = header.split()
# read file lines
lines = list()
for x, y in zip(in_features, in_labels):
lines.append((x, y))
# close files
in_features.close()
in_labels.close()
if len(lines) != int(dims[0]):
logging.error("%s and %s are not of the same length or %s is missing a header" % (args.features_filename,
args.labels_filename,
args.labels_filename))
exit(-1)
try:
        # try to open and read the appended files
        app_features = open(args.appended_features_filename, 'r')
        app_labels = open(args.appended_labels_filename, 'r')
# read infra text header
app_header = app_labels.readline()
app_dims = app_header.split()
# read file to lines
for x, y in zip(app_features, app_labels):
lines.append((x, y))
        # close the appended files
        app_features.close()
        app_labels.close()
# assert header
if len(lines) != int(dims[0])+int(app_dims[0]):
logging.error("Something wrong with the header of %s" % args.appended_labels_filename)
exit(-1)
    except OSError as exception:
        # errno 2 (ENOENT) just means the appended files do not exist yet
        if exception.errno != 2:
logging.error("Something wrong with opening %s and %s for reading." % (args.appended_features_filename,
args.appended_labels_filename))
# open appended files for writing
out_features = open(args.appended_features_filename, 'w')
out_labels = open(args.appended_labels_filename, 'w')
# write labels header
header = "%d 2\n" % len(lines)
out_labels.write(header)
# write data
for x, y in lines:
out_features.write(x)
out_labels.write(y)
# close files
out_features.close()
out_labels.close()
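
Illustrative invocation of the script above (file names are placeholders):

# python auto_vot_append_files.py new.feats new.labels all.feats all.labels --logging_level INFO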

otron/zenodo | zenodo/demosite/receivers.py | Python | gpl-3.0 | size 2,660 | score 0.005639

# -*- coding: utf-8 -*-
#
## This file is part of Zenodo.
## Copyright (C) 2012, 2013, 2014 CERN.
##
## Zenodo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Zenodo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
import os
import shutil
from flask import current_app
from invenio.base.factory import with_app_context
@with_app_context(new_context=True)
def post_handler_database_create(sender, default_data='', *args, **kwargs):
"""Load data after demosite creation."""
from invenio.modules.communities.models import Community
print(">>> Creating collections for communities...")
c = Community.query.filter_by(id='zenodo').first()
c.save_collections()
c = Community.query.filter_by(id='ecfunded').first()
c.save_collections()
print(">>> Fixing dbquery for root collection.")
from invenio.modules.search.models import Collection
from invenio.ext.sqlalchemy import db
c = Collection.query.filter_by(id=1).first()
c.dbquery = '980__a:0->Z AND NOT 980__a:PROVISIONAL AND NOT ' \
'980__a:PENDING AND NOT 980__a:SPAM AND NOT 980__a:REJECTED ' \
'AND NOT 980__a:DARK'
db.session.commit()
@with_app_context(new_context=True)
def clean_data_files(sender, *args, **kwargs):
"""Clean data in directories."""
dirs = [
current_app.config['DEPOSIT_STORAGEDIR'],
current_app.config['CFG_TMPDIR'],
current_app.config['CFG_TMPSHAREDDIR'],
current_app.config['CFG_LOGDIR'],
current_app.config['CFG_CACHEDIR'],
current_app.config['CFG_RUNDIR'],
current_app.config['CFG_BIBDOCFILE_FILEDIR'],
]
for d in dirs:
print(">>> Cleaning {0}".format(d))
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
@with_app_context(new_context=True)
def post_handler_demosite_populate(sender, default_data='', *args, **kwargs):
"""Load data after records are created."""

jterrace/sphinxtr | extensions/natbib/latex_codec.py | Python | bsd-2-clause | size 15,250 | score 0.003607

"""latex.py
Character translation utilities for LaTeX-formatted text.
Usage:
- unicode(string,'latex')
- ustring.decode('latex')
are both available just by letting "import latex" find this file.
- unicode(string,'latex+latin1')
- ustring.decode('latex+latin1')
where latin1 can be replaced by any other known encoding, also
become available by calling latex.register().
We also make public a dictionary latex_equivalents,
mapping ord(unicode char) to LaTeX code.
D. Eppstein, October 2003.
"""
from __future__ import generators
import codecs
import re
from backports import Set
def register():
"""Enable encodings of the form 'latex+x' where x describes another encoding.
Unicode characters are translated to or from x when possible, otherwise
expanded to latex.
"""
codecs.register(_registry)
def getregentry():
"""Encodings module API."""
return _registry('latex')
def _registry(encoding):
if encoding == 'latex':
encoding = None
elif encoding.startswith('latex+'):
encoding = encoding[6:]
else:
return None
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
"""Convert unicode string to latex."""
output = []
for c in input:
if encoding:
try:
output.append(c.encode(encoding))
continue
except:
pass
if ord(c) in latex_equivalents:
output.append(latex_equivalents[ord(c)])
else:
output += ['{\\char', str(ord(c)), '}']
return ''.join(output), len(input)
def decode(self,input,errors='strict'):
"""Convert latex source string to unicode."""
if encoding:
input = unicode(input,encoding,errors)
# Note: we may get buffer objects here.
            # It is not permissible to call join on buffer objects
# but we can make them joinable by calling unicode.
# This should always be safe since we are supposed
# to be producing unicode output anyway.
x = map(unicode,_unlatex(input))
return u''.join(x), len(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
def _tokenize(tex):
"""Convert latex source into sequence of single-token substrings."""
start = 0
try:
# skip quickly across boring stuff
pos = _stoppers.finditer(tex).next().span()[0]
except StopIteration:
yield tex
return
while 1:
if pos > start:
yield tex[start:pos]
if tex[start] == '\\' and not (tex[pos-1].isdigit() and tex[start+1].isalpha()):
while pos < len(tex) and tex[pos].isspace(): # skip blanks after csname
pos += 1
while pos < len(tex) and tex[pos] in _ignore:
pos += 1 # flush control characters
if pos >= len(tex):
return
start = pos
if tex[pos:pos+2] in {'$$':None, '/~':None}: # protect ~ in urls
pos += 2
elif tex[pos].isdigit():
while pos < len(tex) and tex[pos].isdigit():
pos += 1
elif tex[pos] == '-':
while pos < len(tex) and tex[pos] == '-':
pos += 1
elif tex[pos] != '\\' or pos == len(tex) - 1:
pos += 1
elif not tex[pos+1].isalpha():
pos += 2
else:
pos += 1
while pos < len(tex) and tex[pos].isalpha():
pos += 1
if tex[start:pos] == '\\char' or tex[start:pos] == '\\accent':
while pos < len(tex) and tex[pos].isdigit():
pos += 1
class _unlatex:
"""Convert tokenized tex into sequence of unicode strings. Helper for decode()."""
    def __iter__(self):
"""Turn self into an iterator. It already is one, nothing to do."""
return self
def __init__(self,tex):
"""Create a new token converter from a string."""
self.tex = tuple(_tokenize(tex)) # turn tokens into indexable list
        self.pos = 0  # index of first unprocessed token
self.lastoutput = 'x' # lastoutput must always be nonempty string
def __getitem__(self,n):
"""Return token at offset n from current pos."""
p = self.pos + n
t = self.tex
return p < len(t) and t[p] or None
def next(self):
"""Find and return another piece of converted output."""
if self.pos >= len(self.tex):
raise StopIteration
nextoutput = self.chunk()
if self.lastoutput[0] == '\\' and self.lastoutput[-1].isalpha() and nextoutput[0].isalpha():
nextoutput = ' ' + nextoutput # add extra space to terminate csname
self.lastoutput = nextoutput
return nextoutput
def chunk(self):
"""Grab another set of input tokens and convert them to an output string."""
for delta,c in self.candidates(0):
if c in _l2u:
self.pos += delta
return unichr(_l2u[c])
elif len(c) == 2 and c[1] == 'i' and (c[0],'\\i') in _l2u:
self.pos += delta # correct failure to undot i
return unichr(_l2u[(c[0],'\\i')])
elif len(c) == 1 and c[0].startswith('\\char') and c[0][5:].isdigit():
self.pos += delta
return unichr(int(c[0][5:]))
# nothing matches, just pass through token as-is
self.pos += 1
return self[-1]
def candidates(self,offset):
"""Generate pairs delta,c where c is a token or tuple of tokens from tex
(after deleting extraneous brackets starting at pos) and delta
is the length of the tokens prior to bracket deletion.
"""
t = self[offset]
if t in _blacklist:
return
elif t == '{':
for delta,c in self.candidates(offset+1):
if self[offset+delta+1] == '}':
yield delta+2,c
elif t == '\\mbox':
for delta,c in self.candidates(offset+1):
yield delta+1,c
elif t == '$' and self[offset+2] == '$':
yield 3, (t,self[offset+1],t)
else:
q = self[offset+1]
if q == '{' and self[offset+3] == '}':
yield 4, (t,self[offset+2])
elif q:
yield 2, (t,q)
yield 1, t
latex_equivalents = {
0x0009: ' ',
0x000a: '\n',
0x0023: '{\#}',
0x0026: '{\&}',
0x00a0: '{~}',
0x00a1: '{!`}',
0x00a2: '{\\not{c}}',
0x00a3: '{\\pounds}',
0x00a7: '{\\S}',
0x00a8: '{\\"{}}',
0x00a9: '{\\copyright}',
0x00af: '{\\={}}',
0x00ac: '{\\neg}',
0x00ad: '{\\-}',
0x00b0: '{\\mbox{$^\\circ$}}',
0x00b1: '{\\mbox{$\\pm$}}',
0x00b2: '{\\mbox{$^2$}}',
0x00b3: '{\\mbox{$^3$}}',
0x00b4: "{\\'{}}",
0x00b5: '{\\mbox{$\\mu$}}',
0x00b6: '{\\P}',
0x00b7: '{\\mbox{$\\cdot$}}',
0x00b8: '{\\c{}}',
0x00b9: '{\\mbox{$^1$}}',
0x00bf: '{?`}',
0x00c0: '{\\`A}',
0x00c1: "{\\'A}",
0x00c2: '{\\^A}',
0x00c3: '{\\~A}',
0x00c4: '{\\"A}',
0x00c5: '{\\AA}',
0x00c6: '{\\AE}',
0x00c7: '{\\c{C}}',
0x00c8: '{\\`E}',
0x00c9: "{\\'E}",
0x00ca: '{\\^E}',
0x00cb: '{\\"E}',
0x00cc: '{\\`I}',
0x00cd: "{\\'I}",
0x00ce: '{\\^I}',
0x00cf: '{\\"I}',
0x00d1: '{\\~N}',
0x00d2: '{\\`O}',
0x00d3: "{\\'O}",
0x00d4: '{\\^O}',
0x00d5: '{\\~O}',
0x00d6: '{\\"O}',
0x00d7: '{\\mbox{$\\times$}}',
0x00d8: '{\\O}',
0x00d9: '{\\`U}',
0x00da: "{\\'U}",
0x00db: '{\\^U}',
0x00dc: '{\\"U}',
0x00dd: "{\\'Y}",
0x00df: '{\\ss}',
0x00e0: '{\\`a}',
0x00e1: "{\\'a}",
0x00e2: '{\\^a}',
0x00e3: '{\\~a}',
0x00e4: '{\\"a}',
0x00e5: '{\\aa}',
0x

YakindanEgitim/EN-LinuxClipper | thrift/transport/TZlibTransport.py | Python | gpl-3.0 | size 8,187 | score 0.006596

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
TZlibTransport provides a compressed transport and transport factory
class, using the python standard library zlib module to implement
data compression.
'''
from __future__ import division
import zlib
from cStringIO import StringIO
from TTransport import TTransportBase, CReadableTransport
class TZlibTransportFactory(object):
'''
Factory transport that builds zlib compressed transports.
This factory caches the last single client/transport that it was passed
and returns the same TZlibTransport object that was created.
This caching means the TServer class will get the _same_ transport
object for both input and output transports from this factory.
    (For non-threaded scenarios only, since the cache only holds one object)
The purpose of this caching is to allocate only one TZlibTransport where
only one is really needed (since it must have separate read/write buffers),
and makes the statistics from getCompSavings() and getCompRatio()
easier to understand.
'''
    # class scoped cache of last transport given and zlibtransport returned
_last_trans = None
_last_z = None
def getTransport(self, trans, compresslevel=9):
'''Wrap a transport , trans, with the TZlibTransport
compressed transport class, returning a new
transport to the caller.
@param compresslevel: The zlib compression level, ranging
from 0 (no compression) to 9 (best compression). Defaults to 9.
@type compresslevel: int
This method returns a TZlibTransport which wraps the
passed C{trans} TTransport derived instance.
'''
if trans == self._last_trans:
return self._last_z
ztrans = TZlibTransport(trans, compresslevel)
self._last_trans = trans
self._last_z = ztrans
return ztrans
class TZlibTransport(TTransportBase, CReadableTransport):
'''
Class that wraps a transport with zlib, compressing writes
and decompresses reads, using the python standard
library zlib module.
'''
# Read buffer size for the python fastbinary C extension,
# the TBinaryProtocolAccelerated class.
DEFAULT_BUFFSIZE = 4096
def __init__(self, trans, compresslevel=9):
'''
Create a new TZlibTransport, wrapping C{trans}, another
TTransport derived object.
@param trans: A thrift transport object, i.e. a TSocket() object.
@type trans: TTransport
@param compresslevel: The zlib compression level, ranging
from 0 (no compression) to 9 (best compression). Default is 9.
@type compresslevel: int
'''
self.__trans = trans
self.compresslevel = compresslevel
self.__rbuf = StringIO()
self.__wbuf = StringIO()
self._init_zlib()
self._init_stats()
def _reinit_buffers(self):
'''
Internal method to initialize/reset the internal StringIO objects
for read and write buffers.
'''
self.__rbuf = StringIO()
self.__wbuf = StringIO()
def _init_stats(self):
'''
Internal method to reset the internal statistics counters
for compression ratios and bandwidth savings.
'''
self.bytes_in = 0
self.bytes_out = 0
self.bytes_in_comp = 0
self.bytes_out_comp = 0
def _init_zlib(self):
'''
Internal method for setting up the zlib compression and
decompression objects.
'''
self._zcomp_read = zlib.decompressobj()
self._zcomp_write = zlib.compressobj(self.compresslevel)
def getCompRatio(self):
'''
Get the current measured compression ratios (in,out) from
this transport.
Returns a tuple of:
(inbound_compression_ratio, outbound_compression_ratio)
The compression ratios are computed as:
compressed / uncompressed
E.g., data that compresses by 10x will have a ratio of: 0.10
    and data that compresses to half of its original size will
have a ratio of 0.5
None is returned if no bytes have yet been processed in
a particular direction.
'''
r_percent, w_percent = (None, None)
if self.bytes_in > 0:
r_percent = self.bytes_in_comp / self.bytes_in
if self.bytes_out > 0:
w_percent = self.bytes_out_comp / self.bytes_out
return (r_percent, w_percent)
def getCompSavings(self):
'''
Get the current count of saved bytes due to data
compression.
Returns a tuple of:
(inbound_saved_bytes, outbound_saved_bytes)
Note: if compression is actually expanding your
data (only likely with very tiny thrift objects), then
the values returned will be negative.
'''
r_saved = self.bytes_in - self.bytes_in_comp
w_saved = self.bytes_out - self.bytes_out_comp
return (r_saved, w_saved)
def isOpen(self):
'''Return the underlying transport's open status'''
return self.__trans.isOpen()
def open(self):
"""Open the underlying transport"""
self._init_stats()
return self.__trans.open()
def listen(self):
'''Invoke the underlying transport's listen() method'''
self.__trans.listen()
def accept(self):
'''Accept connections on the underlying transport'''
return self.__trans.accept()
def close(self):
'''Close the underlying transport,'''
self._reinit_buffers()
self._init_zlib()
return self.__trans.close()
def read(self, sz):
'''
Read up to sz bytes from the decompressed bytes buffer, and
read from the underlying transport if the decompression
buffer is empty.
'''
ret = self.__rbuf.read(sz)
if len(ret) > 0:
return ret
# keep reading from transport until something comes back
while True:
if self.readComp(sz):
break
ret = self.__rbuf.read(sz)
return ret
def readComp(self, sz):
'''
Read compressed data from the underlying transport, then
decompress it and append it to the internal StringIO read buffer
'''
zbuf = self.__trans.read(sz)
zbuf = self._zcomp_read.unconsumed_tail + zbuf
buf = self._zcomp_read.decompress(zbuf)
self.bytes_in += len(zbuf)
self.bytes_in_comp += len(buf)
old = self.__rbuf.read()
self.__rbuf = StringIO(old + buf)
if len(old) + len(buf) == 0:
return False
return True
def write(self, buf):
'''
Write some bytes, putting them into the internal write
buffer for eventual compression.
'''
self.__wbuf.write(buf)
def flush(self):
'''
Flush any queued up data in the write buffer and ensure the
compression buffer is flushed out to the underlying transport
'''
wout = self.__wbuf.getvalue()
if len(wout) > 0:
zbuf = self._zcomp_write.compress(wout)
self.bytes_out += len(wout)
self.bytes_out_comp += len(zbuf)
else:
zbuf = ''
ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
self.bytes_out_comp += len(ztail)
if (len(zbuf) + len(ztail)) > 0:
self.__wbuf = StringIO()
self.__trans.write(zbuf + ztail)
self.__trans.flush()
@property
def cstringio_buf(self):
'''Implement the CReadableTransport interface'''
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
'''Implement the CReadableTransport interface for refill'''
retstring = partialread
if reqlen < self.DEFAULT_BUFFSIZE:
retstring += self.read(self.DEFAULT_BUFFSIZE)
while len(retstring) < reqlen:
retstring += self.read(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
        return self.__rbuf
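
A hedged usage sketch for the factory and transport above (TSocket and the endpoint are placeholders from the wider thrift package):

# sock = TSocket.TSocket('localhost', 9090)
# ztrans = TZlibTransportFactory().getTransport(sock, compresslevel=9)
# ztrans.open(); ztrans.write(payload); ztrans.flush()
# print ztrans.getCompRatio()   # (inbound_ratio, outbound_ratio)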

DrOctogon/appscake-rewrite | config/wsgi.py | Python | bsd-3-clause | size 1,421 | score 0.000704

"""
WSGI config for appscake project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)

SergeyCherepanov/ansible | ansible/paramiko/_version.py | Python | mit | size 80 | score 0

__version_info__ = (2, 4, 2)
__version__ = ".".join(map(str, __version_info__))
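
For the curious, the join-over-map idiom above evaluates to:

# __version__ == "2.4.2"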

Rassilion/ProjectC | web/problems/003/solver.py | Python | gpl-3.0 | size 527 | score 0

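# Leap-year style checker (Python 2): reads an integer from inp/<i>, applies
# the Gregorian divisibility rule (4 -> yes; 100 -> no; 400 -> yes) and writes
# the Turkish answer "EVET" (yes) or "HAYIR" (no) to out/<i>.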
for i in range(1, 101):
print i
asd = open("inp/" + str(i), "r")
s = asd.read()
    s = s[:-1]
n = int(s)
print n
if n % 100 != 0:
if n % 4 == 0:
s = "EVET" + "\n"
else:
s = "HAYIR" + "\n"
else:
if n % 400 == 0:
s = "EVET" + "\n"
print "400e bolunme!!!
|
"
else:
s = "HAYIR" + "\n"
print "100 e bolnmede hayir"
asd.close()
asd = open("out/" + str(i), "w")
asd.write(s)
asd.close()

cydenix/OpenGLCffi | OpenGLCffi/GLES3/EXT/NV/viewport_array.py | Python | mit | size 1,222 | score 0.011457

from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['first', 'count', 'v'])
def glViewportArrayvNV(first, count, v):
pass
@params(api='gles3', prms=['index', 'x', 'y', 'w', 'h'])
def glViewportIndexedfNV(index, x, y, w, h):
pass
@params(api='gles3', prms=['index', 'v'])
def glViewportIndexedfvNV(index, v):
pass
@params(api='gles3', prms=['first', 'count', 'v'])
def glScissorArrayvNV(first, count, v):
pass
@params(api='gles3', prms=['index', 'left', 'bottom', 'width', 'height'])
def glScissorIndexedNV(index, left, bottom, width, height):
pass
@params(api='gles3', prms=['index', 'v'])
def glScissorIndexedvNV(index, v):
pass
@params(api='gles3', prms=['first', 'count', 'v'])
def glDepthRangeArrayfvNV(first, count, v):
pass
@params(api='gles3', prms=['index', 'n', 'f'])
def glDepthRangeIndexedfNV(index, n, f):
pass
@params(api='gles3', prms=['target', 'index', 'data'])
def glGetFloati_vNV(target, index):
pass
@params(api='gles3', prms=['target', 'index'])
def glEnableiNV(target, index):
pass
@params(api='gles3', prms=['target', 'index'])
def glDisableiNV(target, index):
pass
@params(api='gles3', prms=['target', 'index'])
def glIsEnablediNV(target, index):
pass

makennajohnstone/CSS | css/forms.py | Python | mit | size 11,524 | score 0.010413

from django import forms
from django.core.mail import send_mail
from django.core.exceptions import ValidationError
from css.models import CUser, Room, Course, SectionType, Schedule, Section, Availability, FacultyCoursePreferences
from django.http import HttpResponseRedirect
from settings import DEPARTMENT_SETTINGS, HOSTNAME
import re
from django.forms import ModelChoiceField
from django.contrib.auth.models import User
# Login Form
class LoginForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(label='Password', widget=forms.PasswordInput)
#@TODO validate pass?
@staticmethod
def validate_password(password):
if re.match(r'^(?=.*\d)(?=.*[A-Za-z])(?=.*[-._!@#$%^&*?+])[A-Za-z0-9-._!@#$%^&*?+]{8,32}$', password) is None:
raise ValidationError("Attempted CUser creation with invalid password")
# Invite Form
class InviteUserForm(forms.Form):
email = forms.EmailField()
first_name = forms.CharField()
last_name = forms.CharField()
def send_invite(self, usertype, request):
first_name = self.cleaned_data['first_name']
last_name = self.cleaned_data['last_name']
name = first_name + ' ' + last_name
email = self.cleaned_data['email']
host = request.META['HTTP_HOST']
if not re.search(r'http', host):
host = 'http://' + host
link = host + '/register?first_name=' + first_name +'&last_name=' + last_name +'&user_type='+ usertype + '&email=' + email
send_mail('Invite to register for CSS', name + ", you have been invited to register for CSS. Please register using the following link:\n\n "
+ link, 'registration@inviso-css', [self.cleaned_data['email']])
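
A hedged usage sketch for the invite flow above (the request object comes from a Django view; the form data is illustrative):

# form = InviteUserForm({'email': 'a@b.edu', 'first_name': 'Ada', 'last_name': 'B'})
# if form.is_valid():
#     form.send_invite('faculty', request)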
# Registration Form
# @TODO on load, pull fields from query string -> show failure if field not able to be loaded:
# Fields to pull: email, first_name, last_name, user_type
class RegisterUserForm(forms.Form):
first_name = forms.CharField()
last_name = forms.CharField()
email = forms.EmailField()
user_type = forms.CharField()
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Confirm Password', widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
        if kwargs.pop('request') == "GET":
            self.first_name = kwargs.pop('first_name')
            self.last_name = kwargs.pop('last_name')
            self.user_type = kwargs.pop('user_type')
            self.email = kwargs.pop('email')
self.email = kwargs.pop('email')
self.declared_fields['first_name'].initial = self.first_name
self.declared_fields['last_name'].initial = self.last_name
self.declared_fields['email'].initial = self.email
self.declared_fields['user_type'].initial = self.user_type
self.declared_fields['user_type'].disabled = True
super(RegisterUserForm, self).__init__(*args,**kwargs)
def save(self):
user = CUser.create(email=self.cleaned_data['email'],
password=self.cleaned_data['password2'],
user_type=self.cleaned_data['user_type'],
first_name=self.cleaned_data['first_name'],
last_name=self.cleaned_data['last_name'])
user.save()
return user
# Edit User Form
class EditUserForm(forms.Form):
user_email = forms.CharField(widget=forms.HiddenInput(), initial='default@email.com')
first_name = forms.CharField()
last_name = forms.CharField()
password = forms.CharField()
def save(self):
user = CUser.get_user(email=self.cleaned_data['user_email'])
user.set_first_name(self.cleaned_data['first_name'])
user.set_last_name(self.cleaned_data['last_name'])
user.set_password(self.cleaned_data['password'])
user.save()
return user
# Delete Form
class DeleteUserForm(forms.Form):
email = forms.CharField(label='Confirm email')
def delete_user(self):
email = self.cleaned_data['email']
User.objects.filter(username = self.cleaned_data['email']).delete()
class AddRoomForm(forms.Form):
name = forms.CharField()
description = forms.CharField()
capacity = forms.IntegerField()
notes = forms.CharField()
equipment = forms.CharField()
def save(self):
room = Room.objects.create(name=self.cleaned_data['name'], description=self.cleaned_data['description'], capacity=self.cleaned_data['capacity'], notes=self.cleaned_data['notes'], equipment=self.cleaned_data['equipment'])
room.save()
return room
class EditRoomForm(forms.Form):
name = forms.CharField(widget=forms.HiddenInput(), initial='defaultRoom')
description = forms.CharField()
capacity = forms.IntegerField()
notes = forms.CharField()
equipment = forms.CharField()
def save(self):
nameString = self.cleaned_data['name']
room = Room.get_room(nameString)
room.name = self.cleaned_data['name']
room.description = self.cleaned_data['description']
room.capacity = self.cleaned_data['capacity']
room.notes = self.cleaned_data['notes']
room.equipment = self.cleaned_data['equipment']
room.save()
class DeleteRoomForm(forms.Form):
roomName = forms.CharField(widget=forms.HiddenInput(), initial='defaultRoom')
def deleteRoom(self):
nameString=self.cleaned_data['roomName']
Room.objects.filter(name=nameString).delete()
class EditCourseSectionTypeForm(forms.Form):
work_units = forms.IntegerField()
work_hours = forms.IntegerField()
def save(self):
name = self.cleaned_data['name']
work_units = self.cleaned_data['work_units']
work_hours = self.cleaned_data['work_hours']
class AddCourseSectionTypeForm(forms.Form):
course = forms.CharField(widget=forms.HiddenInput(), initial='defaultCourse')
    name = forms.MultipleChoiceField(
required = True,
widget = forms.RadioSelect,
choices = SectionType.get_all_section_types_list
)
work_units = forms.IntegerField()
work_hours = forms.IntegerField()
class AddCourseForm(forms.Form):
course_name = forms.CharField()
description = forms.CharField()
equipment_req = forms.CharField()
def save(self):
course = Course(name = self.cleaned_data['course_name'],
description = self.cleaned_data['description'],
equipment_req = self.cleaned_data['equipment_req'])
        course.save()
class DeleteCourseForm(forms.Form):
course_name = forms.CharField(widget=forms.HiddenInput(), initial='defaultCourse')
def save(self):
course = Course.get_course(name=self.cleaned_data['course_name'])
course.delete()
return
# @TODO Fix naming -> EditCourseForm
class EditCourseForm(forms.Form):
course_name = forms.CharField(widget=forms.HiddenInput(), initial='defaultcourse')
equipment_req = forms.CharField()
description = forms.CharField()
def save(self):
course = Course.get_course(name=self.cleaned_data['course_name'])
course.set_equipment_req(self.cleaned_data['equipment_req'])
course.set_description(self.cleaned_data['description'])
class AddSectionTypeForm(forms.Form):
section_type_name = forms.CharField()
def save(self):
SectionType.create(name=self.cleaned_data['section_type_name'])
# Custom ModelChoiceField for faculty full names
class FacultyModelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.user.first_name + " " + obj.user.last_name
class AddSectionForm(forms.Form):
academic_term = forms.ModelChoiceField(label='Term', queryset=Schedule.objects.values_list('academic_term', flat=True), empty_label=" ")
course = forms.ModelChoiceField(label='Course', queryset=Course.objects.values_list('name', flat=True), empty_label=" ")
start_time = forms.TimeField(label='Start Time', input_formats=('%I:%M %p'))
end_time = forms.TimeField(label='End Time', input_formats=('%I:%M %p'))
days = forms.CharField(label='Days')
days = forms.ChoiceField(label='Days',

Aravinthu/odoo | addons/l10n_ch/tests/test_l10n_ch_isr.py | Python | agpl-3.0 | size 5,070 | score 0.006509

# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.exceptions import ValidationError
class ISRTest(AccountingTestCase):
def create_invoice(self, currency_to_use='base.CHF'):
""" Generates a test invoice """
account_receivable = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1)
currency = self.env.ref(currency_to_use)
partner_agrolait = self.env.ref("base.res_partner_2")
product = self.env.ref("product.product_product_4")
account_revenue = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1)
invoice = self.env['account.invoice'].create({
'partner_id': partner_agrolait.id,
'reference_type': 'none',
'currency_id': currency.id,
'name': 'invoice to client',
'account_id': account_receivable.id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y') + '-12-22',
})
self.env['account.invoice.line'].create({
'product_id': product.id,
'quantity': 1,
'price_unit': 42,
'invoice_id': invoice.id,
'name': 'something',
'account_id': account_revenue.id,
})
invoice.action_invoice_open()
return invoice
def create_account(self, number):
""" Generates a test res.partner.bank. """
return self.env['res.partner.bank'].create({
'acc_number': number
})
def print_isr(self, invoice):
try:
invoice.isr_print()
return True
except ValidationError:
return False
def isr_not_generated(self, invoice):
""" Prints the given invoice and tests that no ISR generation is triggered. """
self.assertFalse(self.print_isr(invoice), 'No ISR should be generated for this invoice')
def isr_generated(self, invoice):
""" Prints the given invoice and tests that an ISR generation is triggered. """
self.assertTrue(self.print_isr(invoice), 'An ISR should have been generated')
def test_l10n_ch_postals(self):
#An account whose number is set to a valid postal number becomes a 'postal'
#account and sets its postal reference field.
account_test_postal_ok = self.create_account('010391391')
self.assertEqual(account_test_postal_ok.acc_type, 'postal', "A valid postal number in acc_number should set its type to 'postal'")
self.assertEqual(account_test_postal_ok.l10n_ch_postal, '010391391', "A postal account should have a postal reference identical to its account number")
#An account whose number is set to a non-postal value should not get the
#'postal' type
account_test_postal_wrong = self.create_account('010391394')
self.assertNotEqual(account_test_postal_wrong.acc_type, 'postal', "A non-postal account cannot be of type 'postal'")
#A swiss IBAN account contains a postal reference
account_test_iban_ok = self.create_account('CH6309000000250097798')
        self.assertEqual(account_test_iban_ok.acc_type, 'iban', "The IBAN must be valid")
self.assertEqual(account_test_iban_ok.l10n_ch_postal, '000250097798', "A valid swiss IBAN should set the postal reference")
#A non-swiss IBAN must not allow the computation of a postal reference
        account_test_iban_wrong = self.create_account('GR1601101250000000012300695')
self.assertEqual(account_test_iban_wrong.acc_type, 'iban', "The IBAN must be valid")
        self.assertFalse(account_test_iban_wrong.l10n_ch_postal, "A non-swiss IBAN should not set the postal reference")
def test_isr(self):
#Let us test the generation of an ISR for an invoice, first by showing an
#ISR report is only generated when Odoo has all the data it needs.
invoice_1 = self.create_invoice('base.CHF')
self.isr_not_generated(invoice_1)
#Now we add an account for payment to our invoice, but still cannot generate the ISR
test_account = self.create_account('250097798')
invoice_1.partner_bank_id = test_account
self.isr_not_generated(invoice_1)
#Finally, we add bank coordinates to our account. The ISR should now be available to generate
test_bank = self.env['res.bank'].create({
'name':'Money Drop',
'l10n_ch_postal_chf':'010391391'
})
test_account.bank_id = test_bank
self.isr_generated(invoice_1)
#Now, let us show that, with the same data, an invoice in euros does not generate any ISR (because the bank does not have any EUR postal reference)
invoice_2 = self.create_invoice('base.EUR')
invoice_2.partner_bank_id = test_account
self.isr_not_generated(invoice_2)

sih4sing5hong5/hue7jip8 | 匯入/management/commands/族語辭典1轉檔.py | Python | mit | size 1,474 | score 0

from os import makedirs
from os.path import join
from posix import listdir
from django.conf import settings
from django.core.management.base import BaseCommand
from libavwrapper.avconv import Input, Output, AVConv
from libavwrapper.codec import AudioCodec, NO_VIDEO
from 匯入.族語辭典 import 代碼對應
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
            '語言',
            type=str,
help='選擇的族語'
)
def handle(self, *args, **參數):
        # check whether avconv is installed
代碼 = 代碼對應[參數['語言']]
語料目錄 = join(settings.BASE_DIR, '語料', '族語辭典', 代碼)
目標目錄 = join(settings.BASE_DIR, '語料', '族語辭典wav', 代碼)
makedirs(目標目錄, exist_ok=True)
for 檔名 in sorted(listdir(語料目錄)):
if 檔名.endswith('.mp3'):
                來源 = join(語料目錄, 檔名)
目標 = join(目標目錄, 檔名[:-4] + '.wav')
目標聲音格式 = AudioCodec('pcm_s16le')
目標聲音格式.channels(1)
目標聲音格式.frequence(16000)
原始檔案 = Input(來源)
網頁檔案 = Output(目標).overwrite()
指令 = AVConv('avconv', 原始檔案, 目標聲音格式, NO_VIDEO, 網頁檔案)
程序 = 指令.run()
程序.wait()
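
English gloss of the handler above: 代碼 is the language code looked up in 代碼對應; 語料目錄 and 目標目錄 are the source and target corpus directories; each .mp3 under 語料/族語辭典/<code> is transcoded with avconv to a 16 kHz, mono, 16-bit PCM .wav under 語料/族語辭典wav/<code>.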

batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/distutils/tests/test_check.py | Python | apache-2.0 | size 5,711 | score 0.000525

"""Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
        cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with non-ASCII characters
metadata = {'url': 'xxx', 'author': '\u00c9ric',
'author_email': 'xxx', 'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = []
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code:: python
def foo():
pass
"""))
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code-block:: python
def foo():
pass
"""))
for rest_with_code in example_rst_docs:
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
self.assertEqual(len(msgs), 0)
else:
self.assertEqual(len(msgs), 1)
self.assertEqual(
str(msgs[0][1]),
'Cannot analyze code. Pygments package not found.'
)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())

utarsuno/quasar_source | deprecated/code_api/code_section.py | Python | mit | size 979 | score 0.022472

# coding=utf-8
"""This module, code_section.py, is an abstraction for code sections. Needed for ordering code chunks."""
class CodeSection(object):
"""Represents a single code section of a source code file."""
def __init__(self, section_name):
self._section_name = section_name
self._code_chunks = []
def add_code_chunk_at_start(self, code_chunk):
"""Adds a code chunk to the start of this c
|
ode section."""
self._code_chunks.insert(0, code_chunk)
def add_code_chunk(self, code_chunk):
"""Adds a code chunk to this code section."""
self._code_chunks.append(code_chunk)
def get_all_code_chunks(self):
"""Returns a list of all the code chunks in this code section."""
return self._code_chunks
    @property
def empty(self) -> bool:
"""Returns a boolean indicating if this code section is empty or not."""
return len(self._code_chunks) == 0
@property
def name(self) -> str:
"""Returns the name of this code section."""
return self._section_name
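
A hedged usage sketch of the class above (plain strings stand in for whatever chunk objects the rest of code_api produces):

section = CodeSection('imports')
section.add_code_chunk('import os')
section.add_code_chunk_at_start('# coding=utf-8')
assert not section.empty and section.name == 'imports'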

pbarton666/buzz_bot | djangoproj/djangoapp/csc/corpus/migrations/0001_initial.py | Python | mit | size 8,362 | score 0.008012

from south.db import db
from django.db import models
from csc.corpus.models import *
class Migration:
def forwards(self, orm):
# Adding model 'TaggedSentence'
db.create_table('tagged_sentences', (
('text', orm['corpus.TaggedSentence:text']),
('language', orm['corpus.TaggedSentence:language']),
('sentence', orm['corpus.TaggedSentence:sentence']),
))
db.send_create_signal('corpus', ['TaggedSentence'])
# Adding model 'Language'
db.create_table('corpus_language', (
('id', orm['corpus.Language:id']),
('name', orm['corpus.Language:name']),
('sentence_count', orm['corpus.Language:sentence_count']),
))
db.send_create_signal('corpus', ['Language'])
# Adding model 'DependencyParse'
db.create_table('dependency_parses', (
('id', orm['corpus.DependencyParse:id']),
('sentence', orm['corpus.DependencyParse:sentence']),
('linktype', orm['corpus.DependencyParse:linktype']),
('word1', orm['corpus.DependencyParse:word1']),
('word2', orm['corpus.DependencyParse:word2']),
('index1', orm['corpus.DependencyParse:index1']),
('index2', orm['corpus.DependencyParse:index2']),
))
db.send_create_signal('corpus', ['DependencyParse'])
# Adding model 'Sentence'
db.create_table('sentences', (
('id', orm['corpus.Sentence:id']),
('text', orm['corpus.Sentence:text']),
('creator', orm['corpus.Sentence:creator']),
('created_on', orm['corpus.Sentence:created_on']),
('language', orm['corpus.Sentence:language']),
('activity', orm['corpus.Sentence:activity']),
('score', orm['corpus.Sentence:score']),
))
db.send_create_signal('corpus', ['Sentence'])
def backwards(self, orm):
# Deleting model 'TaggedSentence'
db.delete_table('tagged_sentences')
# Deleting model 'Language'
db.delete_table('corpus_language')
# Deleting model 'DependencyParse'
db.delete_table('dependency_parses')
# Deleting model 'Sentence'
db.delete_table('sentences')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'corpus.dependencyparse': {
'Meta': {'db_table': "'dependency_parses'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index1': ('django.db.models.fields.IntegerField', [], {}),
'index2': ('django.db.models.fields.IntegerField', [], {}),
'linktype': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sentence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Sentence']"}),
'word1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'word2': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'corpus.language': {
'id': ('django.db.models.fields.CharField', [], {'max_length': '16', 'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sentence_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'corpus.sentence': {
'Meta': {'db_table': "'sentences'"},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Activity']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Language']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {}),
'votes': ('django.contrib.contenttypes.generic.GenericRelation', [], {'to': "orm['voting.Vote']"})
},
'corpus.taggedsentence': {
'Meta': {'db_table': "'tagged_sentences'"},
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Language']"}),
'sentence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Sentence']", 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'events.activity': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'voting.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'db_table': "'votes'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['corpus']
|
rwatson/chromium-capsicum
|
o3d/tests/selenium/selenium_utilities.py
|
Python
|
bsd-3-clause
| 11,831
| 0.007776
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility scripts for selenium.
A collection of utility scripts for selenium test cases to use.
"""
import os
import re
import time
import unittest
import base64
import gflags
import selenium_constants
FLAGS = gflags.FLAGS
SUFFIXES = ["small", "medium", "large"]
def IsValidTestType(test_type):
"""Returns True if test_type is a "small", "medium" or "large"."""
return test_type.lower() in SUFFIXES
def IsValidSuffix(name):
"""Returns True if name ends in a valid test type."""
name = name.lower()
for suffix in SUFFIXES:
if name.endswith(suffix):
return True
return False
def ScreenshotNameFromTestName(name):
name = StripTestTypeSuffix(name)
if name.startswith("Test"):
# Make sure these are in order.
prefixes = ["TestStress", "TestSample", "Test"]
for prefix in prefixes:
if name.startswith(prefix):
name = name[len(prefix):]
break
# Lowercase the name only for custom test methods.
name = name.lower()
name = name.replace("_", "-")
name = name.replace("/", "_")
return name
def StripTestTypeSuffix(name):
"""Removes the suffix from name if it is a valid test type."""
name_lower = name.lower()
for suffix in SUFFIXES:
if name_lower.endswith(suffix):
return name[:-len(suffix)]
return name
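# Hypothetical sanity checks for the suffix helpers above (not part of the
# original file); call _suffix_helper_examples() manually to run them.
def _suffix_helper_examples():
    assert IsValidTestType('Medium')
    assert IsValidSuffix('RenderSmall')
    assert StripTestTypeSuffix('RenderSmall') == 'Render'
    assert StripTestTypeSuffix('Render') == 'Render'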
def GetArgument(string):
"""Returns the value inside the first set of parentheses in a string.
Args:
string: String in the format "identifier(args)"
Returns:
args from string passed in. None if there were no parentheses.
"""
match = re.match("\w+\(([^)]+)\)", string)
if match:
return match.group(1)
return None
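# Hypothetical sanity checks for GetArgument (not part of the original file):
def _get_argument_examples():
    assert GetArgument('screenshot(image1)') == 'image1'
    assert GetArgument('noargs') is None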
def TakeScreenShot(session, browser, client, filename):
"""Takes a screenshot of the o3d display buffer.
This function is the preferred way to capture an image of the plugin.
Uses gflags:
If gflags.FLAGS.screenshots is False then screen shots will not be taken.
gflags.FLAGS.screenshotsdir must be set to the path to save screenshots in.
Args:
session: Selenium session.
browser: Name of the browser running the test.
client: String that in javascript will return the o3d client.
filename: Name of screenshot.
Returns:
success: True on success, False on failure.
"""
# If screenshots enabled
if gflags.FLAGS.screenshots:
full_path = os.path.join(os.getcwd(),
FLAGS.screenshotsdir,
filename)
return TakeScreenShotAtPath(session,
browser,
client,
full_path)
else:
# Screenshots not enabled, return true (success).
return True
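# Hypothetical call sketch (not part of the original file); `session` is a
# live Selenium session and 'g_client' names the page's o3d client object:
# ok = TakeScreenShot(session, 'chrome', 'g_client', 'my-test')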
def TakeScreenShotAtPath(session,
browser,
client,
filename):
"""Takes a screenshot of the o3d display buffer.
This should be used by tests that need to specify exactly where to save the
image or don't want to use gflags.
Args:
session: Selenium session.
browser: Name of the browser running the test.
client: String that in javascript will return the o3d client.
filename: Full path to screenshot to be saved.
Returns:
success: True on success, False on failure.
"""
session.window_focus()
# Resize window, and client area if needed.
session.run_script(
"(function() {\n"
" var needResize = false;\n"
" var divs = window.document.getElementsByTagName('div');\n"
" for (var ii = 0; ii < divs.length; ++ii) {\n"
" var div = divs[ii];\n"
" if (div.id && div.id == 'o3d') {\n"
" var widthSpec = div.style.width;\n"
" if (widthSpec.indexOf('%') >= 0) {\n"
" div.style.width = '800px';\n"
" div.style.height = '600px';\n"
" needResize = true;\n"
" break;\n"
" }\n"
" }\n"
" }\n"
" window.o3d_seleniumNeedResize = needResize;\n"
"} ());\n")
need_client_resize = (
session.get_eval("window.o3d_selen
|
iumNeedResize") == "true")
if need_client_resize:
session.wait_for_condition(
"window.%s.width == 800 && window.%s.height == 600" % (client, client),
20000)
else:
session.run_script("window.resizeTo(%d, %d)" %
(selenium_constants.RESIZE_WIDTH,
selenium_constants.RESIZE_HEIGHT))
# Execute screenshot capture code
# Replace all backslashes with forward slashes so it is parsed correctly
# by Javascript
full_path = filename.replace("\\", "/")
# Attempt to take a screenshot of the display buffer
eval_string = ("%s.toDataURL()" % client)
# Set Post render call back to take screenshot
script = ["window.g_selenium_post_render = false;",
"window.g_selenium_save_screen_result = false;",
"var frameCount = 0;",
"%s.setPostRenderCallback(function() {" % client,
" ++frameCount;",
" if (frameCount >= 3) {",
" %s.clearPostRenderCallback();" % client,
" window.g_selenium_save_screen_result = %s;" % eval_string,
" window.g_selenium_post_render = true;",
" } else {",
" %s.render()" % client,
" }",
"})",
"%s.render()" % client]
session.run_script("\n".join(script))
# Wait for screenshot to be taken.
session.wait_for_condition("window.g_selenium_post_render", 20000)
# Get result
data_url = session.get_eval("window.g_selenium_save_screen_result")
expected_header = "data:image/png;base64,"
if data_url.startswith(expected_header):
png = base64.b64decode(data_url[len(expected_header):])
file = open(full_path + ".png", 'wb')
file.write(png)
file.close()
return True
return False
class SeleniumTestCase(unittest.TestCase):
"""Wrapper for TestCase for selenium."""
def __init__(self, name, browser, path_to_html, test_type=None,
sample_path=None, options=None):
"""Constructor for SampleTests.
Args:
name: Name of unit test.
session: Selenium session.
browser: Name of browser.
path_to_html: path to html from server root
test_type: Type of test ("small", "medium", "large")
sample_path: Path to test.
load_timeout: Time to wait for page to load (ms).
run_timeout: Time to wait for test to run.
options: list of option strings.
"""
unittest.TestCase.__init__(self, name)
self.name = name
self.session = None
self.browser = browser
self.test_type = test_type
|
AndroidOpenDevelopment/android_external_chromium_org
|
tools/android/adb_profile_chrome.py
|
Python
|
bsd-3-clause
| 288
| 0.003472
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from adb_profile_chrome import main
if __name__ == '__main__':
sys.exit(main.main())
|
kasemir/org.csstudio.display.builder
|
org.csstudio.display.builder.model/examples/python/jython.py
|
Python
|
epl-1.0
| 377
| 0.005305
|
# Script executed by jython
# Can import any Java package
from org.csstudio.display.builder.runtime.script import PVUtil
# Can also import some python code that's available under Jython
import sys, time
trigger = PVUtil.getInt(pvs[0])
if trigger:
info = "%s,\ninvoked at %s" % (sys.version, time.strftime("%Y-%m-%d %H:%M:%S"))
widget.setPropertyValue("text", info)
|
CDSherrill/psi4
|
tests/pytests/test_misc.py
|
Python
|
lgpl-3.0
| 3,413
| 0.004102
|
import pytest
from .addons import using_networkx
from .utils import *
import math
import numpy as np
import qcelemental as qcel
import psi4
from psi4.driver import qcdb
pytestmark = pytest.mark.quick
def hide_test_xtpl_fn_fn_error():
psi4.geometry('He')
with pytest.raises(psi4.UpgradeHelper) as e:
psi4.energy('cbs', scf_basis='cc-pvdz', scf_scheme=psi4.driver_cbs.xtpl_highest_1)
assert 'Replace extrapolation function with function name' in str(e.value)
def hide_test_xtpl_cbs_fn_error():
psi4.geometry('He')
with pytest.raises(psi4.UpgradeHelper) as e:
psi4.energy(psi4.cbs, scf_basis='cc-pvdz')
#psi4.energy(psi4.driver.driver_cbs.complete_basis_set, scf_basis='cc-pvdz')
assert 'Replace cbs or complete_basis_set function with cbs string' in str(e.value)
@pytest.mark.parametrize("inp,out", [
((2, 'C2V'), 2),
(('A2', 'c2v'), 2),
(('2', 'C2V'), 2),
])
def test_parse_cotton_irreps(inp, out):
idx = psi4.driver.driver_util.parse_cotton_irreps(*inp)
assert idx == out
@pytest.mark.parametrize("inp", [
((5, 'cs')),
(('5', 'cs')),
((0, 'cs')),
(('a2', 'cs')),
])
def test_parse_cotton_irreps_error(inp):
with pytest.raises(psi4.ValidationError) as e:
psi4.driver.driver_util.parse_cotton_irreps(*inp)
assert 'not valid for point group' in str(e.value)
# <<< TODO Deprecated! Delete in Psi4 v1.5 >>>
@using_networkx
def test_deprecated_qcdb_align_b787():
soco10 = """
O 1.0 0.0 0.0
C 0.0 0.0 0.0
O -1.0 0.0 0.0
units ang
"""
sooc12 = """
O 1.2 4.0 0.0
O -1.2 4.0 0.0
C 0.0 4.0 0.0
units ang
"""
ref_rmsd = math.sqrt(2. * 0.2 * 0.2 / 3.) # RMSD always in Angstroms
oco10 = qcel.molparse.from_string(soco10)
oco12 = qcel.molparse.from_string(sooc12)
oco10_geom_au = oco10['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
oco12_geom_au = oco12['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
with pytest.warns(FutureWarning) as err:
rmsd, mill = qcdb.align.B787(
oco10_geom_au, oco12_geom_au, np.array(['O', 'C', 'O']), np.array(['O', 'O', 'C']), verbose=4, do_plot=False)
assert compare_values(ref_rmsd, rmsd, 6, 'known rmsd B787')
def test_deprecated_qcdb_align_scramble():
with pytest.warns(FutureWarning) as err:
mill = qcdb.align.compute_scramble(4, do_resort=False, do_shift=False, do_rotate=False, deflection=1.0, do_mirror=False)
assert compare_arrays([0,1,2,3], mill.atommap, 4, 'atommap')
# <<< TODO Deprecated! Delete when the error messages are removed. >>>
def test_deprecated_dcft_calls():
psi4.geometry('He')
err_substr = "All instances of 'dcft' should be replaced with 'dct'."
driver_calls = [psi4.energy, psi4.optimize, psi4.gradient, psi4.hessian, psi4.frequencies]
for call in driver_calls:
with pytest.raises(psi4.UpgradeHelper) as e:
call('dcft', basis='cc-pvdz')
assert err_substr in str(e.value)
# The errors trapped below are C-side, so they're nameless, Py-side.
with pytest.raises(Exception) as e:
psi4.set_module_options('dcft', {'e_convergence': 9})
assert err_substr in str(e.value)
with pytest.raises(Exception) as e:
psi4.set_module_options('dct', {'dcft_functional': 'odc-06'})
assert err_substr in str(e.value)
|
tndatacommons/tndata_backend
|
tndata_backend/goals/utils.py
|
Python
|
mit
| 3,686
| 0
|
import csv
import re
from io import TextIOWrapper
from django.conf import settings
from django.core.cache import cache
from django.utils.termcolors import colorize
# Import clog if we're in debug otherwise make it a noop
if settings.DEBUG:
from clog.clog import clog
else:
def clog(*args, **kwargs):
pass
def pop_first(data, key):
"""Pop the given key from the given `data` dict, and if the popped item
is a list, return the first element. This is handy for those cases where,
in the api, `request.data.pop(whatever)` sometimes gives a list and other
times is an object.
"""
result = data.pop(key)
if isinstance(result, list):
result = result[0]
return result
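def _pop_first_example():
    # Hypothetical sanity checks (not part of the original file):
    data = {'key': ['first', 'second'], 'other': 'value'}
    assert pop_first(data, 'key') == 'first'
    assert pop_first(data, 'other') == 'value'
    assert not data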
def num_user_selections(obj):
"""Return a count of the given object's UserXXXX instances (where XXXX is
the name of one of our content models). This will tell how many users
have selected this item.
Valid for Category, Goal, Action instances.
"""
model = obj._meta.model_name
if model not in ['category', 'goal', 'action']:
raise ValueError("{0} is not a supported object type".format(model))
method = "user{0}_set".format(model)
return getattr(obj, method).count()
# ------------------------------------------
#
# Helper functions for cleaning text content
#
# ------------------------------------------
def clean_title(text):
"""Titles: collapse all whitespace, remove ending periods, strip."""
if text:
text = re.sub(r'\s+', ' ', text).strip() # collapse whitespace
if text.endswith("."):
text = text[:-1]
return text
def clean_notification(text):
"""Notification text: collapse all whitespace, strip, include an ending
period (if not a ? or a !).
"""
if text:
text = re.sub(r'\s+', ' ', text).strip() # collapse whitespace
if text[-1] not in ['.', '?', '!']:
text += "."
return text
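def _cleaning_examples():
    # Hypothetical sanity checks (not part of the original file):
    assert clean_title('  A   title.  ') == 'A title'
    assert clean_notification('Do the thing') == 'Do the thing.'
    assert clean_notification('Ready?') == 'Ready?'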
def strip(text):
"""Conditially call text.strip() if the input text is truthy."""
if text:
text = text.strip()
return text
def read_uploaded_csv(uploaded_file, encoding='utf-8', errors='ignore'):
"""This is a generator that takes an uploaded file (such as an instance of
InMemoryUploadedFile.file), converts it to a string (instead of bytes)
representation, then parses it as a CSV.
Returns a list of lists containing strings, and removes any empty rows.
NOTES:
1. This makes a big assumption about utf-8 encodings, and the errors
param means we potentially lose data!
2. InMemoryUploadedFile. See: http://stackoverflow.com/a/16243182/182778
"""
file = TextIOWrapper(
uploaded_file.file,
encoding=encoding,
newline='',
errors=errors
)
for row in csv.reader(file):
if any(row):
yield row
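def _read_uploaded_csv_example():
    # Hypothetical sketch (not part of the original file): any object with a
    # binary `.file` attribute stands in for the Django upload object.
    import io
    class _FakeUpload:
        def __init__(self, payload):
            self.file = io.BytesIO(payload)
    rows = list(read_uploaded_csv(_FakeUpload(b'a,b\n,\n1,2\n')))
    assert rows == [['a', 'b'], ['1', '2']]  # the empty row is dropped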
def delete_content(prefix):
"""Delete content whose title/name starts with the given prefix."""
from goals.models import Action, Category, Goal, Trigger
print("Deleting content that startswith='{}'".format(prefix))
actions = Action.objects.filter(title__startswith=prefix)
print("Deleting {} Actions...".format(actions.count()))
actions.delete()
triggers = Trigger.objects.filter(name__startswith=prefix)
print("Deleting {} Triggers...".format(triggers.count()))
triggers.delete()
goals = Goal.objects.filter(title__startswith=prefix)
print("Deleting {} Goals...".format(goals.count()))
goals.delete()
cats = Category.objects.filter(title__startswith=prefix)
print("Deleting {} Categories...".format(cats.count()))
cats.delete()
print("...done.")
|
derekmd/opentag-presenter
|
tags/zschemaname.py
|
Python
|
bsd-2-clause
| 2,160
| 0.056019
|
from font import font
class zschemaname( font ):
"""
Displays a header name for a Z Schema. It may contain text, images,
equations, etc... but the width of it should be kept to a minimum so
it isn't wider than the containing Z Schema
box. See
<a href="zschema.html"><zschema></a> for proper usage.
"""
def __init__( self, *args ):
"""
Initiate the container, contents, and properties.
-*args, arguments for the for constructor.
"""
apply( font.__init__, (self,) + args )
self.setColorDefined( self.hasProperty("color") )
def render( self, app, x, y ):
"""
-app, SlideApplication object
-x, x coordinate to start drawing at
-y, y coordinate to start drawing at
Returns x, y coordinates where the rendering left off.
"""
#
# Don't draw anything if this isn't a direct child of a
# <zschema> tag in the XML document.
#
from zschema import zschema
if not isinstance(self.getContainer(), zschema):
return x, y
if not self.colorDefined():
borderQColor = self.getContainer().getBorderColor()
self.setProperty( "color", str(borderQColor.name()) )
container = self.getContainer()
self.setProperty( "marginleft",
container.getProperty("marginleft") + \
container.getProperty("cellspacing") + \
16 )
self.setProperty( "marginright",
app.getWidth() - \
(x + container.getWidth()) + \
self.getProperty("cellpadding") + \
16)
x = self.getProperty( "marginleft" )
return font.render( self, app, x, y )
def move( self, x, y ):
"""
"""
x = x + 16
font.move( self, x, y )
def setColorDefined( self, colorDefined ):
"""
"""
self.__colorDefined = colorDefined
def colorDefined( self ):
"""
"""
try:
return self.__colorDefined
except AttributeError:
self.__colorDefined = False
return False
def getHtml( self ):
"""
Get the HTML associated with this object.
Returns a list of html strings, with each entry being a line
in a html file.
"""
return font.getHtml( self )
|
pebble/spacel-provision
|
src/test/provision/app/alarm/endpoint/__init__.py
|
Python
|
mit
| 688
| 0
|
import unittest
RESOURCE_NAME = 'Test_Resource'
class BaseEndpointTest(unittest.TestCase):
def setUp(self):
self.endpoint = None
self.resources = {}
self.template = {
'Resources': self.resources
}
def test_resource_name(self):
if self.endpoint:
resource_name = self.endpoint.resource_name(RESOURCE_NAME)
self.assertEquals(self.topic_resource(), resource_name)
def subscriptions(self):
resource_name = self.topic_resource()
return self.resources[resource_name]['Properties']['Subscription']
def topic_resource(self):
raise ValueError('Must override topic_resource.')
|
NaN-tic/nereid
|
nereid/config.py
|
Python
|
gpl-3.0
| 1,087
| 0.00184
|
#This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import imp
from flask.config import ConfigAttribute, Config as ConfigBase # noqa
class Config(ConfigBase):
"Configuration without the root_path"
def __init__(self, defaults=None):
dict.__init__(self, defaults or {})
def from_pyfile(self, filename):
"""
Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
"""
d = imp.new_module('config')
d.__file__ = filename
try:
execfile(filename, d.__dict__)
except IOError, e:
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
|
scienceopen/histutils
|
XMLparamPrint.py
|
Python
|
mit
| 340
| 0
|
#!/usr/bin/env python
"""
demos reading HiST camera parameters from XML file
"""
from histutils.hstxmlparse import xmlparam
from argparse import ArgumentParser
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("fn", help="xml filename to parse")
p = p.parse_args()
params = xmlparam(p.fn)
print(params)
|
wunderlist/hamustro
|
utils/send_single_message.py
|
Python
|
mit
| 1,002
| 0.00499
|
from __future__ import print_function
import os
import time
import json
import datetime
import argparse
import requests
from message import Message
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f','--format', default="protobuf", choices=["protobuf","json"], help="message format")
parser.add_argument('CONFIG', type=argparse.FileType('r'), help="configuration file")
parser.add_argument('URL', help="tavis url")
args = parser.parse_args()
config = json.load(args.CONFIG)
shared_secret = config.get('shared_secret', 'ultrasafesecret')
msg = Message(random_payload=False)
msg.time = int(time.time())
resp = requests.post(args.URL, headers={
'X-Hamustro-Time': msg.time,
'X-Hamustro-Signature': msg.signature(shared_secret, args.format),
'Content-Type': 'application/{}; charset=utf-8'.format(args.format)
}, data=msg.get_body(args.format))
print('Response code: {}'.format(resp.status_code))
|
nkgilley/home-assistant
|
homeassistant/components/geofency/__init__.py
|
Python
|
apache-2.0
| 4,573
| 0.000219
|
"""Support for Geofency."""
import logging
from aiohttp import web
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_WEBHOOK_ID,
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_NOT_HOME,
)
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import slugify
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_MOBILE_BEACONS = "mobile_beacons"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{
vol.Optional(CONF_MOBILE_BEACONS, default=[]): vol.All(
cv.ensure_list, [cv.string]
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
ATTR_ADDRESS = "address"
ATTR_BEACON_ID = "beaconUUID"
ATTR_CURRENT_LATITUDE = "currentLatitude"
ATTR_CURRENT_LONGITUDE = "currentLongitude"
ATTR_DEVICE = "device"
ATTR_ENTRY = "entry"
BEACON_DEV_PREFIX = "beacon"
LOCATION_ENTRY = "1"
LOCATION_EXIT = "0"
TRACKER_UPDATE = f"{DOMAIN}_tracker_update"
def _address(value: str) -> str:
r"""Coerce address by replacing '\n' with ' '."""
return value.replace("\n", " ")
WEBHOOK_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ADDRESS): vol.All(cv.string, _address),
vol.Required(ATTR_DEVICE): vol.All(cv.string, slugify),
vol.Required(ATTR_ENTRY): vol.Any(LOCATION_ENTRY, LOCATION_EXIT),
vol.Required(ATTR_LATITUDE): cv.latitude,
vol.Required(ATTR_LONGITUDE): cv.longitude,
vol.Required(ATTR_NAME): vol.All(cv.string, slugify),
vol.Optional(ATTR_CURRENT_LATITUDE): cv.latitude,
vol.Optional(ATTR_CURRENT_LONGITUDE): cv.longitude,
vol.Optional(ATTR_BEACON_ID): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, hass_config):
"""Set up the Geofency component."""
config = hass_config.get(DOMAIN, {})
mobile_beacons = config.get(CONF_MOBILE_BEACONS, [])
hass.data[DOMAIN] = {
"beacons": [slugify(beacon) for beacon in mobile_beacons],
"devices": set(),
"unsub_device_tracker": {},
}
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook from Geofency."""
try:
data = WEBHOOK_SCHEMA(dict(await request.post()))
except vol.MultipleInvalid as error:
return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY)
if _is_mobile_beacon(data, hass.data[DOMAIN]["beacons"]):
return _set_location(hass, data, None)
if data["entry"] == LOCATION_ENTRY:
location_name = data["name"]
else:
location_name = STATE_NOT_HOME
if ATTR_CURRENT_LATITUDE in data:
data[ATTR_LATITUDE] = data[ATTR_CURRENT_LATITUDE]
data[ATTR_LONGITUDE] = data[ATTR_CURRENT_LONGITUDE]
return _set_location(hass, data, location_name)
def _is_mobile_beacon(data, mobile_beacons):
"""Check if we have a mobile beacon."""
return ATTR_BEACON_ID in data and data["name"] in mobile_beacons
def _device_name(data):
"""Return name of device tracker."""
if ATTR_BEACON_ID in data:
return f"{BEACON_DEV_PREFIX}_{data['name']}"
return data["device"]
def _set_location(hass, data, location_name):
"""Fire HA event to set location."""
device = _device_name(data)
async_dispatcher_send(
hass,
TRACKER_UPDATE,
device,
(data[ATTR_LATITUDE], data[ATTR_LONGITUDE]),
location_name,
data,
)
return web.Response(text=f"Setting location for {device}", status=HTTP_OK)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, "Geofency", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, DEVICE_TRACKER)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)()
await hass.config_entries.async_forward_entry_unload(entry, DEVICE_TRACKER)
return True
# pylint: disable=invalid-name
async_remove_entry = config_entry_flow.webhook_async_remove_entry
|
macobo/documentation
|
code_snippets/results/result.api-comment-edit.py
|
Python
|
bsd-3-clause
| 224
| 0.017857
|
{'comment': {'handle': 'matt@example.com',
'id': 2603645287324504065,
'message': 'I think differently now.',
'resource': '/api/v1/comments/2603645287324504065',
'url': '/event/jump_to?event_id=2603645287324504065'}}
|
adrianpaesani/odoo-argentina
|
l10n_ar_invoice/models/afip.py
|
Python
|
agpl-3.0
| 6,822
| 0.000293
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
_name = 'afip.incoterm'
_description = 'Afip Incoterm'
afip_code = fields.Char(
'Code', required=True)
name = fields.Char(
'Name', required=True)
class afip_point_of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Added by another module
# ('electronic', 'Electronic'),
# ('fiscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO: improve this so it uses the translated label from the selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
_name = 'afip.document_class'
_description = 'Afip Document Class'
name = fields.Char(
'Name', size=120)
doc_code_prefix = fields.Char(
'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
afip_code = fields.Integer(
'AFIP Code', required=True)
document_letter_id = fields.Many2one(
'afip.document_letter', 'Document Letter')
report_name = fields.Char(
'Name on Reports',
help='Name that will be printed in reports, for example "CREDIT NOTE"')
document_type = fields.Selection([
('invoice', 'Invoices'),
('credit_note', 'Credit Notes'),
('debit_note', 'Debit Notes'),
('receipt', 'Receipt'),
('ticket', 'Ticket'),
('in_document', 'In Document'),
('other_document', 'Other Documents')
],
string='Document Type',
help='It defines some behaviours on automatic journal selection and\
in menus where it is shown.')
active = fields.Boolean(
'Active', default=True)
class afip_document_letter(models.Model):
_name = 'afip.document_letter'
_description = 'Afip Document letter'
name = fields.Char(
'Name', size=64, required=True)
afip_document_class_ids = fields.One2many(
'afip.document_class', 'document_letter_id', 'Afip Document Classes')
issuer_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_issuer_rel',
'letter_id', 'responsability_id', 'Issuers',)
receptor_ids = fields.Many2many(
'afip.responsability', 'afip_doc_letter_receptor_rel',
'letter_id', 'responsability_id', 'Receptors',)
active = fields.Boolean(
'Active', default=True)
vat_discriminated = fields.Boolean(
'Vat Discriminated on Invoices?',
help="If True, the vat will be discriminated on invoice report.")
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
_name = 'afip.responsability'
_description = 'AFIP VAT Responsability'
name = fields.Char(
'Name', size=64, required=True)
code = fields.Char(
'Code', size=8, required=True)
active = fields.Boolean(
'Active', default=True)
issued_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_issuer_rel',
'responsability_id', 'letter_id', 'Issued Document Letters')
received_letter_ids = fields.Many2many(
'afip.document_letter', 'afip_doc_letter_receptor_rel',
'responsability_id', 'letter_id', 'Received Document Letters')
vat_tax_required_on_sales_invoices = fields.Boolean(
'VAT Tax Required on Sales Invoices?',
help='If True, then a vay tax is mandatory on each sale invoice for companies of this responsability',
)
_sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
_name = 'afip.document_type'
_description = 'AFIP document types'
name = fields.Char(
'Name', size=120, required=True)
code = fields.Char(
'Code', size=16, required=True)
afip_code = fields.Integer(
'AFIP Code', required=True)
active = fields.Boolean(
'Active', default=True)
|
apache/incubator-airflow
|
tests/providers/google/cloud/transfers/test_oracle_to_gcs.py
|
Python
|
apache-2.0
| 6,070
| 0.002471
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import cx_Oracle
from airflow.providers.google.cloud.transfers.oracle_to_gcs import OracleToGCSOperator
TASK_ID = 'test-oracle-to-gcs'
ORACLE_CONN_ID = 'oracle_conn_test'
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'
GZIP = False
ROWS = [('mock_row_content_1', 42), ('mock_row_content_2', 43), ('mock_row_content_3', 44)]
CURSOR_DESCRIPTION = (
('some_str', cx_Oracle.DB_TYPE_VARCHAR, None, None, None, None, None),
('some_num', cx_Oracle.DB_TYPE_NUMBER, None, None, None, None, None),
)
NDJSON_LINES = [
b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
b'{"some_num": 44, "some_str": "mock_row_content_3"}\n',
]
SCHEMA_FILENAME = 'schema_test.json'
SCHEMA_JSON = [
b'[{"mode": "NULLABLE", "name": "some_str", "type": "STRING"}, ',
b'{"mode": "NULLABLE", "name": "some_num", "type": "NUMERIC"}]',
]
class TestOracleToGoogleCloudStorageOperator(unittest.TestCase):
def test_init(self):
"""Test OracleToGoogleCloudStorageOperator instance is properly initialized."""
op = OracleToGCSOperator(task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
assert op.task_id == TASK_ID
assert op.sql == SQL
assert op.bucket == BUCKET
assert op.filename == JSON_FILENAME
@mock.patch('airflow.providers.google.cloud.transfers.oracle_to_gcs.OracleHook')
@mock.patch('airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook')
def test_exec_success_json(self, gcs_hook_mock_class, oracle_hook_mock_class):
"""Test successful run of execute function for JSON"""
op = OracleToGCSOperator(
task_id=TASK_ID, oracle_conn_id=ORACLE_CONN_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME
)
oracle_hook_mock = oracle_hook_mock_class.return_value
oracle_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
oracle_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
assert BUCKET == bucket
assert JSON_FILENAME.format(0) == obj
assert 'application/json' == mime_type
assert GZIP == gzip
with open(tmp_filename, 'rb') as file:
assert b''.join(NDJSON_LINES) == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
oracle_hook_mock_class.assert_called_once_with(oracle_conn_id=ORACLE_CONN_ID)
oracle_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.providers.google.cloud.transfers.oracle_to_gcs.OracleHook')
@mock.patch('airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook')
def test_file_splitting(self, gcs_hook_mock_class, oracle_hook_mock_class):
"""Test that ndjson is split by approx_max_file_size_bytes param."""
oracle_hook_mock = oracle_hook_mock_class.return_value
oracle_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
oracle_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
expected_upload = {
JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
JSON_FILENAME.format(1): NDJSON_LINES[2],
}
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
assert BUCKET == bucket
assert 'application/json' == mime_type
assert GZIP == gzip
with open(tmp_filename, 'rb') as file:
assert expected_upload[obj] == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op = OracleToGCSOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]),
)
op.execute(None)
@mock.patch('airflow.providers.google.cloud.transfers.oracle_to_gcs.OracleHook')
@mock.patch('airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook')
def test_schema_file(self, gcs_hook_mock_class, oracle_hook_mock_class):
"""Test writing schema files."""
oracle_hook_mock = oracle_hook_mock_class.return_value
oracle_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
oracle_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
if obj == SCHEMA_FILENAME:
with open(tmp_filename, 'rb') as file:
assert b''.join(SCHEMA_JSON) == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op = OracleToGCSOperator(
task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME, schema_filename=SCHEMA_FILENAME
)
op.execute(None)
# once for the file and once for the schema
assert 2 == gcs_hook_mock.upload.call_count
|
benjaoming/simple-pypi-statistics
|
simple_pypi_statistics/api.py
|
Python
|
gpl-2.0
| 6,238
| 0.000321
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
"""
Get package download statistics from PyPI
"""
# Based on https://github.com/collective/Products.PloneSoftwareCenter\
# /commit/601558870175e35cfa4d05fb309859e580271a1f
# For sorting XML-RPC results
from collections import deque
# HTTPS connection for normalize function
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import json
from datetime import datetime
# PyPI's XML-RPC methods
# https://wiki.python.org/moin/PyPIXmlRpc
try:
import xmlrpc.client as xmlrpc
except ImportError: # Python 2
import xmlrpclib as xmlrpc
PYPI_HOST = 'pypi.python.org'
PYPI_URL = 'https://%s/pypi' % PYPI_HOST
PYPI_JSON = '/'.join([PYPI_URL, '%s/json'])
PYPI_XML = xmlrpc.ServerProxy(PYPI_URL)
# PyPI JSON
# http://stackoverflow.com/a/28786650
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
def by_two(source):
"""
"""
out = []
for x in source:
out.append(x)
if len(out) == 2:
yield out
out = []
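def _by_two_example():
    # Hypothetical sanity check (not part of the original file); note that a
    # trailing odd element is dropped.
    assert list(by_two([1, 2, 3, 4, 5])) == [[1, 2], [3, 4]]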
def count_downloads(package, version=None, json=False):
"""
"""
count = 0
items = []
for urls, data in get_release_info([package], json=json):
for url in urls:
filename = url['filename']
downloads = url['downloads']
if not json:
# The XML-RPC API returns a DateTime-like value; convert via its timetuple.
upload_time = datetime(*url['upload_time'].timetuple()[:6]).date()
else:
# Convert 2011-04-14T02:16:55 to 2011-04-14
upload_time = url['upload_time'].split('T')[0]
upload_time = datetime.strptime(upload_time, '%Y-%m-%d').date()
if version == data['version'] or not version:
items.append(
{
'upload_time': upload_time,
'filename': filename,
'downloads': downloads,
}
)
count += url['downloads']
return count, items
# http://stackoverflow.com/a/28786650
def get_jsonparsed_data(url):
"""Receive the content of ``url``, parse it as JSON and return the
object.
"""
response = urlopen(url)
data = response.read().decode('utf-8')
return json.loads(data)
def normalize(name):
"""
"""
http = HTTPSConnection(PYPI_HOST)
http.request('HEAD', '/pypi/%s/' % name)
r = http.getresponse()
if r.status not in (200, 301):
raise ValueError(r.reason)
return r.getheader('location', name).split('/')[-1]
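# Hypothetical usage sketch (not part of the original file); this performs a
# real HTTPS HEAD request against PyPI, so it is left commented out:
# canonical = normalize('django')  # follows the redirect to the canonical name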
def get_releases(packages):
"""
"""
mcall = xmlrpc.MultiCall(PYPI_XML)
called_packages = deque()
for package in packages:
mcall.package_releases(package, True)
called_packages.append(package)
if len(called_packages) == 100:
result = mcall()
mcall = xmlrpc.MultiCall(PYPI_XML)
for releases in result:
yield called_packages.popleft(), releases
result = mcall()
for releases in result:
yield called_packages.popleft(), releases
def get_release_info(packages, json=False):
"""
"""
if json:
for package in packages:
data = get_jsonparsed_data(PYPI_JSON % package)
for release in data['releases']:
urls = data['releases'][release]
yield urls, data['info']
return
mcall = xmlrpc.MultiCall(PYPI_XML)
i = 0
for package, releases in get_releases(packages):
for version in releases:
mcall.release_urls(package, version)
mcall.release_data(package, version)
i += 1
if i % 50 == 49:
result = mcall()
mcall = xmlrpc.MultiCall(PYPI_XML)
for urls, data in by_two(result):
yield urls, data
result = mcall()
for urls, data in by_two(result):
yield urls, data
def get_stats(package):
"""
Fetch raw statistics of a package, no corrections are made to this
data. You should use get_corrected_stats().
"""
grand_total = 0
if '==' in package:
package, version = package.split('==')
try:
package = normalize(package)
version = None
except ValueError:
raise RuntimeError('No such module or package %r' % package)
# Count downloads
total, releases = count_downloads(
package,
json=True,
version=version,
)
result = {
'version': version,
'releases': releases,
}
grand_total += total
return result, grand_total, version
def get_corrected_stats(package, use_honeypot=True):
"""
Fetches statistics for `package` and then corrects them using a special
honeypot.
"""
honeypot, __, __ = get_stats('python-bogus-project-honeypot')
if not honeypot:
raise RuntimeError("Could not get honeypot
|
data")
honeypot = honeypot['releases']
# Add a field used to store diff when choosing the best honey pot release
# for some statistic
for x in honeypot:
x['diff'] = 0
stats, __, version = get_stats(package)
if not stats:
return
# Denote release date diff and choose the honey pot release that's closest
# to the one of each release
releases = stats['releases']
for release in releases:
# Sort by absolute difference
honeypot.sort(key=lambda x: abs(
(x['upload_time'] - release['upload_time']).total_seconds()
))
# Multiple candidates
honeypot_filtered = list(filter(lambda x: x['diff'] == honeypot[0]['diff'], honeypot))
average_downloads = sum([x['downloads'] for x in honeypot_filtered]) / len(honeypot_filtered)
release['downloads'] = release['downloads'] - average_downloads
# Re-calculate totals
total_count = sum([x['downloads'] for x in releases])
return stats, total_count, version
|
kgiusti/pyngus
|
examples/perf-tool.py
|
Python
|
apache-2.0
| 6,874
| 0
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Tool to gauge message passing throughput and latencies"""
import logging
import optparse
import time
import uuid
import pyngus
from proton import Message
from utils import connect_socket
from utils import get_host_port
from utils import process_connection
LOG = logging.getLogger()
LOG.addHandler(logging.StreamHandler())
class ConnectionEventHandler(pyngus.ConnectionEventHandler):
def __init__(self):
super(ConnectionEventHandler, self).__init__()
def connection_failed(self, connection, error):
"""Connection has failed in some way."""
LOG.warn("Connection failed callback: %s", error)
def connection_remote_closed(self, connection, pn_condition):
"""Peer has closed its end of the connection."""
LOG.debug("connection_remote_closed condition=%s", pn_condition)
connection.close()
class SenderHandler(pyngus.SenderEventHandler):
def __init__(self, count):
self._count = count
self._msg = Message()
self.calls = 0
self.total_ack_latency = 0.0
self.stop_time = None
self.start_time = None
def credit_granted(self, sender_link):
if self.start_time is None:
self.start_time = time.time()
self._send_message(sender_link)
def _send_message(self, link):
now = time.time()
self._msg.body = {'tx-timestamp': now}
self._last_send = now
link.send(self._msg, self)
def __call__(self, link, handle, status, error):
now = time.time()
self.total_ack_latency += now - self._last_send
self.calls += 1
if self._count:
self._count -= 1
if self._count == 0:
self.stop_time = now
link.close()
return
self._send_message(link)
def sender_remote_closed(self, sender_link, pn_condition):
LOG.debug("Sender peer_closed condition=%s", pn_condition)
sender_link.close()
def sender_failed(self, sender_link, error):
"""Protocol error occurred."""
LOG.debug("Sender failed error=%s", error)
sender_link.close()
class ReceiverHandler(pyngus.ReceiverEventHandler):
def __init__(self, count, capacity):
self._count = count
self._capacity = capacity
self._msg = Message()
self.receives = 0
self.tx_total_latency = 0.0
def receiver_active(self, receiver_link):
receiver_link.add_capacity(self._capacity)
def receiver_remote_closed(self, receiver_link, pn_condition):
"""Peer has closed its end of the link."""
LOG.debug("receiver_remote_closed condition=%s", pn_condition)
receiver_link.close()
def receiver_failed(self, receiver_link, error):
"""Protocol error occurred."""
LOG.warn("receiver_failed error=%s", error)
receiver_link.close()
def message_received(self, receiver, message, handle):
now = time.time()
receiver.message_accepted(handle)
self.tx_total_latency += now - message.body['tx-timestamp']
self.receives += 1
if self._count:
self._count -= 1
if self._count == 0:
receiver.close()
return
lc = receiver.capacity
cap = self._capacity
if lc < (cap / 2):
receiver.add_capacity(cap - lc)
def main(argv=None):
_usage = """Usage: %prog [options]"""
parser = optparse.OptionParser(usage=_usage)
parser.add_option("-a", dest="server", type="string",
default="amqp://0.0.0.0:5672",
help="The address of the server [amqp://0.0.0.0:5672]")
parser.add_option("--node", type='string', default='amq.topic',
help='Name of source/target node')
parser.add_option("--count", type='int', default=100,
help='Send N messages (send forever if N==0)')
parser.add_option("--debug", dest="debug", action="store_true",
help="enable debug logging")
parser.add_option("--trace", dest="trace", action="store_true",
help="enable protocol tracing")
opts, _ = parser.parse_args(args=argv)
if opts.debug:
LOG.setLevel(logging.DEBUG)
host, port = get_host_port(opts.server)
my_socket = connect_socket(host, port)
# create AMQP Container, Connection, and SenderLink
#
container = pyngus.Container(uuid.uuid4().hex)
conn_properties = {'hostname': host,
'x-server': False}
if opts.trace:
conn_properties["x-trace-protocol"] = True
c_handler = ConnectionEventHandler()
connection = container.create_connection("perf_tool",
c_handler,
conn_properties)
r_handler = ReceiverHandler(opts.count, opts.count or 1000)
receiver = connection.create_receiver(opts.node, opts.node, r_handler)
s_handler = SenderHandler(opts.count)
sender = connection.create_sender(opts.node, opts.node, s_handler)
connection.open()
receiver.open()
while not receiver.active:
process_connection(connection, my_socket)
sender.open()
# Run until all messages transfered
while not sender.closed or not receiver.closed:
process_connection(connection, my_socket)
connection.close()
while not connection.closed:
process_connection(connection, my_socket)
duration = s_handler.stop_time - s_handler.start_time
thru = s_handler.calls / duration
permsg = duration / s_handler.calls
ack = s_handler.total_ack_latency / s_handler.calls
lat = r_handler.tx_total_latency / r_handler.receives
print("Stats:\n"
" TX Avg Calls/Sec: %f Per Call: %f Ack Latency %f\n"
" RX Latency: %f" % (thru, permsg, ack, lat))
sender.destroy()
receiver.destroy()
connection.destroy()
container.destroy()
my_socket.close()
return 0
if __name__ == "__main__":
main()
|
openUniverse/singularity
|
BensPractice/Practise2.py
|
Python
|
mit
| 2,825
| 0.013805
|
# # 1. Define a function max() that takes two numbers as arguments and returns the largest of them.
# # Use the if-then-else construct available in Python.
# # (It is true that Python has the max() function built in, but writing it yourself is nevertheless a good exercise.)
#
# def max (a, b):
# if a>b:
# return a
# else:
# return b
#
# print(max(8, 11))
#
# # 6. Define a function sum() and a function multiply() that sums and multiplies (respectively) all the numbers in a list of numbers.
# # For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.
#
# # n+=x means store n + x in n (means n = n + x)
# # n*=x means store n * x in n
# # = is not equals to it is store in
#
# NumList=[1, 2, 3, 4]
#
# def sum (list):
# n=0
# for element in list:
# n+= element
# return n
# print (sum(NumList))
#
# def mult (list):
# n=1
# for element in list:
# n*= element
# return n
# print (mult(NumList))
# 7. Define a function reverse() that computes the reversal of a string (a string is a list of characters).
# For example, reverse("I am testing") should return the string "gnitset ma I". (Strings never need to be reversed, dumb question;
# to do so, however: "snake kobra"[::-1])
print ("snake kobra" [::-1])
pokemon = "snake kobra"
print (pokemon [::-1])
# the following is a more complicated method to teach what each individual thing in it means
def reverse(list):
length = len(list) # len gets the length of a list
newList = [] # creates a new, empty list
for element in range (0, length): # range creates a sequence from start (x) up to, but not including, end (y)
newList.append(list[(length-1) - element]) # "for containerName in" is a loop construct
# .append adds to newList. A list is x long but Python counts starting from 0,
# so length-1 is the position of the last element.
# It builds the new list starting at element 0:
# subtracting from the ending position puts the last element first, then the
# next one, and so on, so it works backwards.
return "".join(newList) #join the string in newList as string eg turn ["q","w","x"] into [qwx]
# PList= "snake kobra"
#
# print (PList.reverse ())
#
# # def reverse(list):
# # length = len(list) #len will get the length as a number
# # RevList = [] #Creates a new, empty list
# # for element in range(0, length): #creates a new list (x, y) from x to y
# # tempIndex = (length-1) - element
# # RevList.apend(list(tempIndex))
# # return "".join(RevList)
#Splitting Practise
|
jittat/ku-eng-direct-admission
|
scripts/filter_quota.py
|
Python
|
agpl-3.0
| 1,880
| 0.007447
|
import codecs
input_filename = '/home/jittat/mydoc/directadm53/payment/assignment.csv'
quota_filename = '/home/jittat/mydoc/directadm53/payment/quota.txt'
output_filename = '/home/jittat/mydoc/directadm53/payment/assignment-added.csv'
def read_quota():
q_data = {
'nat_id': {},
'firstname': {},
'lastname': {}
}
for l in codecs.open(quota_filename, encoding='utf-8', mode='r').readlines():
items = l.strip().split('\t')
l = l.strip()
if len(items)!=4:
continue
q_data['nat_id'][items[0]] = l
if items[2] in q_data['firstname']:
q_data['firstname'][items[2]].append(l)
else:
q_data['firstname'][items[2]] = [l]
if items[3] in q_data['lastname']:
q_data['lastname'][items[3]].append(l)
else:
q_data['lastname'][items[3]] = [l]
return q_data
def main():
q_data = read_quota()
lines = codecs.open(input_filename, encoding='utf-8', mode='r').readlines()
outfile = codecs.open(output_filename, encoding='utf-8', mode='w')
for l in lines[1:]:
items = l.strip().split(',')
if items[1] in q_data['nat_id']:
print 'OUT:', l.strip()
print >> outfile, l.strip() + ',1,16000'
continue
print >> outfile, l.strip() + ',0,0'
if items[3] in q_data['firstname']:
print 'CHECK-LAST:', l.strip()
for k in q_data['firstname'][items[3]]:
print k
print '------------------'
continue
if items[4] in q_data['lastname']:
print 'CHECK-FIRST:', l.strip()
for k in q_data['lastname'][items[4]]:
print k
print '------------------'
continue
if __name__=='__main__':
main()
|
yoriyuki/nksnd
|
nksnd/evaluate.py
|
Python
|
mit
| 2,104
| 0.003802
|
from __future__ import print_function
import sys
import argparse
import numpy as np
def max3(x, y, z):
return max(max(x, y), z)
def lcs(s1, s2):
m = len(s1)
n = len(s2)
t = np.zeros((n + 2, m + 2), dtype=int)
for j in range(1, n + 1):
for i in range(1, m + 1):
is_same = 0
if s1[i-1] == s2[j-1]:
is_same = 1
# Standard LCS recurrence: a match extends the diagonal prefix.
t[j, i] = max3(t[j - 1, i - 1] + is_same, t[j - 1, i], t[j, i - 1])
return t[n, m]
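def _lcs_example():
    # Hypothetical sanity check (not part of the original file):
    assert lcs('abcde', 'ace') == 3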
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Evaluate conversion results')
parser.add_argument('originals', help='original texts')
parser.add_argument('converted_texts',help='converted texts')
parser.add_argument('--verbose', '-v', type=bool, help='verbose output')
args = parser.parse_args()
lcs_sum = 0
conv_sum = 0
orig_sum = 0
sentences = 0
correct_sentences = 0
with open(args.originals, 'r') as originals:
with open(args.converted_texts, 'r') as converted_texts:
origs = originals
convs = converted_texts
for orig, conv in zip(origs, convs):
orig = orig.strip(' \n')
conv = conv.strip(' \n')
sentences += 1
if orig == conv:
correct_sentences += 1
lcs_len = lcs(orig, conv)
if args.verbose:
print(u'\"{}\", \"{}\", {}'.format(orig, conv, lcs_len), file=stdout)
lcs_sum += lcs_len
conv_sum += len(conv)
orig_sum += len(orig)
precision = lcs_sum/float(conv_sum)
recall = lcs_sum/float(orig_sum)
f_value = 2 * precision * recall / (precision + recall)
sentence_accuracy = float(correct_sentences) / sentences
if args.verbose:
print(u',,,{},{},{},{}'.format(precision, recall, f_value, sentence_accuracy), file=sys.stdout)
else:
print(u'{},{},{},{}'.format(precision, recall, f_value, sentence_accuracy), file=sys.stdout)
|
wayetender/whip
|
whip/src/adapter/util/serialization.py
|
Python
|
gpl-2.0
| 1,453
| 0.007571
|
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
import pickle
import bz2
def SerializeThriftMsg(msg, protocol_type=TBinaryProtocol.TBinaryProtocol):
"""Serialize a thrift m
|
essage using the given protocol.
The default protocol is binary.
Args:
msg: the Thrift object to serialize.
protocol_type: the Thrift protocol class to use.
Returns:
A string of the serialized object.
"""
msg.validate()
transportOut = TTransport.TMemoryBuffer()
protocolOut = protocol_type(transportOut)
msg.write(protocolOut)
return transportOut.getvalue()
def DeserializeThriftMsg(msg, data,
protocol_type=TBinaryProtocol.TBinaryProtocol):
"""Deserialize a thrift message using the given protocol.
The default protocol is binary.
Args:
msg: the Thrift object to serialize.
data: the data to read from.
protocol_type: the Thrift protocol class to use.
Returns:
Message object passed in (post-parsing).
"""
transportIn = TTransport.TMemoryBuffer(data)
protocolIn = protocol_type(transportIn)
msg.read(protocolIn)
msg.validate()
return msg
def serialize_python(msg):
#return bz2.compress(pickle.dumps(msg))
return pickle.dumps(msg)
def deserialize_python(data):
#return pickle.loads(bz2.decompress(data))
return pickle.loads(data)
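# Illustrative round-trips (a sketch, not part of the original module).
# `Ping` below stands in for any Thrift-generated struct you have available:
#
#   msg = Ping(payload='hello')
#   data = SerializeThriftMsg(msg)
#   echo = DeserializeThriftMsg(Ping(), data)
#   assert echo == msg
#
# The pickle-based helpers round-trip any picklable Python object:
#
#   assert deserialize_python(serialize_python({'a': 1})) == {'a': 1}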
|
quantsini/pyswf
|
py_swf/clients/decision.py
|
Python
|
mit
| 9,074
| 0.002976
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from botocore.vendored.requests.exceptions import ReadTimeout
from py_swf.errors import NoTaskFound
__all__ = ['DecisionClient', 'DecisionTask']
DecisionTask = namedtuple('DecisionTask', 'events task_token workflow_id workflow_run_id workflow_type')
"""Contains the metadata to execute a decision task.
See the response syntax in :meth:`~SWF.Client.poll_for_decision_task`.
"""
def nametuplefy(thing):
"""Recursively turns a dict into namedtuples."""
if type(thing) == dict:
# Only supports string keys
Dict = namedtuple('Dict', ' '.join(thing.keys()))
nametuplefied_children = {}
for k, v in thing.items():
nametuplefied_children[k] = nametuplefy(v)
return Dict(**nametuplefied_children)
if type(thing) == list:
return list(map(nametuplefy, thing))
else:
return thing
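def _nametuplefy_example():
    """A sketch (not part of the original module) showing the conversion:
    nested dicts become attribute access and lists are mapped element-wise."""
    converted = nametuplefy({'workflow': {'id': 'wf-1', 'tags': [1, 2]}})
    assert converted.workflow.id == 'wf-1'
    assert converted.workflow.tags == [1, 2]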
class DecisionClient(object):
"""A client that provides a pythonic API for polling and responding to decision tasks through an SWF boto3 client.
:param decision_config: Contains SWF values commonly used when making SWF api calls.
:type decision_config: :class:`~py_swf.config_definitions.DecisionConfig`
:param boto_client: A raw SWF boto3 client.
:type boto_client: :class:`~SWF.Client`
"""
def __init__(self, decision_config, boto_client):
self.decision_config = decision_config
self.boto_client = boto_client
def poll(self, identity=None, use_raw_event_history=False):
"""Opens a connection to AWS and long-polls for decision tasks.
When a decision is available, this function will return with exactly one decision task to execute.
Only returns a contiguous subset of the most recent events.
        If you want to grab the entire history for a workflow, use :meth:`~py_swf.clients.decision.DecisionClient.walk_execution_history`
Passthrough to :meth:`~SWF.Client.poll_for_decision_task`.
:param identity: A freeform text that identifies the client that performed the longpoll. Useful for debugging history.
:type identity: string
:param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
Otherwise attempts to turn dictionaries into namedtuples recursively.
:type use_raw_event_history: bool
:return: A decision task to execute.
:rtype: DecisionTask
:raises py_swf.errors.NoTaskFound: Raised when polling for a decision task times out without receiving any tasks.
"""
kwargs = dict(
domain=self.decision_config.domain,
reverseOrder=True,
taskList={
'name': self.decision_config.task_list,
},
)
# boto doesn't like None values for optional kwargs
if identity is not None:
kwargs['identity'] = identity
try:
results = self.boto_client.poll_for_decision_task(
**kwargs
)
except ReadTimeout as e:
raise NoTaskFound(e)
# Sometimes SWF gives us an incomplete response, ignore these.
if not results.get('taskToken', None):
raise NoTaskFound('Received results with no taskToken')
events = results['events']
if not use_raw_event_history:
events = nametuplefy(events)
return DecisionTask(
events=events,
task_token=results['taskToken'],
workflow_id=results['workflowExecution']['workflowId'],
workflow_run_id=results['workflowExecution']['runId'],
workflow_type=results['workflowType'],
)
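    # Example usage (an illustrative sketch; `decision_config` and the boto3
    # client are assumed to be set up as described in the class docstring):
    #
    #   client = DecisionClient(decision_config, boto3.client('swf'))
    #   try:
    #       task = client.poll(identity='worker-1')
    #   except NoTaskFound:
    #       pass  # long-poll timed out; poll again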
def walk_execution_history(
self,
workflow_id,
workflow_run_id,
reverse_order=True,
use_raw_event_history=False,
maximum_page_size=1000,
):
"""Lazily walks through the entire workflow history for a given workflow_id. This will make successive calls
to SWF on demand when pagination is needed.
See :meth:`~SWF.Client.get_workflow_execution_history` for more information.
        :param workflow_id: The workflow_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type workflow_id: string
        :param workflow_run_id: The workflow_run_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type workflow_run_id: string
        :param reverse_order: Passthru for reverseOrder to :meth:`~SWF.Client.get_workflow_execution_history`
        :type reverse_order: bool
        :param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
            Otherwise attempts to turn dictionaries into namedtuples recursively.
        :type use_raw_event_history: bool
        :param maximum_page_size: Passthru for maximumPageSize to :meth:`~SWF.Client.get_workflow_execution_history`
        :type maximum_page_size: int
:return: A generator that returns successive elements in the workflow execution history.
:rtype: collections.Iterable
"""
kwargs = dict(
domain=self.decision_config.domain,
reverseOrder=reverse_order,
execution=dict(
workflowId=workflow_id,
runId=workflow_run_id,
),
maximumPageSize=maximum_page_size,
)
while True:
            results = self.boto_client.get_workflow_execution_history(
**kwargs
)
            next_page_token = results.get('nextPageToken', None)
events = results['events']
for event in events:
if not use_raw_event_history:
event = nametuplefy(event)
yield event
if next_page_token is None:
break
kwargs['nextPageToken'] = next_page_token
def finish_decision_with_activity(self, task_token, activity_id, activity_name, activity_version, activity_input):
"""Responds to a given decision task's task_token to schedule an activity task to run.
Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.
        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type task_token: string
        :param activity_id: A unique identifier for the activity task.
        :type activity_id: string
        :param activity_name: Which activity name to execute.
        :type activity_name: string
        :param activity_version: Version of the activity name.
        :type activity_version: string
        :param activity_input: Freeform text of the input for the activity
        :type activity_input: string
:return: None
:rtype: NoneType
"""
activity_task = build_activity_task(
activity_id,
activity_name,
activity_version,
activity_input,
self.decision_config,
)
self.boto_client.respond_decision_task_completed(
taskToken=task_token,
decisions=[activity_task],
)
def finish_workflow(self, task_token, result):
"""Responds to a given decision task's task_token to finish and terminate the workflow.
Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.
        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type task_token: string
        :param result: Freeform text that represents the final result of the workflow.
        :type result: string
:return: None
:rtype: NoneType
"""
workflow_complete = build_workflow_complete(result)
self.boto_client.respond_decision_task_completed(
taskToken=task_token,
decisions=[workflow_complete],
)
def build_workflow_complete(result):
    return {
        'decisionType': 'CompleteWorkflowExecution',
        'completeWorkflowExecutionDecisionAttributes': {
            'result': result,
        },
    }
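# `build_activity_task` (used by finish_decision_with_activity above) is not
# shown in this excerpt. A minimal sketch of what it plausibly builds,
# following the boto3 SWF decision schema; the decision_config attribute
# names are assumptions:
def _build_activity_task_sketch(activity_id, activity_name, activity_version,
                                activity_input, decision_config):
    return {
        'decisionType': 'ScheduleActivityTask',
        'scheduleActivityTaskDecisionAttributes': {
            'activityType': {
                'name': activity_name,
                'version': activity_version,
            },
            'activityId': activity_id,
            'input': activity_input,
            # Assumed attribute, mirroring DecisionClient.poll above:
            'taskList': {'name': decision_config.task_list},
        },
    }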
|
Eloff/silvershell
|
client/silvershell/white_on_black_prefs.py
|
Python
|
bsd-3-clause
| 6,018
| 0.003157
|
# You can edit these settings and save them, they
# will be applied immediately and remembered for next time.
# This will reset the interpreter.
# ******************************************************************************* #
# If changing these settings makes the interpreter unrecoverable, you #
# can reset to defaults by right clicking and using the silverlight configuration #
# dialog to clear persistent storage for this website. #
# ******************************************************************************* #
import sys
import wpf
from silvershell import utils
# Execute code on UI thread or on background thread
BackgroundExecution = False
# Show CLR tracebacks?
ExceptionDetail = False
# Setting this higher will display more members in the completion list, but will hurt performance.
# If the completion list takes too long to show, set this lower.
MaxCompletions = 100
# Setting any of these preferences to None will result in them not being applied
FontSize = 14
FontFamily = wpf.FontFamily('Courier New')
FontWeight = wpf.FontWeights.Bold
Foreground = wpf.brush('#ffffff')
BackgroundMask = wpf.brush('#cc000000')
BackgroundImage = None
TextBoxStyle = utils.load_xaml('''
<Style TargetType="TextBox"
xmlns="%(client_ns)s"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml">
<Setter Property="Background" Value="Transparent" />
<Setter Property="Padding" Value="0" />
<Setter Property="BorderThickness" Value="0" />
<Setter Property="Template">
<Setter.Value>
<ControlTemplate TargetType="TextBox">
<Border x:Name="ContentElement" Background="{TemplateBinding Background}" Padding="{TemplateBinding Padding}" />
</ControlTemplate>
</Setter.Value>
</Setter>
</Style>
''')
if sys.platform == 'silverlight':
ButtonStyle = utils.load_xaml('''
<Style TargetType="Button"
xmlns="%(client_ns)s"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:vsm="clr-namespace:System.Windows;assembly=System.Windows">
<Setter Property="FontSize" Value="14" />
<Setter Property="FontWeight" Value="Bold" />
<Setter Property="Foreground" Value="White" />
<Setter Property="Background" Value="Transparent" />
<Setter Property="Padding" Value="0" />
<Setter Property="BorderThickness" Value="0" />
<Setter Property="Template">
<Setter.Value>
<ControlTemplate TargetType="Button">
<TextBlock x:Name="RootElement" Text="{TemplateBinding Content}" TextDecorations="Underline">
<vsm:VisualStateManager.VisualStateGroups>
<vsm:VisualStateGroup x:Name="CommonStates">
<vsm:VisualStateGroup.Transitions>
<vsm:VisualTransition To="MouseOver" GeneratedDuration="0:0:0.25" />
</vsm:VisualStateGroup.Transitions>
<vsm:VisualState x:Name="Normal" />
<vsm:VisualState x:Name="MouseOver">
<Storyboard>
<ColorAnimation Storyboard.TargetName="RootElement" Storyboard.TargetProperty="(Control.Foreground).(SolidColorBrush.Color)" To="Yellow" Duration="0" />
</Storyboard>
</vsm:VisualState>
<vsm:VisualState x:Name="Pressed" />
<vsm:VisualState x:Name="Disabled" />
</vsm:VisualStateGroup>
<vsm:VisualStateGroup x:Name="FocusStates">
<vsm:VisualState x:Name="Focused" />
<vsm:VisualState x:Name="Unfocused" />
</vsm:VisualStateGroup>
</vsm:VisualStateManager.VisualStateGroups>
</TextBlock>
</ControlTemplate>
</Setter.Value>
</Setter>
</Style>
''')
else:
ButtonStyle = None
# These preferences are mandatory, setting them to None is an error
CallTip = utils.load_xaml('''
<Border
xmlns="%(client_ns)s"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Background="Black"
BorderThickness="2"
Padding="5"
>
<Border.BorderBrush>
<LinearGradientBrush StartPoint="0.5,0" EndPoint="0.5,1">
<GradientStop Color="#B2FFFFFF" Offset="0"/>
<GradientStop Color="#66FFFFFF" Offset="0.325"/>
<GradientStop Color="#1EFFFFFF" Offset="0.325"/>
<GradientStop Color="#51FFFFFF" Offset="1"/>
</LinearGradientBrush>
</Border.BorderBrush>
<TextBlock x:Name="CallTipLabel" TextAlignment="Left" Foreground="White" />
</Border>
''')
MemberList = utils.load_xaml('''
<Border
xmlns="%(client_ns)s"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Background="Black"
BorderThickness="2"
>
<Border.BorderBrush>
<LinearGradientBrush StartPoint="0.5,0" EndPoint="0.5,1">
<GradientStop Color="#B2FFFFFF" Offset="0"/>
<GradientStop Color="#66FFFFFF" Offset="0.325"/>
<GradientStop Color="#1EFFFFFF" Offset="0.325"/>
<GradientStop Color="#51FFFFFF" Offset="1"/>
</LinearGradientBrush>
</Border.BorderBrush>
<ListBox x:Name="MemberListBox" MaxHeight="240" />
</Border>
''')
CursorAnimation = utils.load_xaml('''
<ColorAnimationUsingKeyFrames
xmlns="%(client_ns)s"
BeginTime="0"
Storyboard.TargetProperty="(Shape.Fill).(SolidColorBrush.Color)"
>
    <DiscreteColorKeyFrame Value="Transparent" KeyTime="0:0:0" />
<LinearColorKeyFrame Value="White" KeyTime="0:0:0.35" />
<DiscreteColorKeyFrame Value="White" KeyTime="0:0:0.6" />
</ColorAnimationUsingKeyFrames>
''')
|
vmindru/ansible
|
lib/ansible/plugins/callback/mail.py
|
Python
|
gpl-3.0
| 8,479
| 0.002831
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: mail
type: notification
short_description: Sends failure events via email
description:
- This callback will report failures via email
version_added: '2.0'
author:
- Dag Wieers (@dagwieers)
requirements:
- whitelisting in configuration
options:
mta:
      description: Mail Transfer Agent, the server that accepts SMTP mail
env:
- name: SMTPHOST
ini:
- section: callback_mail
key: smtphost
version_added: '2.5'
default: localhost
mtaport:
      description: Mail Transfer Agent port, the port on which the SMTP server listens
ini:
- section: callback_mail
key: smtpport
version_added: '2.5'
default: 25
to:
description: Mail recipient
ini:
- section: callback_mail
key: to
version_added: '2.5'
default: root
sender:
description: Mail sender
ini:
- section: callback_mail
key: sender
version_added: '2.5'
cc:
description: CC'd recipient
ini:
- section: callback_mail
key: cc
version_added: '2.5'
bcc:
description: BCC'd recipient
ini:
      - section: callback_mail
key: bcc
version_added: '2.5'
note:
- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
'''
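# Example configuration (an illustrative sketch; the section and key names
# come from the DOCUMENTATION block above):
#
#   # ansible.cfg
#   [defaults]
#   callback_whitelist = mail
#
#   [callback_mail]
#   smtphost = smtp.example.com
#   smtpport = 25
#   to = ops@example.com
#   sender = ansible@example.com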
import json
import os
import re
import smtplib
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
''' This Ansible callback plugin mails errors to interested parties. '''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'mail'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.sender = None
self.to = 'root'
self.smtphost = os.getenv('SMTPHOST', 'localhost')
self.smtpport = 25
self.cc = None
self.bcc = None
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.sender = self.get_option('sender')
self.to = self.get_option('to')
self.smtphost = self.get_option('mta')
self.smtpport = int(self.get_option('mtaport'))
self.cc = self.get_option('cc')
self.bcc = self.get_option('bcc')
def mail(self, subject='Ansible error mail', body=None):
if body is None:
body = subject
smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
b_sender = to_bytes(self.sender)
b_to = to_bytes(self.to)
b_cc = to_bytes(self.cc)
b_bcc = to_bytes(self.bcc)
b_subject = to_bytes(subject)
b_body = to_bytes(body)
b_content = b'From: %s\n' % b_sender
b_content += b'To: %s\n' % b_to
if self.cc:
b_content += b'Cc: %s\n' % b_cc
b_content += b'Subject: %s\n\n' % b_subject
b_content += b_body
b_addresses = b_to.split(b',')
if self.cc:
b_addresses += b_cc.split(b',')
if self.bcc:
b_addresses += b_bcc.split(b',')
for b_address in b_addresses:
smtp.sendmail(b_sender, b_address, b_content)
smtp.quit()
def subject_msg(self, multiline, failtype, linenr):
return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
def indent(self, multiline, indent=8):
return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
def body_blob(self, multiline, texttype):
''' Turn some text output in a well-indented block for sending in a mail body '''
intro = 'with the following %s:\n\n' % texttype
blob = ''
for line in multiline.strip('\r\n').splitlines():
blob += '%s\n' % line
return intro + self.indent(blob) + '\n'
def mail_result(self, result, failtype):
host = result._host.get_name()
if not self.sender:
self.sender = '"Ansible: %s" <root>' % host
# Add subject
if self.itembody:
subject = self.itemsubject
elif result._result.get('failed_when_result') is True:
subject = "Failed due to 'failed_when' condition"
elif result._result.get('msg'):
subject = self.subject_msg(result._result['msg'], failtype, 0)
elif result._result.get('stderr'):
subject = self.subject_msg(result._result['stderr'], failtype, -1)
elif result._result.get('stdout'):
subject = self.subject_msg(result._result['stdout'], failtype, -1)
elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
subject = self.subject_msg(result._result['exception'], failtype, -1)
else:
subject = '%s: %s' % (failtype, result._task.name or result._task.action)
# Make playbook name visible (e.g. in Outlook/Gmail condensed view)
body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
if result._task.name:
body += 'Task: %s\n' % result._task.name
body += 'Module: %s\n' % result._task.action
body += 'Host: %s\n' % host
body += '\n'
# Add task information (as much as possible)
body += 'The following task failed:\n\n'
if 'invocation' in result._result:
body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
elif result._task.name:
body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
else:
body += self.indent('%s\n' % result._task.action)
body += '\n'
# Add item / message
if self.itembody:
body += self.itembody
elif result._result.get('failed_when_result') is True:
body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
elif result._result.get('msg'):
body += self.body_blob(result._result['msg'], 'message')
# Add stdout / stderr / exception / warnings / deprecations
if result._result.get('stdout'):
body += self.body_blob(result._result['stdout'], 'standard output')
if result._result.get('stderr'):
body += self.body_blob(result._result['stderr'], 'error output')
if result._result.get('exception'): # Unrelated exceptions are added to output :-/
body += self.body_blob(result._result['exception'], 'exception')
if result._result.get('warnings'):
for i in range(len(result._result.get('warnings'))):
body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))
if result._result.get('deprecations'):
for i in range(len(result._result.get('deprecations'))):
body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))
body += 'and a complete dump of the error:\n\n'
body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
self.mail(subject=subject, body=body)
def v2_playbook_on_start(self, playbook):
self.playbook = playbook
self.itembody = ''
def v2_runner_on_failed(self, result, ignore_errors=False):
if ignore_errors:
return
self.mail_result(result, 'Failed')
def v2_runner_on_unreachable(self, result):
self.mail_result(result, 'Unreachable')
def v2_runner_on_async_failed(self, result):
        self.mail_result(result, 'Async failure')
|
mindriot101/k2catalogue
|
k2catalogue/k2logging.py
|
Python
|
mit
| 350
| 0.002857
|
import logging
logging.basicConfig(
    level=logging.INFO, format='%(asctime)s|%(name)s|%(levelname)s|%(message)s')
logging.getLogger('vcr.stubs').setLevel(logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool')\
.setLevel(logging.WARNING)
def get_logger(*args, **kwargs):
return logging.getLogger(*args, **kwargs)
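# Example usage (an illustrative sketch): loggers obtained this way inherit
# the pipe-delimited format configured above.
#
#   logger = get_logger(__name__)
#   logger.info('starting catalogue query')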
|
tnadeau/pybvc
|
samples/sampleopenflow/demos/demo42.py
|
Python
|
bsd-3-clause
| 8,783
| 0.00353
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetMplsTTLAction,
DecMplsTTLAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_MPLS_UCAST
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_42():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 42 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify MPLS TTL example1"
priority = 900
cookie = 1300
match_in_port = 3
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_mod_mpls_ttl = 2
act_out_port = 112
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Set MPLS TTL (%s)\n"
" Output (%s)" %
(act_mod_mpls_ttl, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetMplsTTLAction(action_order)
action.set_ttl(act_mod_mpls_ttl)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify MPLS TTL example2"
priority = 900
cookie = 1300
match_in_port = 112
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_out_port = 3
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Decrement MPLS TTL\n"
" Output (%s)" %
(act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = DecMplsTTLAction(action_order)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry2.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status
|
herow/planning_qgis
|
tests/src/python/test_qgscomposerlabel.py
|
Python
|
gpl-2.0
| 4,750
| 0.017053
|
# -*- coding: utf-8 -*-
'''
test_qgscomposerlabel.py
--------------------------------------
Date : Oct 2012
Copyright : (C) 2012 by Dr. Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis
import unittest
from utilities import getQgisTestApp, unitTestDataPath
from PyQt4.QtCore import QFileInfo, QDate, QDateTime
from qgis.core import QgsVectorLayer, QgsMapLayerRegistry, QgsMapRenderer, QgsComposition, QgsComposerLabel, QgsFeatureRequest, QgsFeature, QgsExpression
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsComposerLabel(unittest.TestCase):
def testCase(self):
TEST_DATA_DIR = unitTestDataPath()
vectorFileInfo = QFileInfo( TEST_DATA_DIR + "/france_parts.shp")
mVectorLayer = QgsVectorLayer( vectorFileInfo.filePath(), vectorFileInfo.completeBaseName(), "ogr" )
QgsMapLayerRegistry.instance().addMapLayers( [mVectorLayer] )
# create composition with composer map
mMapRenderer = QgsMapRenderer()
layerStringList = []
layerStringList.append( mVectorLayer.id() )
mMapRenderer.setLayerSet( layerStringList )
mMapRenderer.setProjectionsEnabled( False )
mComposition = QgsComposition( mMapRenderer )
mComposition.setPaperSize( 297, 210 )
mLabel = QgsComposerLabel( mComposition )
mComposition.addComposerLabel( mLabel )
self.evaluation_test( mComposition, mLabel )
self.feature_evaluation_test( mComposition, mLabel, mVectorLayer )
self.page_evaluation_test( mComposition, mLabel, mVectorLayer )
    def evaluation_test( self, mComposition, mLabel ):
# $CURRENT_DATE evaluation
mLabel.setText( "__$CURRENT_DATE__" )
assert mLabel.displayText() == ( "__" + QDate.currentDate().toString() + "__" )
# $CURRENT_DATE() evaluation
mLabel.setText( "__$CURRENT_DATE(dd)(ok)__" )
expected = "__" + QDateTime.currentDateTime().toString( "dd" ) + "(ok)__"
assert mLabel.displayText() == expected
# $CURRENT_DATE() evaluation (inside an expression)
mLabel.setText( "__[%$CURRENT_DATE(dd) + 1%](ok)__" )
dd = QDate.currentDate().day()
expected = "__%d(ok)__" % (dd+1)
assert mLabel.displayText() == expected
# expression evaluation (without associated feature)
mLabel.setText( "__[%\"NAME_1\"%][%21*2%]__" )
assert mLabel.displayText() == "__[NAME_1]42__"
def feature_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
provider = mVectorLayer.dataProvider()
fi = provider.getFeatures( QgsFeatureRequest() )
feat = QgsFeature()
fi.nextFeature( feat )
mLabel.setExpressionContext( feat, mVectorLayer )
mLabel.setText( "[%\"NAME_1\"||'_ok'%]")
assert mLabel.displayText() == "Basse-Normandie_ok"
fi.nextFeature( feat )
mLabel.setExpressionContext( feat, mVectorLayer )
assert mLabel.displayText() == "Bretagne_ok"
# evaluation with local variables
locs = { "$test" : "OK" }
mLabel.setExpressionContext( feat, mVectorLayer, locs )
mLabel.setText( "[%\"NAME_1\"||$test%]" )
assert mLabel.displayText() == "BretagneOK"
def page_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
mComposition.setNumPages( 2 )
mLabel.setText( "[%$page||'/'||$numpages%]" )
assert mLabel.displayText() == "1/2"
        # move to the second page and re-evaluate
mLabel.setItemPosition( 0, 320 )
assert mLabel.displayText() == "2/2"
# use setSpecialColumn
mLabel.setText( "[%$var1 + 1%]" )
QgsExpression.setSpecialColumn( "$var1", 41 )
assert mLabel.displayText() == "42"
QgsExpression.setSpecialColumn( "$var1", 99 )
assert mLabel.displayText() == "100"
QgsExpression.unsetSpecialColumn( "$var1" )
assert mLabel.displayText() == "[%$var1 + 1%]"
if __name__ == '__main__':
unittest.main()
|
mlskit/astromlskit
|
REGRESSION/lassofront.py
|
Python
|
gpl-3.0
| 3,925
| 0.000764
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'lassoui.ui'
#
# Created: Sat Apr 11 09:14:27 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(235, 342)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 211, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(10, 70, 211, 171))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.label = QtGui.QLabel(self.groupBox_2)
self.label.setGeometry(QtCore.QRect(50, 20, 111, 16))
self.label.setObjectName(_fromUtf8("label"))
self.doubleSpinBox = QtGui.QDoubleSpinBox(self.groupBox_2)
self.doubleSpinBox.setGeometry(QtCore.QRect(110, 20, 62, 22))
self.doubleSpinBox.setObjectName(_fromUtf8("doubleSpinBox"))
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setGeometry(QtCore.QRect(30, 60, 111, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.spinBox = QtGui.QSpinBox(self.groupBox_2)
self.spinBox.setGeometry(QtCore.QRect(110, 60, 61, 22))
self.spinBox.setMaximum(10000000)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.checkBox = QtGui.QCheckBox(self.groupBox_2)
self.checkBox.setGeometry(QtCore.QRect(30, 90, 81, 17))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.checkBox_2 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_2.setGeometry(QtCore.QRect(120, 90, 121, 17))
self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
self.checkBox_3 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_3.setGeometry(QtCore.QRect(30, 120, 81, 17))
self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(40, 280, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(40, 250, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Regressor Name", None))
self.lineEdit.setText(_translate("Form", "LASSO(L1)", None))
self.groupBox_2.setTitle(_translate("Form", "Options", None))
self.label.setText(_translate("Form", "Alpha", None))
self.label_2.setText(_translate("Form", "Max iterations", None))
self.checkBox.setText(_translate("Form", " Normalise", None))
self.checkBox_2.setText(_translate("Form", "Positive", None))
self.checkBox_3.setText(_translate("Form", "Fit intercept", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
self.pushButton.setText(_translate("Form", "Input File", None))
|
yrunts/python-for-qa
|
4-http-json-xml-html/examples/html_parse.py
|
Python
|
cc0-1.0
| 358
| 0.002793
|
from lxml import html
def main():
    dom = html.parse(('http://www.amazon.com/Apple-MH0W2LL-10-Inch-Retina-'
                      'Display/dp/B00OTWOAAQ/ref=sr_1_1?s=pc&ie=UTF8&'
                      'qid=1459799371&sr=1-1&keywords=ipad'))
title = dom.find('//*[@id="productTitle"]')
print(title.text)
if __name__ == '__main__':
main()
|
dmnfarrell/peat
|
pKaTool/titration_class.py
|
Python
|
mit
| 5,464
| 0.010615
|
#!/usr/bin/env python
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
class titration_curve:
def __init__(self,curves):
self.curves=curves.copy()
not_allowed=['pKa','pka']
for key in not_allowed:
if self.curves.has_key(key):
del self.curves[key]
return
#
# ----
#
def __sub__(self,other):
"""subtract two titration curves"""
diff=0.0
for group in self.curves.keys():
if not other.curves.has_key(group):
continue
for ph in self.curves[group].keys():
if other.curves[group].has_key(ph):
diff=diff+abs(self.curves[group][ph]-other.curves[group][ph])
return diff
#
# ----
#
def subtract_individually(self,other):
"""Subtract curves individually"""
diff=[]
for group in self.curves.keys():
if not other.curves.has_key(group):
diff.append(0.0)
continue
this_diff = 0.0
for ph in self.curves[group].keys():
if other.curves[group].has_key(ph):
this_diff=this_diff+abs(self.curves[group][ph]-other.curves[group][ph])
diff.append(this_diff)
return diff
#
# ----
#
def sub_scaled(self,other):
"""scaled difference btw two titration curves"""
diff=0.0
for group in self.curves.keys():
if not other.curves.has_key(group):
continue
raise 'incompatible titration curves'
for ph in self.curves[group].keys():
if other.curves[group].has_key(ph):
diff=diff+self.scale(self.curves[group][ph],other.curves[group][ph])*abs(self.curves[group][ph]-other.curves[group][ph])
return diff
#
# ----
#
def scale(self,frac1,frac2):
"""Scales the error on a titration point """
return max(self.scale_function(frac1),self.scale_function(frac2))
#
# ----
#
def scale_function(self, x):
"""Calculates the scaling functionfor scaling """
return -pow(abs(x)-1,2)+1
#
# ----
#
def experimental_uncertainty(self, pH_uncertainty=0.1):
"""estimates the experimental uncertainty of titration curves"""
res=0.0
count = 0
for group in self.curves.keys():
#print 'Now estimating for ',group
pHs = self.curves[group].keys()
#make sure that ph values are sorted
pHs.sort()
for i in range(len(pHs)):
bw_diff = 0
fw_diff = 0
try:
bw_diff = (self.curves[group][pHs[i]]-self.curves[group][pHs[i-1]])/(pHs[i]-pHs[i-1])
except:
pass
try:
fw_diff = (self.curves[group][pHs[i+1]]-self.curves[group][pHs[i]])/(pHs[i+1]-pHs[i])
except:
pass
avr_diff = (bw_diff+fw_diff)/2 ##### abs()?
res += avr_diff
count += 1
res *= pH_uncertainty
res = abs(res)
avr_res = res / float(count)
return res, avr_res
#
# ----
#
def sub_HHd_scaled(self, exp_data, pkas):
"""Calculates error with scaling based on deviation of exp data from the Henderson-Hasselbalch eq"""
diff=0.0
scales = exp_data.deviation_from_henderson_hasselbalch(pkas)
for group in self.curves.keys():
if not exp_data.curves.has_key(group):
continue
for ph in self.curves[group].keys():
if exp_data.curves[group].has_key(ph):
diff=diff+ scales[group][ph]*abs(self.curves[group][ph]-exp_data.curves[group][ph])
return diff
#
# -----
#
def deviation_from_henderson_hasselbalch(self, pKas):
"""Calculates the deviation from the Henderson-Hasselbalch equation for all points given pKa values"""
HH_deviation = {}
        deviation = lambda ph,pka,exp: abs(1/(1+pow(10,ph-pka))-1-exp)
for group in self.curves.keys():
if pKas.has_key(group):
pka = pKas[group]
                HH_deviation[group] = {}
for ph in self.curves[group].keys():
try:
HH_deviation[group][ph] = deviation(float(ph),float(pka),float(self.curves[group][ph]))
except:
pass
return HH_deviation
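# Illustrative usage (a sketch, not part of the original file): curves map
# group names to {pH: charge} dictionaries, and subtraction accumulates the
# absolute differences over the pH values the two curves share.
#
#   tc1 = titration_curve({'ASP10': {4.0: -0.5, 5.0: -0.9}})
#   tc2 = titration_curve({'ASP10': {4.0: -0.4, 5.0: -0.8}})
#   assert abs((tc1 - tc2) - 0.2) < 1e-12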
|
dagwieers/dstat
|
plugins/dstat_vmk_nic.py
|
Python
|
gpl-2.0
| 2,648
| 0.009819
|
### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware ESX kernel vmknic stats
### Displays VMkernel port statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -N argument.
# EXAMPLES:
# # dstat --vmknic -N vmk1
# You can even combine the Linux and VMkernel network stats (just don't just "total").
# # dstat --vmknic -n -N vmk0,vswif0
# NB Data comes from /proc/vmware/net/tcpip/ifconfig
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmknic'
self.nick = ('recv', 'send')
self.open('/proc/vmware/net/tcpip/ifconfig')
self.cols = 2
def check(self):
try:
os.listdir('/proc/vmware')
except:
raise Exception('Needs VMware ESX')
info(1, 'The vmknic module is an EXPERIMENTAL module.')
def discover(self, *list):
ret = []
for l in self.fd[0].splitlines(replace=' /', delim='/'):
if len(l) != 12: continue
if l[2][:5] == '<Link': continue
if ','.join(l) == 'Name,Mtu/TSO,Network,Address,Ipkts,Ierrs,Ibytes,Opkts,Oerrs,Obytes,Coll,Time': continue
if l[0] == 'lo0': continue
if l[0] == 'Usage:': continue
ret.append(l[0])
ret.sort()
for item in list: ret.append(item)
return ret
def vars(self):
ret = []
if op.netlist:
list = op.netlist
else:
list = self.discover
list.sort()
for name in list:
if name in self.discover + ['total']:
ret.append(name)
return ret
def name(self):
return ['net/'+name for name in self.vars]
def extract(self):
        self.set2['total'] = [0, 0]
        for line in self.fd[0].readlines():
l = line.replace(' /','/').split()
if len(l) != 12: continue
if l[2][:5] == '<Link': continue
if ','.join(l) == 'Name,Mtu/TSO,Network,Address,Ipkts,Ierrs,Ibytes,Opkts,Oerrs,Obytes,Coll,Time': continue
if l[0] == 'Usage:': continue
name = l[0]
if name in self.vars:
self.set2[name] = ( int(l[6]), int(l[9]) )
if name != 'lo0':
self.set2['total'] = ( self.set2['total'][0] + int(l[6]), self.set2['total'][1] + int(l[9]) )
if update:
for name in self.set2:
self.val[name] = list(map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name]))
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4
|
oldm/OldMan
|
oldman/validation/__init__.py
|
Python
|
bsd-3-clause
| 21
| 0
|
__author__ = 'benji'
|
terzeron/FeedMakerApplications
|
funbe/capture_item_funbe.py
|
Python
|
gpl-2.0
| 1,590
| 0.000629
|
#!/usr/bin/env python
import sys
import re
import getopt
from typing import List, Tuple
from feed_maker_util import IO
def main() -> int:
link: str = ""
title: str = ""
url_prefix = ""
state = 0
num_of_recent_feeds = 1000
optlist, _ = getopt.getopt(sys.argv[1:], "f:n:")
for o, a in optlist:
if o == '-n':
            num_of_recent_feeds = int(a)
line_list = IO.read_stdin_as_line_list()
result_list: List[Tuple[str, str]] = []
    for line in line_list:
if state == 0:
m = re.search(r'var\s+g5_url\s+=\s+"(?P<url_prefix>[^"]+)";', line)
if m:
url_prefix = m.group("url_prefix")
state = 1
elif state == 1:
m = re.search(r'<td[^>]*name="view_list"[^>]*data-role="(?P<link>[^"]+)">', line)
if m:
link = m.group("link")
link = re.sub(r'&', '&', link)
link = url_prefix + link
state = 2
elif state == 2:
m = re.search(r'<td[^>]*class="content__title"', line)
if m:
state = 3
elif state == 3:
m = re.search(r'\s*(?P<title>\S[^<>]*)(?:</td>)?\s*', line)
if m:
title = m.group("title")
result_list.append((link, title))
state = 1
num = len(result_list)
for (link, title) in result_list[:num_of_recent_feeds]:
print("%s\t%03d. %s" % (link, num, title))
num = num - 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
shawnadelic/shuup
|
shuup_tests/xtheme/test_edit.py
|
Python
|
agpl-3.0
| 681
| 0
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.xtheme.editing import could_edit, is_edit_mode, set_edit_mode
from shuup_tests.utils.faux_users import SuperUser
def test_edit_priv(rf):
request = rf.get("/")
request.user = SuperUser()
request.session = {}
assert could_edit(request)
    assert not is_edit_mode(request)
set_edit_mode(request, True)
assert is_edit_mode(request)
set_edit_mode(request, False)
    assert not is_edit_mode(request)
|
johnwallace123/dx-toolkit
|
src/python/dxpy/cli/workflow.py
|
Python
|
apache-2.0
| 11,696
| 0.00436
|
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This submodule contains workflow-based commands for the dx
command-line client.
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import dxpy
import dxpy.utils.printing as printing
from .parsers import (process_dataobject_args, process_single_dataobject_output_args,
process_instance_type_arg)
from ..utils.describe import io_val_to_str
from ..utils.resolver import (resolve_existing_path, resolve_path, is_analysis_id)
from ..exceptions import (err_exit, DXCLIError, InvalidState)
from . import (try_call, try_call_err_exit)
def new_workflow(args):
try_call(process_dataobject_args, args)
try_call(process_single_dataobject_output_args, args)
init_from = None
if args.init is not None:
if is_analysis_id(args.init):
init_from = args.init
else:
init_project, _init_folder, init_result = try_call(resolve_existing_path,
args.init,
expected='entity')
init_from = dxpy.get_handler(init_result['id'], project=init_project)
if args.output is None:
project = dxpy.WORKSPACE_ID
folder = dxpy.config.get("DX_CLI_WD", "/")
name = None
else:
project, folder, name = try_call(dxpy.utils.resolver.resolve_path, args.output)
if args.output_folder is not None:
try:
# Try to resolve to a path in the project
_ignore, args.output_folder, _ignore = resolve_path(args.output_folder, expected='folder')
except:
# But if not, just use the value directly
pass
try:
dxworkflow = dxpy.new_dxworkflow(title=args.title, summary=args.summary,
description=args.description,
output_folder=args.output_folder,
project=project, name=name,
tags=args.tags, types=args.types,
hidden=args.hidden, properties=args.properties,
details=args.details,
folder=folder,
parents=args.parents, init_from=init_from)
if args.brief:
print(dxworkflow.get_id())
else:
dxpy.utils.describe.print_desc(dxworkflow.describe(incl_properties=True, incl_details=True),
args.verbose)
except:
err_exit()
def get_workflow_id_and_project(path):
'''
:param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message.
'''
project, _folderpath, entity_result = try_call(resolve_existing_path, path, expected='entity')
try:
if entity_result is None or not entity_result['id'].startswith('workflow-'):
raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
except:
err_exit()
return entity_result['id'], project
def add_stage(args):
# get workflow
workflow_id, project = get_workflow_id_and_project(args.workflow)
# get executable
exec_handler = try_call(dxpy.utils.resolver.get_exec_handler,
args.executable,
args.alias)
exec_inputs = dxpy.cli.exec_io.ExecutableInputs(exec_handler)
try_call(exec_inputs.update_from_args, args, require_all_inputs=False)
# get folder path
folderpath = None
if args.output_folder is not None:
try:
_ignore, folderpath, _none = resolve_path(args.output_folder, expected='folder')
except:
folderpath = args.output_folder
elif args.relative_output_folder is not None:
folderpath = args.relative_output_folder
# process instance type
try_call(process_instance_type_arg, args)
dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
stage_id = try_call(dxworkflow.add_stage,
exec_handler,
name=args.name,
folder=folderpath,
stage_input=exec_inputs.inputs,
instance_type=args.instance_type)
if args.brief:
print(stage_id)
else:
dxpy.utils.describe.print_desc(dxworkflow.describe())
def list_stages(args):
# get workflow
workflow_id, project = get_workflow_id_and_project(args.workflow)
dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
desc = dxworkflow.describe()
print((printing.BOLD() + printing.GREEN() + '{name}' + printing.ENDC() + ' ({id})').format(**desc))
print()
print('Title: ' + desc['title'])
print('Output Folder: ' + (desc.get('outputFolder') if desc.get('outputFolder') is not None else '-'))
if len(desc['stages']) == 0:
print()
print(' No stages; add stages with the command "dx add stage"')
for i, stage in enumerate(desc['stages']):
stage['i'] = i
print()
if stage['name'] is None:
stage['name'] = '<no name>'
print((printing.UNDERLINE() + 'Stage {i}' + printing.ENDC() + ': {name} ({id})').format(**stage))
print('Executable {executable}'.format(**stage) + \
(" (" + printing.RED() + "inaccessible" + printing.ENDC() + ")" \
if stage.get('accessible') is False else ""))
if stage['folder'] is not None and stage['folder'].startswith('/'):
stage_output_folder = stage['folder']
else:
stage_output_folder = '<workflow output folder>/' + (stage['folder'] if stage['folder'] is not None else "")
print('Output Folder {folder}'.format(folder=stage_output_folder))
if "input" in stage and stage["input"]:
print('Bound input ' + \
('\n' + ' '*16).join([
'{key}={value}'.format(key=key, value=io_val_to_str(stage["input"][key])) for
key in stage['input']
]))
def remove_stage(args):
# get workflow
workflow_id, project = get_workflow_id_and_project(args.workflow)
try:
args.stage = int(args.stage)
except:
pass
dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
stage_id = try_call(dxworkflow.remove_stage, args.stage)
if args.brief:
print(stage_id)
else:
print("Removed stage " + stage_id)
def update_workflow(args):
# get workflow
workflow_id, project = get_workflow_id_and_project(args.workflow)
if not any([args.title, args.no_title, args.summary, args.description, args.output_folder,
args.no_output_folder]):
print('No updates requested; none made')
return
if args.output_folder is not None:
try:
# Try to resolve to an existing path in the project
_ignore, args.output_folder, _ignore = resolve_path(args.output_folder, expected='folder')
except:
# But if not, just use the value directly
pass
dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
try_call(dxworkflow.update,
title=args.title,
unset_title=args.no_title
|
cavaunpeu/vanilla-neural-nets
|
vanilla_neural_nets/recurrent_neural_network/loss_function.py
|
Python
|
mit
| 606
| 0.008251
|
import itertools
import numpy as np
from vanilla_neural_nets.base.loss_function import BaseLossFunction
class CrossEntropyLoss(BaseLossFunction):
@classmethod
def loss(cls, y_true, y_predicted):
return cls.total_loss(y_true=y_true, y_predicted=y_predicted) / len(y_true)
@classmethod
def total_loss(cls, y_true, y_predicted):
row_indices = np.arange( len(y_true) )
column_indices = y_true
return np.sum([ -np.log(y_predicted[row_indices, column_indices]) ])
@classmethod
def derivative_of_loss_function(cls, y_true, y_predicted):
pass
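# Worked example (a sketch, not part of the original module): with true
# classes 0 and 1 and the predicted class probabilities below, total_loss
# sums -log of the probability assigned to each true class.
def _cross_entropy_example():
    y_true = np.array([0, 1])
    y_predicted = np.array([[0.9, 0.1],
                            [0.2, 0.8]])
    total = CrossEntropyLoss.total_loss(y_true=y_true, y_predicted=y_predicted)
    # -log(0.9) - log(0.8)
    assert np.isclose(total, -(np.log(0.9) + np.log(0.8)))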
|
domob1812/bitcoin
|
test/functional/mempool_packages.py
|
Python
|
mit
| 16,008
| 0.003686
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.messages import COIN
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
chain_transaction,
)
# default limits
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
# custom limits for node1
MAX_ANCESTORS_CUSTOM = 5
MAX_DESCENDANTS_CUSTOM = 10
assert MAX_DESCENDANTS_CUSTOM >= MAX_ANCESTORS_CUSTOM
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
"-maxorphantx=1000",
"-whitelist=noban@127.0.0.1", # immediate tx relay
],
[
"-maxorphantx=1000",
"-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM),
"-limitdescendantcount={}".format(MAX_DESCENDANTS_CUSTOM),
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some blocks and have them mature.
peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
assert 'ancestorcount' not in utxo[0]
assert 'ancestorsize' not in utxo[0]
assert 'ancestorfees' not in utxo[0]
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
witness_chain = []
ancestor_vsize = 0
ancestor_fees = Decimal(0)
for i in range(MAX_ANCESTORS):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [0], value, fee, 1)
value = sent_value
chain.append(txid)
# We need the wtxids to check P2P announcements
fulltx = self.nodes[0].getrawtransaction(txid)
witnesstx = self.nodes[0].decoderawtransaction(fulltx, True)
witness_chain.append(witnesstx['hash'])
# Check that listunspent ancestor{count, size, fees} yield the correct results
wallet_unspent = self.nodes[0].listunspent(minconf=0)
this_unspent = next(utxo_info for utxo_info in wallet_unspent if utxo_info['txid'] == txid)
assert_equal(this_unspent['ancestorcount'], i + 1)
ancestor_vsize += self.nodes[0].getrawtransaction(txid=txid, verbose=True)['vsize']
assert_equal(this_unspent['ancestorsize'], ancestor_vsize)
ancestor_fees -= self.nodes[0].gettransaction(txid=txid)['fee']
assert_equal(this_unspent['ancestorfees'], ancestor_fees * COIN)
# Wait until mempool transactions have passed initial broadcast (sent inv and received getdata)
# Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
peer_inv_store.wait_for_broadcast(witness_chain)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_vsize = 0
assert_equal(ancestor_vsize, sum([mempool[tx]['vsize'] for tx in mempool]))
ancestor_count = MAX_ANCESTORS
assert_equal(ancestor_fees, sum([mempool[tx]['fee'] for tx in mempool]))
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(entry['descendantcount'], descendant_count)
descendant_fees += entry['fee']
assert_equal(entry['modifiedfee'], entry['fee'])
assert_equal(entry['fees']['base'], entry['fee'])
assert_equal(entry['fees']['modified'], entry['modifiedfee'])
assert_equal(entry['descendantfees'], descendant_fees * COIN)
assert_equal(entry['fees']['descendant'], descendant_fees)
descendant_vsize += entry['vsize']
assert_equal(entry['descendantsize'], descendant_vsize)
descendant_count += 1
# Check that ancestor calculations are correct
assert_equal(entry['ancestorcount'], ancestor_count)
assert_equal(entry['ancestorfees'], ancestor_fees * COIN)
assert_equal(entry['ancestorsize'], ancestor_vsize)
ancestor_vsize -= entry['vsize']
ancestor_fees -= entry['fee']
ancestor_count -= 1
# Check that parent/child list is correct
assert_equal(entry['spentby'], descendants[-1:])
assert_equal(entry['depends'], ancestors[-2:-1])
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
# Check getmempooldescendants verbose output is correct
for descendant, dinfo in self.nodes[0].getmempooldescendants(x, True).items():
assert_equal(dinfo['depends'], [chain[chain.index(descendant)-1]])
if dinfo['descendantcount'] > 1:
assert_equal(dinfo['spentby'], [chain[chain.index(descendant)+1]])
else:
assert_equal(dinfo['spentby'], [])
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors verbose output is correct
for ancestor, ainfo in self.nodes[0].getmempoolancestors(x, True).items():
                assert_equal(ainfo['spentby'], [chain[chain.index(ancestor)+1]])
if ainfo['ancestorcount'] > 1:
assert_equal(ainfo['depends'], [chain[chain.index(ancestor)-1]])
else:
assert_equal(ainfo['depends'], [])
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert chain[-1] not in v_ancestors.keys()
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert chain[0] not in v_descendants.keys()
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
ancestor_fees = 0
for x in chain:
entry = self.nodes[0].getmempoolentry(x)
ancestor_fees += entry['fee']
assert_equal(entry['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
assert_equal(entry['ancestorfees'], ancestor_fees * COIN + 1000)
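# (Illustrative note) prioritisetransaction takes fee_delta in satoshis:
# 1000 sat / COIN = 1000 / 100000000 BTC = 0.00001 BTC, so the
# BTC-denominated 'fees' fields shift by Decimal('0.00001') while the
# satoshi-denominated 'ancestorfees' field shifts by exactly 1000.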
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
descend
|
amanzi/ats-dev
|
tools/utils/plot_wrm.py
|
Python
|
bsd-3-clause
| 6,867
| 0.011213
|
from matplotlib import pyplot as plt
import numpy as np
class Spline(object):
"""Forms a cublic spline on an interval given values and derivatives at the endpoints of that interval."""
def __init__(self, x1, y1, dy1, x2, y2, dy2):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.dy1 = dy1
self.dy2 = dy2
def T(self, x):
return (x - self.x1) / (self.x2 - self.x1)
def Value(self, x):
t = self.T(x)
return (1-t)**2 * ((1+2*t) * self.y1 + t * (self.x2 - self.x1) * self.dy1) \
+ t**2 * ((3-2*t) * self.y2 + (t-1) * (self.x2 - self.x1) * self.dy2)
def Derivative(self, x):
t = self.T(x)
dtdx = 1./(self.x2 - self.x1)
dydt = (6*t**2 - 6*t)* self.y1 \
+ (3*t**2 - 4*t + 1) * (self.x2 - self.x1) * self.dy1 \
+ (-6*t**2 + 6*t) * self.y2 \
+ (3*t**2 - 2*t) * (self.x2 - self.x1) * self.dy2
return dydt * dtdx
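# A minimal sanity check (illustrative, not part of the original file): a
# Hermite spline built from endpoint data must reproduce that data exactly.
#
#   s = Spline(0.0, 0.0, 1.0, 1.0, 1.0, 0.0)
#   assert abs(s.Value(0.0) - 0.0) < 1e-12        # y1 at x1
#   assert abs(s.Value(1.0) - 1.0) < 1e-12        # y2 at x2
#   assert abs(s.Derivative(0.0) - 1.0) < 1e-12   # dy1 at x1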
class VanGenuchten(object):
def __init__( self, alpha, n, sr, l=0.5, smoothing_interval_sat=0.0, smoothing_interval_p=0.0 ):
self._alpha = alpha
self._n = n
self._sr = sr
self._l = l
self._m = 1 - 1.0/n
# smoothing for sat
self._s0 = 1.0 - smoothing_interval_sat
if self._s0 < 1.:
self._spline = Spline(self._s0, self.k_relative(self._s0), self.d_k_relative(self._s0),
1.0, 1.0, 0.)
# smoothing for pc
self._pc0 = smoothing_interval_p
if self._pc0 > 0.:
self._spline_sat = Spline(0., 1., 0., self._pc0, self.saturation(self._pc0), self.d_saturation(self._pc0))
def capillaryPressure( self, s ):
if s <= self._sr:
return np.inf
if s >= 1.:
return 0.
se = (s - self._sr) / (1.0 - self._sr)
if (se < 1.e-8):
return pow(se, -1.0/(self._m * self._n)) / self._alpha
else:
return (pow(pow(se, -1.0/self._m) - 1.0, 1/self._n)) / self._alpha
def saturation( self, pc ):
if pc <= 0.0:
return 1.0
elif pc < self._pc0:
return self._spline_sat.Value(pc)
else:
se = pow(1.0 + pow(self._alpha*pc, self._n), -self._m)
return se * (1.0 - self._sr) + self._sr
def k_relative( self, s ):
if s >= 1.:
return 1.
elif s <= self._sr:
return 0.
elif s <= self._s0:
se = (s - self._sr) / (1.0-self._sr)
return (se**self._l) * pow( 1.0 - pow( 1.0 - pow(se,1.0/self._m),self._m), 2)
else:
return self._spline.Value(s)
def d_k_relative( self, s ):
if s >= 1.:
return 0
elif s <= self._sr:
return 0.
elif s <= self._s0 + 1.e-6:
se = (s - self._sr)/(1-self._sr);
x = pow(se, 1.0 / self._m);
if (abs(1.0 - x) < 1.e-10):
return 0.0;
y = pow(1.0 - x, self._m);
dkdse = (1.0 - y) * (self._l * (1.0 - y) + 2 * x * y / (1.0 - x)) * pow(se, self._l - 1.0);
return dkdse / (1 - self._sr);
else:
return self._spline.Derivative(s)
def label( self ):
return "VG: a=%1.2e [1/Pa], n=%1.2g, sr=%1.2g, smooth=%g"%(self._alpha, self._n, self._sr, 1-self._s0)
def short_label( self ):
return "VG: a=%1.2e [1/Pa], n=%1.2g, sr=%1.2g"%(self._alpha, self._n, self._sr)
def plot(self, ax=None, color='b', format='-', label=None, y_units='Pa'):
pc = np.linspace(0,7, 1000)
pc = 10**pc
if label is None:
label = self.short_label()
if ax is None:
fig,ax = plt.subplots(1,1,squeeze=True)
s = np.array([self.saturation(apc) for apc in pc])
if y_units == 'hPa':
pc = pc / 100.
elif y_units == 'm':
pc = pc / 1000 / 9.81
elif y_units == 'cm':
pc = pc / 1000 / 9.81 * 100
elif y_units == 'Pa':
pass
else:
raise ValueError("Invalid units for yaxis, must be one of [Pa, m, cm, hPa]")
ax.semilogy(s, pc, color=color, label=label)
ax.set_xlabel("saturation [-]")
ax.set_ylabel("capillary pressure [{}]".format(y_units))
return ax
def plot_kr(self, ax=None, color='b', format='-', label=None):
if ax is None:
fig,ax = plt.subplots(1,1,squeeze=True)
if label is None:
label = self.short_label()
pc = np.linspace(0,7, 1000)
pc = 10**pc
sat = np.array([self.saturation(apc) for apc in pc])
kr = np.array([self.k_relative(s) for s in sat])
ax.plot(sat, kr, color=color, label=label)
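# Example usage (illustrative; the parameter values are made up): build a
# water retention model and plot both curves.
#
#   wrm = VanGenuchten(alpha=1.e-4, n=1.8, sr=0.1, smoothing_interval_sat=0.01)
#   ax = wrm.plot(y_units='m')
#   wrm.plot_kr()
#   plt.show()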
if __name__ == "__main__":
import sys
import argparse
import shlex
import colors
parser = argparse.ArgumentParser('plot WRM curves')
def option_to_wrm(s):
print("got: {}".format(s))
try:
s = shlex.split(s)
print("s = {}".format(s))
assert(3 <= len(s) <= 5)
alpha, n, sr = map(float, s[0:3])
if len(s) > 3:
label = s[3]
else:
label = None
if len(s) > 4:
smooth_int_sat = float(s[4])
else:
smooth_int_sat = 0.
print("WRM:")
print(f" alpha = {alpha}")
print(f" n = {n}")
print(f" sr = {sr}")
print(f" smoothing_interval_sat = {smooth_int_sat}")
print(f" label = {label}")
except:
raise argparse.ArgumentTypeError("WRM must be van Genuchten parameters (alpha, n, sr, label, smoothing_interval_sat)")
else:
return label, VanGenuchten(alpha=alpha, n=n, sr=sr, smoothing_interval_sat=smooth_int_sat)
parser.add_argument('--wrm', type=option_to_wrm, action='append', help='WRM parameters, "alpha n sr [label [smoothing_interval_sat]]"')
parser.add_argument('--y-units', type=str, choices=['Pa','m','hPa','cm'], default='Pa', help='units of the y-axis, in log space')
parser.add_argument('--kr', action='store_true', help='Plot relative permeability curve')
args = parser.parse_args()
color_list = colors.enumerated_colors(len(args.wrm))
fig = plt.figure()
ax = fig.add_subplot(111)
if args.kr:
for (label,wrm), color in zip(args.wrm, color_list):
wrm.plot_kr(ax, color, label=label)
else:
for (label,wrm), color in zip(args.wrm, color_list):
wrm.plot(ax, color, y_units=args.y_units, label=label)
ax.legend()
plt.show()
sys.exit(0)
|
siosio/intellij-community
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsPylintNoElseBoth.py
|
Python
|
apache-2.0
| 183
| 0.005464
|
def func():
value = "not-none"
# pylint: disable=unused-argument1
<caret>if value is None:
print("None")
# pylint: disable=unused-argument2
print(value)
|
takeshineshiro/nova
|
nova/tests/unit/objects/test_objects.py
|
Python
|
apache-2.0
| 68,588
| 0.000029
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import fixtures
import mock
from oslo_log import log
from oslo_utils import timeutils
from oslo_versionedobjects import exception as ovo_exc
from oslo_versionedobjects import fixture
import six
from testtools import matchers
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.IntegerField()}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.IntegerField(default=1),
'bar': fields.StringField(),
'missing': fields.StringField(),
'readonly': fields.IntegerField(read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(base.NovaObject):
fields = {
'bar': fields.StringField(),
}
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
@base.NovaObjectRegistry.register_if(False)
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.StringField()}
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
@base.NovaObjectRegistry.register_if(False)
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
@base.NovaObjectRegistry.register_if(False)
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
base.NovaObjectRegistry.register(MyObj)
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
base.NovaObjectRegistry.register(MyObj)
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
@base.NovaObjectRegistry.register_if(False)
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('MyObj'),
}
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
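# Illustrative call (names are hypothetical, not from this file): compare a
# MyObj against its originating DB row while tolerating a renamed column and
# an optionally-unset field.
#
#   compare_obj(self, obj, db_row,
#               subs={'bar': 'bar_column'},      # obj.bar vs db_row['bar_column']
#               allow_missing=['missing'],       # skipped when unset on obj
#               comparators={'foo': assert_foo}) # custom check for 'foo'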
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_ob
|
h-2/seqan
|
misc/trac_plugins/DocLinks/doc_links/macro.py
|
Python
|
bsd-3-clause
| 7,207
| 0.002498
|
"""Seqan Doc Links for Trac.
Version 0.1.
Copyright (C) 2010 Manuel Holtgrewe
Install by copying this file into the plugins directory of your trac
work directory. In your trac.ini, you can use something like this
(the following also shows the defaults).
[seqan_doc_links]
prefix = seqan
base_url = http://www.seqan.de/dddoc/html/
dox_prefix = dox
dox_base_url = http://docs.seqan.de/dev3/
Use something like this to test the plugin:
* {{{[seqan:Page.Sequences]}}} [seqan:Page.Sequences]
* {{{seqan:Class.Finder}}} seqan:Class.Finder
* {{{seqan:"Concept.Simple Type"}}} seqan:"Concept.Simple Type"
* {{{seqan:"Spec.Chunk Pool Allocator}}} seqan:"Spec.Chunk Pool Allocator"
*
* {{{dox:ContainerConcept#length}}}
"""
import urllib
import sys
from trac.core import *
import trac.wiki
import genshi.builder as gb
import genshi
from trac.web.chrome import ITemplateProvider, add_stylesheet
def getFilename(cat, item):
"""Get the filename that dddoc would create.
Args:
cat String, category of the link.
item String, name of the item.
Returns:
File name of the categorized item.
"""
return cat.upper() + escapeFiles(item) + ".html"
def escapeFiles(text):
"""Escape the file name as dddoc would do it.
Args:
text String with the text to escape.
Returns:
Escaped text.
"""
text = text.replace("_", "__")
ret = ""
for i in range(len(text)):
if (text[i] >= 'A') and (text[i] <= 'Z'):
ret += "_"
ret += text[i]
ret = ret.replace("\t", "_09")
ret = ret.replace("\n", "_0a")
ret = ret.replace("!", "_21")
ret = ret.replace("\"", "_22")
ret = ret.replace("#", "_23")
ret = ret.replace("$", "_24")
ret = ret.replace("%", "_25")
ret = ret.replace("&", "_26")
ret = ret.replace("'", "_27")
ret = ret.replace("(", "_28")
ret = ret.replace(")", "_29")
ret = ret.replace("*", "_2a")
ret = ret.replace("+", "_2b")
ret = ret.replace("/", "_2f")
ret = ret.replace(":", "_3a")
ret = ret.replace(",", "_2c")
ret = ret.replace("<", "_3c")
ret = ret.replace(">", "_3e")
ret = ret.replace("?", "_3f")
ret = ret.replace("\\", "_5c")
ret = ret.replace("|", "_7c")
ret = ret.replace(" ", "+")
if (len(ret) == 0) or (ret[0] == '_'): return ret
else: return '.'+ret
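# Worked example (illustrative): escapeFiles("Simple Type") prefixes each
# capital with '_' and maps ' ' to '+', giving "_Simple+_Type"; since the
# result starts with '_', no leading '.' is added, so
# getFilename("Concept", "Simple Type") == "CONCEPT_Simple+_Type.html".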
class SeqanDocsSyntaxProvider(trac.core.Component):
"""Expands seqan:<Category>.<EntryName> links."""
trac.core.implements(trac.wiki.IWikiSyntaxProvider)
implements(ITemplateProvider)
SECTION_NAME = 'seqan_doc_links'
DEFAULT_PREFIX = 'seqan'
DEFAULT_BASE_URL = 'http://www.seqan.de/dddoc/html/'
def __init__(self):
# Set defaults.
self.prefix = self.DEFAULT_PREFIX
self.base_url = self.DEFAULT_BASE_URL
# Parse configuration from trac.ini config file.
for option in self.config.options(self.SECTION_NAME):
if option[0] == 'prefix':
self.prefix = option[1]
if option[0] == 'base_url':
self.base_url = option[1]
def get_wiki_syntax(self):
"""Method from IWikiSyntaxProvider.
Returns empty list, we do not implement any."""
return []
def get_link_resolvers(self):
"""Method from IWikiSyntaxProvider.
Returns iterable (list) of (prefix, function) pairs.
"""
return [(self.prefix, self.format_doc_link)]
def format_doc_link(self, formatter, ns, target, label):
"""Function to perform formatting for seqan:XYZ links.
This roughly follows [1].
[1] http://trac.edgewall.org/wiki/TracDev/IWikiSyntaxProviderExample
"""
add_stylesheet(formatter.req, 'doc_links/css/doc_links.css')
# The following is a heuristic for "no alternative label".
if ns in label and target in label:
if '.' in target:
category, item = tuple(target.split('.', 1))
label = item
# Strip everything before and including the first hash.
if '#' in label:
label = label.split('#', 1)[1]
else:
label = target
# Ignore if the target does not contain a dot.
if not '.' in target:
return target
# Now, use dddoc's logic to generate the appropriate file name for the link.
file_name = getFilename(*target.split('.', 1))
span = [gb.tag.span(genshi.HTML(' '), class_='icon'), label]
title = ' "%s" in SeqAn documentation.' % target
return gb.tag.a(span, class_='doc-link',
href=self.base_url + file_name, title=title)
### ITemplateProvider methods
def get_templates_dirs(self):
return []
def get_htdocs_dirs(self):
from pkg_resources import resource_filename
return [('doc_links', resource_filename(__name__, 'htdocs'))]
class SeqanDosSyntaxProvider(trac.core.Component):
"""Expands dox:<entry-id> links."""
trac.core.implements(trac.wiki.IWikiSyntaxProvider)
implements(ITemplateProvider)
SECTION_NAME = 'seqan_doc_links'
DEFAULT_PREFIX = 'dox'
DEFAULT_BASE_URL = 'http://www.seqan.de/dddoc/html/'
def __init__(self):
# Set defaults.
self.prefix = self.DEFAULT_PREFIX
self.base_url = self.DEFAULT_BASE_URL
# Parse configuration from trac.ini config file.
for option in self.config.options(self.SECTION_NAME):
if option[0] == 'dox_prefix':
self.prefix = option[1]
if option[0] == 'dox_base_url':
self.base_url = option[1]
def get_wiki_syntax(self):
"""Method from IWikiSyntaxProvider.
Returns empty list, we do not implement any."""
return []
def get_link_resolvers(self):
"""Method from IWikiSyntaxProvider.
Returns iterable (list) of (prefix, function) pairs.
"""
return [(self.prefix, self.format_doc_link)]
def format_doc_link(self, formatter, ns, target, label):
"""Function to perfo
|
rm formatting for dox:XYZ links.
This roughly follows [1].
[1] http://trac.edgewall.org/wiki/TracDev/IWikiSyntaxProviderExample
"""
# Stylesheet already done for doc_links.
add_stylesheet(formatter.req, 'doc_links/css/doc_links.css')
# The following is a heuristic for "no alternative label".
if not label:
label = target
if label.startswith(self.prefix + ':'):
label = label[len(self.prefix) + 1:]
# Now build the query string expected by the dox documentation server.
query = '?p=%s' % target # TODO(holtgrew): url encode
span = [gb.tag.span(genshi.HTML(' '), class_='icon'), label]
title = ' "%s" in SeqAn documentation.' % target
return gb.tag.a(span, class_='doc-link',
href=self.base_url + query, title=title)
### ITemplateProvider methods
def get_templates_dirs(self):
return []
def get_htdocs_dirs(self):
from pkg_resources import resource_filename
return [('doc_links', resource_filename(__name__, 'htdocs'))]
|
cloudcopy/seahub
|
seahub/api2/serializers.py
|
Python
|
apache-2.0
| 4,418
| 0.003395
|
import re
from rest_framework import serializers
from seahub.auth import authenticate
from seahub.api2.models import Token, TokenV2, DESKTOP_PLATFORMS
from seahub.api2.utils import get_client_ip
from seahub.utils import is_valid_username
def all_none(values):
for value in values:
if value is not None:
return False
return True
def all_not_none(values):
for value in values:
if value is None:
return False
return True
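# Illustrative behaviour of the two helpers above (values are made up): they
# gate whether a login request is treated as a v1 (web) or v2 (device) token.
#
#   all_none([None, None, None])                    -> True -> v1 Token
#   all_not_none(['ios', 'id', 'name', '1.0', '9']) -> True -> v2 TokenV2
#   a mixed list fails both checks -> ValidationError('invalid params')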
_ANDROID_DEVICE_ID_PATTERN = re.compile('^[a-f0-9]{1,16}$')
class AuthTokenSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
# These fields are used by TokenV2
platform = serializers.CharField(required=False)
device_id = serializers.CharField(required=False)
device_name = serializers.CharField(required=False)
# These fields may be needed in the future
client_version = serializers.CharField(required=False)
platform_version = serializers.CharField(required=False)
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
platform = attrs.get('platform', None)
device_id = attrs.get('device_id', None)
device_name = attrs.get('device_name', None)
client_version = attrs.get('client_version', None)
platform_version = attrs.get('platform_version', None)
v2_fields = (platform, device_id, device_name, client_version, platform_version)
# Decide the version of token we need
if all_none(v2_fields):
v2 = False
elif all_not_none(v2_fields):
v2 = True
else:
raise serializers.ValidationError('invalid params')
# first check username and password
if username:
if not is_valid_username(username):
raise serializers.ValidationError('username is not valid.')
if username and password:
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
raise serializers.ValidationError('User account is disabled.')
else:
raise serializers.ValidationError('Unable to login with provided credentials.')
else:
raise serializers.ValidationError('Must include "username" and "password"')
# Now user is authenticated
if v2:
token = self.get_token_v2(username, platform, device_id, device_name,
client_version, platform_version)
else:
token = self.get_token_v1(username)
return token.key
def get_token_v1(self, username):
token, created = Token.objects.get_or_create(user=username)
return token
def get_token_v2(self, username, platform, device_id, device_name,
client_version, platform_version):
if platform in DESKTOP_PLATFORMS:
# desktop device id is the peer id, so it must be 40 chars
if len(device_id) != 40:
raise serializers.ValidationError('invalid device id')
elif platform == 'android':
# See http://developer.android.com/reference/android/provider/Settings.Secure.html#ANDROID_ID
# android device id is the 64bit secure id, so it must be 16 chars in hex representation
# but some user reports their device ids are 14 or 15 chars long. So we relax the validation.
if not _ANDROID_DEVICE_ID_PATTERN.match(device_id.lower()):
raise serializers.ValidationError('invalid device id')
elif platform == 'ios':
if len(device_id) != 36:
raise serializers.ValidationError('invalid device id')
else:
raise serializers.ValidationError('invalid platform')
request = self.context['request']
last_login_ip = get_client_ip(request)
return TokenV2.objects.get_or_create_token(username, platform, device_id, device_name,
client_version, platform_version, last_login_ip)
class AccountSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField()
is_staff = serializers.BooleanField(default=False)
is_active = serializers.BooleanField(default=True)
|
Zlash65/erpnext
|
erpnext/patches/v10_0/set_b2c_limit.py
|
Python
|
gpl-3.0
| 417
| 0.014388
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("regional", "doctype", "gst_settings")
frappe.reload_doc("accounts", "doctype", "gst_account")
gst_settings = frappe.get_doc("GST Settings")
gst_settings.b2c_limit = 250000
gst_settings.save()
|
hombit/house
|
house/secrets.py
|
Python
|
mit
| 130
| 0
|
import os
yandex_api_key = os.environ['YANDEX_API_KEY']
weather_underground_api_key = os.environ['WEATHER_UNDERGROUND_API_KEY']
|
PulseRain/Arduino_M10_IDE
|
M10_upload/FP51_upload.py
|
Python
|
lgpl-3.0
| 9,618
| 0.020067
|
#! python3
###############################################################################
# Copyright (c) 2016, PulseRain Technology LLC
#
# This program is distributed under a dual license: an open source license,
# and a commercial license.
#
# The open source license under which this program is distributed is the
# GNU Public License version 3 (GPLv3).
#
# And for those who want to use this program in ways that are incompatible
# with the GPLv3, PulseRain Technology LLC offers commercial license instead.
# Please contact PulseRain Technology LLC (www.pulserain.com) for more detail.
#
###############################################################################
import sys, getopt
import math, time
from OCD_8051 import OCD_8051
from ROM_Hex_Format import *
from time import sleep
print ("===============================================================================")
print ("# Copyright (c) 2017, PulseRain Technology LLC ")
print ("# FP51 Code Upload Utility, Version 1.2")
try:
opts, args = getopt.getopt(sys.argv[1:],"vDp:c:P:C:I:o:b:U:",[])
except getopt.GetoptError as err:
print (str(err))
sys.exit(2)
baud_rate = 115200
com_port = "COM5"
image_file = "sketch.eep"
for opt, args in opts:
if opt in ('-b'):
baud_rate = int (args)
elif opt in ('-P'):
com_port = args
elif opt in ('-U'):
image_file = args
print ("===============================================================================")
print ("baud_rate = ", baud_rate)
print ("com_port = ", com_port)
print ("image file = ", image_file)
print ("===============================================================================")
try:
ocd = OCD_8051 (com_port, baud_rate, verbose=0)
except:
print ("Failed to open COM port")
sys.exit(1)
class dummy_console:
#############################################################################
# command procedures
#############################################################################
_DEBUG_COUNTER_INDEX_RESET = 1
_DEBUG_COUNTER_INDEX_SET = 2
_TIME_COUNTER_INDEX_RESET = 3
_TIME_COUNTER_INDEX_SET = 4
def _string_to_data (self, data_string):
if (data_string.startswith('0x')):
data = int(data_string[2:], 16)
else:
data = int(data_string)
return data
def _do_reset_cpu (self):
self._ocd._serial.reset_output_buffer()
self._ocd._serial.reset_input_buffer()
self._ocd.cpu_reset()
self._ocd._serial.reset_output_buffer()
self._ocd._serial.reset_input_buffer()
def _do_pause_cpu (self):
self._ocd.cpu_pause (1, 1)
def _do_resume_cpu (self):
self._ocd.cpu_pause (0, 1)
def _write_code (self, addr, data):
offset = 0
length = len (data)
addr_end = addr + length
if (addr % 4):
for i in range (min([(4 - (addr % 4)), length])):
self._ocd.code_mem_write_byte (addr + offset, data[i])
offset = offset + 1
total_words = (addr_end - addr - offset) // 4
total_128byte_frame = total_words //32
for i in range (total_128byte_frame):
self._ocd.code_mem_write_128byte (addr + offset, data[offset : offset + 128])
offset = offset + 128
for i in range (total_words - total_128byte_frame * 32):
data_int = (data[offset] << 24) + \
(data[offset + 1] << 16) + \
(data[offset + 2] << 8) + \
(data[offset + 3])
##print ("write32bit addr = ", addr + offset, "data_int=", hex(data_int))
self._ocd.code_mem_write_32bit(addr + offset, data_int)
offset = offset + 4
for i in range (length - offset):
self._ocd.code_mem_write_byte (addr + offset, data [offset])
offset = offset + 1
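# Worked example (illustrative): _write_code(6, data) with len(data) == 300
# issues 2 byte writes to reach 4-byte alignment, then 74 aligned words:
# 2 full 128-byte frames (64 words), 10 single 32-bit writes, and finally
# 2 trailing byte writes -- 2 + 256 + 40 + 2 = 300 bytes in total.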
def _do_load_hex_file (self):
intel_hex_file = Intel_Hex(self._args[1])
if (len (intel_hex_file.data_record_list) == 0):
return
if (len(self._args) > 2):
try:
f = open(self._args[2], 'w')
except IOError:
print ("Fail to open: ", self._args[2])
return
#self._do_pause_cpu()
#print ("CPU paused");
#print ("CPU reset ...")
#self._do_reset_cpu()
#sleep(0.5)
print ("Loading...", self._args[1])
last_addr = intel_hex_file.data_record_list[-2].address + len(intel_hex_file.data_record_list[-1].data_list)
len_completed = 0
address = 0
merge_data_list = []
start_time = time.clock()
print_cnt = 0
print ("Writing | ", end="")
for record in intel_hex_file.data_record_list:
#print ("xxxxaddr=", record.address, "data=", record.data_list)
if ((print_cnt % 16) == 0):
print("#", end="")
sys.stdout.flush()
print_cnt = print_cnt + 1
if (len(merge_data_list) == 0):
address = record.address
merge_data_list = record.data_list
#print ("YY addr = ", address, " ", len (merge_data_list))
elif ((address + len (merge_data_list)) == record.address):
merge_data_list = merge_data_list + record.data_list
#print ("WW addr = ", address, " ", len (merge_data_list))
#print (merge_data_list)
else:
#print ("XXXXXXXXXXXXXXX ", address, " ", len(merge_data_list))
self._write_code (address, merge_data_list)
#print ("YYYYYYYYYYYYYYYY")
len_completed = len_completed + len(merge_data_list)
load_progress = math.ceil(len_completed * 100 / last_addr);
if (load_progress > 100):
load_progress = 100
#print ("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", end="")
#print ("%d%% completed" % load_progress, end="")
print("#", end="")
sys.stdout.flush()
if (len(self._args) > 2):
f.write('addr %d\n' % (address))
for item in merge_data_list:
f.write('%d\n' % (item))
address = record.address
merge_data_list = record.data_list
if (len(self._args) > 2):
f.close()
end_time = time.clock()
delta_time = end_time - start_time
print (" | 100% {0:0.2f}s".format(delta_time))
self._do_resume_cpu()
print ("\nCPU reset ...")
self._do_reset_cpu()
print ("Done: ", last_addr, " Byte(s)")
print ("CPU is running")
def _do_uart_select (self):
self._ocd.uart_select (1 - self.uart_raw_mode_enable)
def _do_uart_switch (self):
self.uart_raw_mode_enable = 1 - self.uart_raw_mode_enable
self._ocd._serial.reset_output_buffer()
self._ocd._serial.reset_input_buffer()
self._do_uart_select()
self._ocd._serial.write ([ord('\r')])
sleep(0.5)
if (self._ocd._serial.in_waiting):
r = self._ocd._serial.read (self._ocd._serial.in_waiting)
prt_out = ""
for i in r:
if (i < 128):
prt_out = prt_out + chr(i)
#print (prt_out, end="")
#sys.stdout.flush()
def _do_load_hex_and_switch (self):
self._do_load_
|
cripplet/practice
|
codeforces/492/attempt/b_lanterns.py
|
Python
|
mit
| 897
| 0.044593
|
import fileinput
def str_to_int(s):
return([ int(x) for x in s.split() ])
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
(n, l) = str_to_int(args[0])
a = tuple(str_to_int(args[1]))
return(l, a)
def solve(args, verbose=False):
(l, a) = proc_input(args)
list_a = list(a)
list_a.sort()
max_dist = max(list_a[0] * 2, (l - list_a[-1]) * 2)
for x in xrange(len(a) - 1):
max_dist = max(max_dist, list_a[x + 1] - list_a[x])
if verbose:
print max_dist / float(2)
return max_dist / float(2)
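# Why the endpoints are doubled (illustrative note): interior lanterns split
# each gap g between neighbours and need radius g/2, but an edge must be
# reached by a single lantern at distance d, which needs radius d; doubling d
# lets one max()/2 handle both cases. E.g. l=15 with lanterns at
# 0,3,5,7,9,14,15 has largest gap 5 (between 9 and 14), so the radius is 2.5,
# matching the last assertion in test() below.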
def test():
assert(str_to_int('1 2 3') == [ 1, 2, 3 ])
assert(proc_input([ '2 5', '2 5' ]) == (5, (2, 5)))
assert(solve([ '2 5', '2 5' ]) == 2.0)
assert(solve([ '4 5', '0 1 2 3' ]) == 2.0)
assert(solve([ '7 15', '15 5 3 7 9 14 0' ]) == 2.5)
if __name__ == '__main__':
from sys import argv
if argv.pop() == 'test':
test()
else:
solve(list(fileinput.input()), verbose=True)
|
kevindiltinero/seass3
|
tests/test_toggle.py
|
Python
|
bsd-2-clause
| 137
| 0.014599
|
from tests import tests
def test_toggle():
temporary = tests.toggled_seats
assert temporary == [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
|
mafiya69/sympy
|
sympy/solvers/tests/test_solveset.py
|
Python
|
bsd-3-clause
| 40,909
| 0.001076
|
from sympy import (
Abs, Dummy, Eq, Gt, Function,
LambertW, Piecewise, Poly, Rational, S, Symbol, Matrix,
asin, acos, acsc, asec, atan, atanh, cos, csc, erf, erfinv, erfc, erfcinv,
exp, log, pi, sin, sinh, sec, sqrt, symbols,
tan, tanh, atan2, arg,
Lambda, imageset, cot, acot, I, EmptySet, Union, E, Interval, Intersection,
oo)
from sympy.core.function import nfloat
from sympy.core.relational import Unequality as Ne
from sympy.functions.elementary.complexes import im, re
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys.rootoftools import CRootOf
from sympy.sets import (FiniteSet, ConditionSet, Complement, ImageSet)
from sympy.utilities.pytest import XFAIL, raises, skip, slow
from sympy.utilities.randtest import verify_numerically as tn
from sympy.physics.units import cm
from sympy.solvers.solveset import (
solveset_real, domain_check, solveset_complex, linear_eq_to_matrix,
linsolve, _is_function_class_equation, invert_real, invert_complex,
solveset)
a = Symbol('a', real=True)
b = Symbol('b', real=True)
c = Symbol('c', real=True)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
q = Symbol('q', real=True)
m = Symbol('m', real=True)
n = Symbol('n', real=True)
def test_invert_real():
x = Symbol('x', real=True)
y = Symbol('y')
n = Symbol('n')
def ireal(x, s=S.Reals):
return Intersection(s, x)
minus_n = Intersection(Interval(-oo, 0), FiniteSet(-n))
plus_n = Intersection(Interval(0, oo), FiniteSet(n))
assert solveset(abs(x) - n, x, S.Reals) == Union(minus_n, plus_n)
assert invert_real(exp(x), y, x) == (x, ireal(FiniteSet(log(y))))
y = Symbol('y', positive=True)
n = Symbol('n', real=True)
assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3))
assert invert_real(x*3, y, x) == (x, FiniteSet(y / 3))
assert invert_real(exp(x), y, x) == (x, FiniteSet(log(y)))
assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3))
assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3))
assert invert_real(exp(x) + 3, y, x) == (x, ireal(FiniteSet(log(y - 3))))
assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3)))
assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y)))
assert invert_real(log(3*x), y, x) == (x, FiniteSet(exp(y) / 3))
assert invert_real(log(x + 3), y, x) == (x, FiniteSet(exp(y) - 3))
minus_y = Intersection(Interval(-oo, 0), FiniteSet(-y))
plus_y = Intersection(Interval(0, oo), FiniteSet(y))
assert invert_real(Abs(x), y, x) == (x, Union(minus_y, plus_y))
assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2)))
assert invert_real(2**exp(x), y, x) == (x, ireal(FiniteSet(log(log(y)/log(2)))))
assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y)))
assert invert_real(x**Rational(1, 2), y, x) == (x, FiniteSet(y**2))
raises(ValueError, lambda: invert_real(x, x, x))
raises(ValueError, lambda: invert_real(x**pi, y, x))
raises(ValueError, lambda: invert_real(S.One, y, x))
assert invert_real(x**31 + x, y, x) == (x**31 + x, FiniteSet(y))
y_1 = Intersection(Interval(-1, oo), FiniteSet(y - 1))
y_2 = Intersection(Interval(-oo, -1), FiniteSet(-y - 1))
assert invert_real(Abs(x**31 + x + 1), y, x) == (x**31 + x,
Union(y_1, y_2))
assert invert_real(sin(x), y, x) == \
(x, imageset(Lambda(n, n*pi + (-1)**n*asin(y)), S.Integers))
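# (Illustrative) the image set above is the general solution of sin(x) = y:
# n = 0, 1, 2 give asin(y), pi - asin(y), 2*pi + asin(y), ... because
# (-1)**n alternates the sign of asin(y).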
assert invert_real(sin(exp(x)), y, x) == \
(x, imageset(Lambda(n, log((-1)**n*asin(y) + n*pi)), S.Integers))
assert invert_real(csc(x), y, x) == \
(x, imageset(Lambda(n, n*pi + (-1)**n*acsc(y)), S.Integers))
assert invert_real(csc(exp(x)), y, x) == \
(x, imageset(Lambda(n, log((-1)**n*acsc(y) + n*pi)), S.Integers))
assert invert_real(cos(x), y, x) == \
(x, Union(imageset(Lambda(n, 2*n*pi + acos(y)), S.Integers), \
imageset(Lambda(n, 2*n*pi - acos(y)), S.Integers)))
assert invert_real(cos(exp(x)), y, x) == \
(x, Union(imageset(Lambda(n, log(2*n*pi + acos(y))), S.Integers), \
imageset(Lambda(n, log(2*n*pi - acos(y))), S.Integers)))
assert invert_real(sec(x), y, x) == \
(x, Union(imageset(Lambda(n, 2*n*pi + asec(y)), S.Integers), \
imageset(Lambda(n, 2*n*pi - asec(y)), S.Integers)))
assert invert_real(sec(exp(x)), y, x) == \
(x, Union(imageset(Lambda(n, log(2*n*pi + asec(y))), S.Integers), \
imageset(Lambda(n, log(2*n*pi - asec(y))), S.Integers)))
assert invert_real(tan(x), y, x) == \
(x, imageset(Lambda(n, n*pi + atan(y)), S.Integers))
assert invert_real(tan(exp(x)), y, x) == \
(x, imageset(Lambda(n, log(n*pi + atan(y))), S.Integers))
assert invert_real(cot(x), y, x) == \
(x, imageset(Lambda(n, n*pi + acot(y)), S.Integers))
assert invert_real(cot(exp(x)), y, x) == \
(x, imageset(Lambda(n, log(n*pi + acot(y))), S.Integers))
assert invert_real(tan(tan(x)), y, x) == \
(tan(x), imageset(Lambda(n, n*pi + atan(y)), S.Integers))
x = Symbol('x', positive=True)
assert invert_real(x**pi, y, x) == (x, FiniteSet(y**(1/pi)))
# Test for ``set_h`` containing information about the domain
n = Dummy('n')
x = Symbol('x')
h1 = Intersection(Interval(-oo, -3), FiniteSet(-a + b - 3),
imageset(Lambda(n, n - a - 3), Interval(0, oo)))
h2 = Intersection(Interval(-3, oo), FiniteSet(a - b - 3),
imageset(Lambda(n, -n + a - 3), Interval(0, oo)))
assert invert_real(Abs(Abs(x + 3) - a) - b, 0, x) == (x, Union(h1, h2))
def test_invert_complex():
assert invert_complex(x + 3, y, x) == (x, FiniteSet(y - 3))
assert invert_complex(x*3, y, x) == (x, FiniteSet(y / 3))
assert invert_complex(exp(x), y, x) == \
(x, imageset(Lambda(n, I*(2*pi*n + arg(y)) + log(Abs(y))), S.Integers))
assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y)))
raises(ValueError, lambda: invert_real(1, y, x))
raises(ValueError, lambda: invert_complex(x, x, x))
raises(ValueError, lambda: invert_complex(x, x, 1))
def test_domain_check():
assert domain_check(1/(1 + (1/(x+1))**2), x, -1) is False
assert domain_check(x**2, x, 0) is True
assert domain_check(x, x, oo) is False
assert domain_check(0, x, oo) is False
def test_is_function_class_equation():
from sympy.abc import x, a
assert _is_function_class_equation(TrigonometricFunction,
tan(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + sin(x) - a, x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x + a) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x*a) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
a*tan(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x)**2 + sin(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + x, x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x**2), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x**
|
yacoin/yacoin
|
test/functional/test_framework/test_node.py
|
Python
|
mit
| 24,353
| 0.003203
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
EncodeDecimal,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, block_fork_1_0=0):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.chain = chain
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
print("Block fork "+str(block_fork_1_0))
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
"-testnetNewLogicBlockNumber="+str(block_fork_1_0)
]
if use_valgrind:
default_suppressions_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "..", "contrib", "valgrind.supp")
suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
default_suppressions_file)
self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
if self.version is None or self.version >= 190000:
self.args.append("-logthreadnames")
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('Y8g8qJKQBvzaQLzyBfwzq3ochnDf2yJN4W', 'XLhuyJ8A62v5mqpzxRtUjLNk1jasQ88hKhcLo5UebwNCE13Eh4o6'),
AddressKeyPair('YBFPVzbEXB8pKodjyk18CcYHLzTRq8QNrL', 'XSUubLFLG5vL3h3QMLY93fRnLn2tYwomFUYhJh7RwkAUsxSwpvaH'),
AddressKeyPair('YHjuPreds6a2UbztowqctHy92iEbd34rXR', 'XLrbkSZbFCU8z2Y3QUPcgH8TvkgN4f6iva2bVET4XKRgt8Z9iubQ'),
AddressKeyPair('YLUcyhXWz8JbXekmKajg2vZUg1uweW6pWn', 'XRxAth6ux7KQiJ4YY95qms9o7mKsK5ad9iJzWjAMeg8kL1ZyAUUH'),
AddressKeyPair('YF3pqbSzCJF3CQ9jXFMmNDpqj5NENFAw5Q', 'XLwhJc7aVHYjPzHMuYtD311p9n7krdAwPCiLgyDZSnMQkZsDD5Ed'),
AddressKeyPair('Y4s27oqeta92QGJsRmHLdjBQAQ8256EpWW', 'XLtYHAyoFtu2gu7MQuAEDH8cU8uVQbJkMpgYZKRSnCJbL1aUzKaZ'),
AddressKeyPair('Y8cp27tChD8bJVG7Dtrf1aDeQRLLa1vErv','XQqkHTd8VQe2eMB2oA8obx9rWLfZicWJirT6xVAdGJ9nWgwDEP89'),
AddressKeyPair('Y8WHgiWoKBZxboFVLk1VpPs9u76xiSmD4g', 'XTDroJ2meDFXLj4ERzYmv1PpZDduKMXj4GgNgX1MR9BmRkWnqeUD'),
AddressKeyPair('YF1u3m8z7nn9RStw7XBfHH35GpwYf4wpx9', 'XPrvw7w2QeGMsY19mXdSMHuidZnMcewc38FMK28ua6id3cuQCB4K'),
AddressKeyPair('Y8RMtv2rPfNj3JeUq5Zb6xxvUeyXULz8ki', 'XPayZe8BXgKqarj3ZPRZmtcyqdkMPCpZR6bnF2EnJ1NdyivjTWU9'),
AddressKeyPair('YHU8rSKMYsHKv9JPiuzGD5tabVN1zxfrhy', 'XQC4xmMd8S1tNwKpMcKb2kjotURFtnVUsauNB6eChvsrRkUZKGFM'),
AddressKeyPair('YFzYDbzCkNbViCjkYJ2i9pzfhWpKMQzhcb', 'XMJ75XrddqxpyGeUrsVZs7PbVYKN2s7y5oRNzRY9v2C6S36f58Kd')
# AddressKeyPair('YCYpkfgECJsUSH42FzTgpbjWRqf1E6oRcK', 'XSs57SNVPmiNMmNJx3hRdMycTPhZHnocYLqxWYpFsAaLZUgtXooY')
]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
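# Illustrative dispatch (not part of the original file): with use_cli=False a
# call like node.getblockcount() is not defined on TestNode, so __getattr__
# forwards it to the authenticated RPC proxy; with use_cli=True the same call
# is routed through the TestNodeCLI wrapper and runs via bitcoin-cli.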
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any exist
|
meerkat-code/meerkat_api
|
meerkat_api/test/test_locations.py
|
Python
|
mit
| 8,430
| 0.00083
|
#!/usr/bin/env python3
"""
Meerkat API Tests
Unit tests for the location resource in Meerkat API
"""
import json
import unittest
import meerkat_api
from meerkat_api.test import db_util
from meerkat_api.resources import locations
from meerkat_api.test.test_data.locations import DEVICE_IDS_CSV_LIST, DEVICEID_1, DEVICE_IDS_IMEI_CSV_LIST, \
LOCATION_NUMBER
from . import settings
class MeerkatAPILocationTestCase(unittest.TestCase):
def setUp(self):
"""Setup for testing"""
meerkat_api.app.config['TESTING'] = True
meerkat_api.app.config['API_KEY'] = ""
self.app = meerkat_api.app.test_client()
meerkat_api.app.app_context().push()
session = db_util.session
db_util.insert_codes(session)
db_util.insert_locations(session)
def tearDown(self):
pass
def test_locations(self):
"""Check locations"""
rv = self.app.get('/locations', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(len(data), LOCATION_NUMBER)
self.assertEqual(set(data.keys()), set([repr(x) for x in range(1, LOCATION_NUMBER + 1)]))
self.assertEqual(data["11"]["name"], "Clinic 5")
self.assertEqual(data["11"]["parent_location"], 6)
self.assertEqual(data["5"]["name"], "District 2")
def test_location(self):
"""Check location"""
rv = self.app.get('/location/11', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["name"], "Clinic 5")
self.assertEqual(data["parent_location"], 6)
rv = self.app.get('/location/7', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["name"], "Clinic 1")
self.assertEqual(data["parent_location"], 4)
def test_tot_clinics(self):
"""Check tot_clinics"""
rv = self.app.get('/tot_clinics/1', headers=settings.header)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["total"], 4)
rv = self.app.get('/tot_clinics/2', headers=settings.header)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["total"], 3)
rv = self.app.get('/tot_clinics/3', headers=settings.header)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["total"], 1)
# With clinic type
tot_clinic = locations.TotClinics()
data = tot_clinic.get(1, "SARI")
self.assertEqual(data["total"], 2)
data = tot_clinic.get(1, "Refugee")
self.assertEqual(data["total"], 2)
def test_location_tree(self):
""" Test the location tree """
rv = self.app.get('/locationtree', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["text"], "Demo")
nodes = data["nodes"]
ids = []
for n in nodes:
ids.append(n["id"])
self.assertIn(2, ids)
self.assertIn(3, ids)
self.assertNotIn(4, ids)
self.assertNotIn(5, ids)
district_level = nodes[0]["nodes"]
ids = []
for n in district_level:
ids.append(n["id"])
self.assertIn(4, ids)
self.assertIn(5, ids)
self.assertNotIn(6, ids)
clinic_level = district_level[0]["nodes"]
ids = []
for n in clinic_level:
ids.append(n["id"])
self.assertIn(7, ids)
self.assertIn(8, ids)
self.assertNotIn(9, ids)
self.assertNotIn(10, ids)
self.assertNotIn(11, ids)
# Test location tree filtering functionality
# A utility function to recursively get the clinics out of the tree
def get_clinics(tree):
children = []
if tree['nodes']:
for child in tree['nodes']:
children += get_clinics(child)
if not child['nodes']:
children += [child['text']]
return children
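# Illustrative trace (hypothetical tree): for
# {'text': 'Demo', 'nodes': [{'text': 'Clinic 1', 'nodes': []}]}
# get_clinics recurses into each child and returns ['Clinic 1'] --
# only leaf nodes (clinics) are collected.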
# Test inc functionality
rv = self.app.get(
'/locationtree?inc_case_types=["pip"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?inc_case_types=["pip"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 2', clinics)
self.assertIn('Clinic 4', clinics)
self.assertIn('Clinic 5', clinics)
self.assertEqual(len(clinics), 3)
rv = self.app.get(
'/locationtree?inc_case_types=["pip","mh"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?inc_case_types=["pip", "mh"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 2', clinics)
self.assertIn('Clinic 1', clinics)
self.assertIn('Clinic 4', clinics)
self.assertIn('Clinic 5', clinics)
self.assertEqual(len(clinics), 4)
# Test exc functionality
rv = self.app.get(
'/locationtree?exc_case_types=["pip"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?exc_case_types=["pip"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 1', clinics)
self.assertIn('Clinic 4', clinics)
self.assertIn('Clinic 5', clinics)
self.assertEqual(len(clinics), 3)
rv = self.app.get(
'/locationtree?exc_case_types=["pip", "mh"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?exc_case_types=["pip", "mh"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 4', clinics)
self.assertEqual(len(clinics), 1)
# Test both inc and exc functionality
rv = self.app.get(
'/locationtree?inc_case_types=["mh"]&exc_case_types=["pip","mh"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print(
'/locationtree?inc_case_types=["mh"]&exc_case_types=["pip","mh"]'
)
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 4', clinics)
self.assertEqual(len(clinics), 1)
def test_location_by_non_existing_device_id(self):
for id in ["42", "fake_device_id", DEVICEID_1[1:]]:
rv = self.app.get('locations?deviceId={}'.format(id), headers=settings.header)
self.assertEqual(rv.status_code, 200)
actual_response_json = json.loads(rv.data.decode('utf-8'))
empty_json = {}
self.assertEqual(actual_response_json, empty_json)
def test_location_by_device_id(self):
for id in DEVICE_IDS_CSV_LIST.split(','):
self.validate_correct_location_returned(deviceid=id, expected_loc_id='12')
def test_location_by_device_id_imei_format(self):
for id in DEVICE_IDS_IMEI_CSV_LIST.split(','):
self.validate_correct_location_returned(deviceid=id, expected_loc_id='13')
def validate_correct_location_returned(self, deviceid=None, expected_loc_id=None):
rv = self.app.get('/locations?deviceId={}'.format(deviceid), headers=settings.header)
self.assertEqual(rv.status_code, 200)
actu
|
munhyunsu/UsedMarketAnalysis
|
joonggonara_crawl/joonggonara/spiders/lgt_spiders.py
|
Python
|
gpl-3.0
| 7,133
| 0.033962
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##### ##### ===== Imports =====
# Personal ID and password file.
from personal.jconfig import LOGIN_ID, LOGIN_PW
# scrapy item file.
from joonggonara.items import JoonggonaraItem
# FormRequest for logging in.
# Request for crawling after login.
from scrapy.http import FormRequest, Request
# urljoin for turning each article url taken from the board page into a url the Spider can use.
from urlparse import urljoin
# scrapy itself.
import scrapy
# re for extracting the article number from the response.
import re
# os.path for checking whether a file exists.
import os.path
# time and random for random sleeps.
import time
import random
# sqlite3 for the database.
import sqlite3
##### ##### ===== End of imports =====
##### ##### ===== Global variables =====
CRAWL_TARGET = 0
CRAWL_COUNT = 0
MAX_PAGE = 0
DOWNLOAD_DELAY = 2
conn = None
cur = None
##### ##### ===== End of global variables =====
##### ##### ===== Per-project variables =====
# Main variables
SPIDER_NAME = 'lgt'
START_URL = 'http://nid.naver.com/nidlogin.login'
BOARD_PAGE_URL = 'http://cafe.naver.com/ArticleList.nhn?search.boardtype=L&userDisplay=50&search.menuid=425&search.questionTab=A&search.clubid=10050146&search.specialmenutype=&search.totalCount=501&search.page=' # SKT - 339, KT - 424, LGT - 425, women's tops (fup) - 356, men's tops (mup) - 358
ARTICLE_URL = 'http://cafe.naver.com/ArticleRead.nhn?clubid=10050146&page=1&menuid=425&boardtype=L&articleid='
DATABASE_NAME = 'joonggonara.sqlite'
LIST_DB = 'list_lgt'
DOWNLOADED_DB = 'downloaded_lgt'
# Temporary variables
TARGET_FILE = 'target_lgt.txt'
MAX_FILE = 'max_lgt.txt'
LOGIN_FILE = 'output/login_lgt.html'
ARTICLE_AHREF = '//a[contains(@href, "articleid") and not(contains(@href, "specialmenutype"))]/@href'
SAVE_LOCATION = 'output/lgt/'
##### ##### ===== End of per-project variables =====
##### ##### ===== Class declarations =====
##### ----- -----
##### Joonggonara spider class
##### ----- -----
class Spider(scrapy.Spider):
name = SPIDER_NAME
global CRAWL_TARGET
global CRAWL_COUNT
global MAX_PAGE
global conn
global cur
# Delay setting
download_delay = DOWNLOAD_DELAY
# We have to log in before starting,
# so the spider starts at the login page.
start_urls = [
START_URL
]
# Read the number of items to collect from a file;
# this lets the crawl volume be adjusted without editing the source code.
target_file = open(TARGET_FILE, 'r')
CRAWL_TARGET = int(target_file.readline())
target_file.close()
max_file = open(MAX_FILE, 'r')
MAX_PAGE = int(max_file.readline())
max_file.close()
# Function that performs the login.
def parse(self, response):
# Fill in the login form:
# enter the id and pw into the matching form fields.
# scrapy manages the session cookies from here on.
return scrapy.FormRequest.from_response(
response,
formname='frmNIDLogin',
formdata={'id': LOGIN_ID, 'pw': LOGIN_PW},
clickdata={'nr': 0},
callback=self.after_login
)
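# Overall flow (illustrative summary, not in the original file): parse() logs
# in, after_login() opens the board, parse_list() pages through it filling
# LIST_DB, crawl_article() replays LIST_DB as Requests, and parse_article()
# saves each page and marks it in DOWNLOADED_DB.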
# After login, collect each article URL from the board list.
def after_login(self, response):
# Load the global variables
global CRAWL_TARGET
global CRAWL_COUNT
global MAX_PAGE
global conn
global cur
# For debugging the login
with open(LOGIN_FILE, 'wb') as f:
f.write(response.body)
f.close()
# Create Database Connector
conn = sqlite3.connect(DATABASE_NAME)
# Create Database Cursor
cur = conn.cursor()
# Create Table
cur.executescript('''
CREATE TABLE IF NOT EXISTS ''' + LIST_DB + ''' (
article_num INTEGER PRIMARY KEY NOT NULL UNIQUE);
''' +
'''
CREATE TABLE IF NOT EXISTS ''' + DOWNLOADED_DB + ''' (
article_num INTEGER PRIMARY KEY NOT NULL UNIQUE);
'''
)
conn.commit()
# Load the number of target list entries saved during the previous crawl
cur.execute('''
SELECT COUNT(*) FROM ''' + LIST_DB
)
CRAWL_COUNT = CRAWL_COUNT + int(cur.fetchone()[0])
# After a successful login, pick up each article's URL from the board.
return Request(url=BOARD_PAGE_URL + str(1), callback=self.parse_list)
# Parse article URLs (excluding notices) from the collected board pages.
def parse_list(self, response):
# Load the global variables
global CRAWL_TARGET
global CRAWL_COUNT
global MAX_PAGE
global conn
global cur
# Find the articles written by users
for ahref in response.xpath(ARTICLE_AHREF).extract():
# Stop once the collection target has been reached
if CRAWL_COUNT >= CRAWL_TARGET:
break
# Parse the article number
article_num = re.split(r'[?=&]', ahref)[12]
# Skip articles that were already downloaded
cur.execute('SELECT * FROM ' + DOWNLOADED_DB + ' WHERE article_num = ' + str(article_num)
)
if cur.fetchone() is not None:
print 'target skip: ' + str(article_num)
continue
# Add it to the download target list
cur.execute('INSERT OR IGNORE INTO ' + LIST_DB + ' (article_num) VALUES (' + str(article_num) + ')'
)
conn.commit()
CRAWL_COUNT = CRAWL_COUNT + 1
# Check whether the list has been filled to the target count
page_num = int(re.split(r'[=]', response.url)[8])
if ((CRAWL_COUNT >= CRAWL_TARGET) or (page_num >= MAX_PAGE)):
return self.crawl_article()
else:
# If still below the target, fetch the next page
next_url = BOARD_PAGE_URL + str(page_num+1)
return Request(url=next_url, callback=self.parse_list)
# Collect the articles
def crawl_article(self):
# Load the global variables
global CRAWL_TARGET
global CRAWL_COUNT
global MAX_PAGE
global conn
global cur
# 다운로드 대상 리스트 불러오기
# 참고: yield로 Request를 전송하기 때문에 cur가 동시에 사용될 가능성이 있다
# 따라서 fetchall()로 데이터를 모두 가져와야 한다
cur.execute('SELECT * FROM ' + LIST_DB)
target_list = cur.fetchall()
# Request 보내기
for data in target_list:
# request_url 조립
article_num = data[0]
request_url = ARTICLE_URL + str(article_num)
            # Remove the article from the download target list before sending the Request
cur.execute('DELETE FROM ' + LIST_DB + ' WHERE article_num = ' + str(article_num)
)
conn.commit()
            # Random sleep
time.sleep(random.randint(0, 1))
            # Send the request
yield Request(request_url, callback = self.parse_article)
    # Saves the raw HTML of each article.
def parse_article(self, response):
        # Load global variables.
global CRAWL_TARGET
global CRAWL_COUNT
global MAX_PAGE
global conn
global cur
        # Record the collected article in the downloaded list
article_num = re.split(r'[?=&]', response.url)[10]
cur.execute('INSERT OR IGNORE INTO ' + DOWNLOADED_DB + ' (article_num) VALUES (' + str(article_num) + ')'
)
conn.commit()
        # Save the collected article to a file
with open(SAVE_LOCATION + article_num + '.html', 'wb') as f:
f.write(response.body)
f.close()
##### ##### ===== End of class declarations =====
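# A minimal editorial sketch (not part of the original spider): the INSERT and
# DELETE statements above build SQL by string concatenation; the same queries
# can pass the article number as a parameterized value instead. Table names
# cannot be parameterized in sqlite3, so they stay interpolated from the
# trusted LIST_DB/DOWNLOADED_DB constants above.
def mark_article_downloaded(cur, conn, table, article_num):
    # table must be one of the trusted table-name constants, never user input
    cur.execute('INSERT OR IGNORE INTO ' + table + ' (article_num) VALUES (?)',
                (int(article_num),))
    conn.commit()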
|
nicolasgallardo/TECHLAV_T1-6
|
bebop_ws/build/laser_scan_publisher_tutorial/catkin_generated/pkg.develspace.context.pc.py
|
Python
|
gpl-2.0
| 389
| 0
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "laser_scan_publisher_tutorial"
PROJECT_SPACE_DIR = "/home/robot/bebop_ws/devel"
PROJECT_VERSION = "0.2.1"
|
rtucker-mozilla/WhistlePig
|
vendor-local/lib/python/tastypie/resources.py
|
Python
|
bsd-3-clause
| 94,814
| 0.002067
|
from __future__ import with_statement
import sys
import logging
import warnings
import django
from django.conf import settings
try:
from django.conf.urls import patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.core.signals import got_request_exception
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# copycompat deprecated in Django 1.5. If python version is at least 2.5, it
# is safe to use the native python copy module.
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
if sys.version_info >= (2,5):
try:
from copy import deepcopy
except ImportError:
from django.utils.copycompat import deepcopy
else:
# For python older than 2.5, we must be running a version of Django before
# copycompat was deprecated.
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
def __str__(self):
return 'No such data is available.'
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
detail_uri_name = 'pk'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
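# Editorial sketch (not part of tastypie): how ResourceOptions.__new__ above
# folds a ``class Meta`` into an options object.
def _resource_options_demo():
    class Meta:
        limit = 5
        allowed_methods = ['get']
    opts = ResourceOptions(Meta)
    assert opts.limit == 5
    # list/detail allowed methods default to ``allowed_methods`` when unset:
    assert opts.list_allowed_methods == ['get']
    assert opts.detail_allowed_methods == ['get']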
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(object):
"
|
""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
    beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
# Our response can vary based on a number of factors, use
|
sysadminmatmoz/ingadhoc
|
account_journal_book/models/account.py
|
Python
|
agpl-3.0
| 1,050
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import fields, models, api
import logging
_logger = logging.getLogger(__name__)
class account_move(models.Model):
_inherit = 'account.move'
number_in_book = fields.Char(
string='Number in Book',
help='This number is set when closing a period or by running a wizard'
)
_sql_constraints = [
('number_in_book_uniq', 'unique(number_in_book, company_id)',
'Number in Book must be unique per Company!')]
@api.multi
def moves_renumber(self, sequence):
_logger.info("Renumbering %d account moves.", len(self.ids))
for move in self:
new_number = sequence.with_context(
fiscalyear_id=move.period_id.fiscalyear_id.id)._next()
move.number_in_book = new_number
|
CPSC491FileMaker/project
|
helper.py
|
Python
|
gpl-2.0
| 2,133
| 0.018753
|
import xml.etree.ElementTree as et
import os, time
from xml.etree.ElementTree import Element
from PyQt4 import QtCore, QtGui
class Helper():
    # Initializes the class and prepares an XML tree for parsing
def __init__(self):
self.tree = et.parse('./data/data.xml')
self.root = self.tree.getroot()
def write(self, filename):
return self.tree.write(filename)
def updateEmployee(self):
eList = []
allfound = []
for employees in self.root:
for person in employees:
for info in person:
eList.append(info.text)
if(len(eList) == 2):
allfound.append(eList)
eList = []
# for x in allfound:
# print x
return allfound
def updateStatus(self):
sList = []
for child in self.root:
if(child.get('MODID') == '2'):
for stat in child:
sList.append(stat.text)
# for x in sList:
# print x
return sList
def removeEmployee(self, toRemove):
for employees in self.root:
for person in employees:
for info in person:
if (info.text == toRemove):
employees.remove(person)
self.write('./data/data.xml')
def removeStatus(self, toRemove):
proj_stats = self.root.find('Proj_statuses')
for status in proj_stats:
#for status in statuses:
if (status.text == toRemove):
proj_stats.remove(status)
self.write('./data/data.xml')
def addEmployee(self, eName, eColor):
emp = Element('Person')
kid = self.root.find('Employees')
name = Element('Name')
color = Element('Color')
name.text = eName
color.text = eColor
        #print 'to xml: ' + name.text
        #print 'to xml: ' + color.text
emp.append(name)
emp.append(color)
kid.append(emp)
self.write('./data/data.xml')
def addStatus(self, sName):
st = Element('Stat')
st.text = sName
kid = self.root.find('Proj_statuses')
kid.append(st)
self.write('./data/data.xml')
if __name__ == "__main__":
A = Helper()
A.updateEmployee()
A.updateStatus()
A.addEmployee('Walter', '0x0000FF')
A.addStatus('Eating')
|
tryexceptpass/sofi
|
test/tablerow_test.py
|
Python
|
mit
| 429
| 0.011655
|
from sofi.ui import TableRow
def test_basic():
assert(str(TableRow()) == "<tr></tr>")
def test_text():
assert(str(TableRow("text")) == "<tr>text</tr>")
def test_custom_class_ident_style_and_attrs():
    assert(str(TableRow("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
           == "<tr id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</tr>")
|
mtik00/bottle-wiki
|
wikiapi.py
|
Python
|
mit
| 1,234
| 0.007293
|
""" A simple restful webservice to provide access to the wiki.db"""
import json
from bottle import Bottle, run, response, static_file, redirect
from dbfunctions import Wikidb
api = Bottle()
db = Wikidb()
@api.route('/static/<filepath:path>')
def static(filepath):
return static_file(filepath, root='./static')
@api.route('/api/search/<term>')
def search(term):
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(db.search(term))
@api.route('/api/detail/<subject>')
def details(subject):
response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
return json.dumps(db.detail(subject))
if __name__ == '__main__':
    # Demonstrates the truly awesome Awesomplete drawing data right from the search API above.
@api.route('/search')
def autocompletesearch():
return redirect('/static/autocomplete.html')
db.put('this is an article', 'this is the body of the article.')
db.put('this is another article', 'this is the body of the article.')
db.put('this is a third article', 'this is the body of the article.')
run(api, host='localhost',port=8080, debug=True)
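# Editorial sketch (not part of the original service): consuming the search
# endpoint above with the `requests` library; assumes the server is running
# locally on port 8080, as configured in run() above.
def example_search(term, base='http://localhost:8080'):
    import requests
    return requests.get('{0}/api/search/{1}'.format(base, term)).json()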
|
code-ape/SocialJusticeDataProcessing
|
stats.py
|
Python
|
apache-2.0
| 3,877
| 0.003869
|
import json
import correlation
import category
import tools
import settings
from matplotlib.backends.backend_pdf import PdfPages
def process_data(data_type, stats, highlights):
print("Starting student data processing.")
all_pdf_path, highlight_pdf_path = (None,None)
question_types, demographic_questions, opinion_questions = (None,)*3
demographic_save_file = None
if data_type == "student":
question_types = settings.student_question_types
all_pdf_path = settings.student_stats_path
highlight_pdf_path = settings.student_stats_highlight_path
demographic_questions = settings.student_demographic_questions
opinion_questions = settings.student_opinion_questions
demographic_save_file = settings.student_categories_highlight_path
elif data_type == "fac_staff":
question_types = settings.fac_staff_question_types
all_pdf_path = settings.fac_staff_stats_path
highlight_pdf_path = settings.fac_staff_stats_highlight_path
demographic_questions = settings.fac_staff_demographics_questions
opinion_questions = settings.fac_staff_opinion_questions
demographic_save_file = settings.fac_staff_categories_highlight_path
data = load_data(data_type)
# correlation calculations
if stats in ["correlation", "all"]:
correlation_to_run = correlation.gen_num_correlations(data, question_types)
correlation_results = correlation.run_num_correlations(correlation_to_run, data)
interesting_correlations = correlation.find_interesting_correlations(
correlation_results, data)
correlation.print_interesting_correlations(interesting_correlations, data)
# plot all correlations
if not highlights:
all_pdf = PdfPages(all_pdf_path)
correlation.plot_correlations(correlation_results, data, all_pdf)
all_pdf.close()
# plot highlight correlations
highlight_pdf = PdfPages(highlight_pdf_path)
correlation.plot_correlations(interesting_correlations, data, highlight_pdf)
highlight_pdf.close()
print("Done with {} correlation stats.".format(data_type))
# category calculations
if stats in ["category", "all"]:
print("Staring demographic processing for {} data.".format(data_type))
        base_demographic = category.base_demographic(data, demographic_questions)
answer_response_lists = category.generate_answer_response_lists(
data, opinion_questions)
        opinion_demographic_dict = category.generate_demographic_for_response_lists(
answer_response_lists, data)
opinion_demographic_diff_dict = category.calc_demographic_diff(
base_demographic, opinion_demographic_dict)
interesting_demographic_changes = category.find_interesting_demographic_changes(
opinion_demographic_diff_dict)
category.save_interesting_demographics_changes_to_file(
interesting_demographic_changes, demographic_save_file
)
print("Ending {} data processing.".format(data_type))
def load_data(data_type):
print("Loading {} data.".format(data_type))
data = None
file_path = None
if data_type == "student":
file_path = settings.student_clean_path
elif data_type == "fac_staff":
file_path = settings.fac_staff_clean_path
print("Opening: {}".format(file_path))
with open(file_path, "r") as f:
print("Reading JSON into memory.")
data = json.loads(f.read())
print("Loaded {} {} records.".format(len(data), data_type))
print("Done loading {} data.".format(data_type))
return data
|
masterqa/MasterQA
|
setup.py
|
Python
|
mit
| 2,876
| 0
|
"""
The setup package to install MasterQA dependencies
"""
from setuptools import setup, find_packages # noqa
import os
import sys
this_directory = os.path.abspath(os.path.dirname(__file__))
long_description = None
total_description = None
try:
with open(os.path.join(this_directory, 'README.md'), 'rb') as f:
total_description = f.read().decode('utf-8')
description_lines = total_description.split('\n')
long_description_lines = []
for line in description_lines:
if not line.startswith("<meta ") and not line.startswith("<link "):
long_description_lines.append(line)
long_description = "\n".join(long_description_lines)
except IOError:
long_description = (
'Automation-Assisted Manual Testing - https://masterqa.com')
if sys.argv[-1] == 'publish':
reply = None
input_method = input
if not sys.version_info[0] >= 3:
input_method = raw_input # noqa
reply = str(input_method(
'>>> Confirm release PUBLISH to PyPI? (yes/no): ')).lower().strip()
if reply == 'yes':
print("\n*** Checking code health with flake8:\n")
os.system("python -m pip install 'flake8==3.9.2'")
flake8_status = os.system("flake8 --exclude=temp")
if flake8_status != 0:
print("\nWARNING! Fix flake8 issues before publishing to PyPI!\n")
sys.exit()
else:
print("*** No flake8 issues detected. Continuing...")
print("\n*** Rebuilding distribution packages: ***\n")
os.system('rm -f dist/*.egg; rm -f dist/*.tar.gz; rm -f dist/*.whl')
os.system('python setup.py sdist bdist_wheel') # Create new tar/wheel
print("\n*** Installing twine: *** (Required for PyPI uploads)\n")
os.system("python -m pip install 'twine>=1.15.0'")
print("\n*** Installing tqdm: *** (Required for PyPI uploads)\n")
os.system("python -m pip install --upgrade 'tqdm>=4.62.2'")
print("\n*** Publishing The Release to PyPI: ***\n")
os.system('python -m twine upload dist/*') # Requires ~/.pypirc Keys
print("\n*** The Release was PUBLISHED SUCCESSFULLY to PyPI! :) ***\n")
else:
print("\n>>> The Release was NOT PUBLISHED to PyPI! <<<\n")
sys.exit()
setup(
name='masterqa',
version='1.6.1',
description='Automation-Assisted Manual Testing - https://masterqa.com',
long_description=long_description,
long_description_content_type='text/markdown',
platforms=["Windows", "Linux", "Mac OS-X"],
url='https://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase>=2.4.14',
'sbvirtualdisplay>=1.0.0',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
akx/shoop
|
shoop/xtheme/plugins/_base.py
|
Python
|
agpl-3.0
| 7,820
| 0.002046
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from shoop.apps.provides import (
get_identifier_to_object_map, get_provide_objects
)
from shoop.utils.text import space_case
from shoop.xtheme.plugins.consts import FALLBACK_LANGUAGE_CODE
from shoop.xtheme.plugins.forms import GenericPluginForm
SENTINEL = object()
class Plugin(object):
"""
A plugin that can be instantiated within a `shoop.xtheme.layout.LayoutCell`.
Other plugins should inherit from this class and register themselves in the
`xtheme_plugin` provide category.
"""
identifier = None
fields = []
required_context_variables = set()
name = _("Plugin") # User-visible name
editor_form_class = GenericPluginForm
def __init__(self, config):
"""
Instantiate a Plugin with the given `config` dictionary.
:param config: Dictionary of freeform configuration data
:type config: dict
"""
self.config = config
def is_context_valid(self, context):
"""
Check that the given rendering context is valid for rendering this plugin.
By default, just checks `required_context_variables`.
:param context: Rendering context
:type context: jinja2.runtime.Context
:return: True if we should bother trying to render this
:rtype: bool
"""
for key in self.required_context_variables:
if context.get(key, SENTINEL) is SENTINEL:
return False
return True
def render(self, context):
"""
Return the HTML for a plugin in a given rendering context.
:param context: Rendering context
:type context: jinja2.runtime.Context
:return: String of rendered content.
:rtype: str
"""
return "" # pragma: no cover
def get_editor_form_class(self):
"""
Return the form class for editing this plugin.
The form class should either derive from PluginForm, or at least have a `get_config()` method.
Form classes without `fields` are treated the same way as if you'd return `None`,
i.e. no configuration form is presented to the user.
:return: Editor form class
:rtype: class[forms.Form]|None
"""
# Could be overridden in suitably special subclasses.
if self.fields:
return self.editor_form_class
def get_translated_value(self, key, default=None, language=None):
"""
Get a translated value from the plugin's configuration.
It's assumed that translated values are stored in a ``{language: data, ...}`` dictionary
in the plugin configuration blob.
This is the protocol that `shoop.xtheme.plugins.forms.TranslatableField` uses.
    If the configuration blob contains such a dictionary, but it does not contain
    a translated value in the requested language, the fallback value, if any,
    within that dictionary is tried next. Failing that, the ``default`` value is returned.
:param key: Configuration key
:type key: str
:param default: Default value to return when all else fails.
:param language: Requested language. Defaults to the active language.
:type language: str|None
:return: A translated value.
"""
value = self.config.get(key)
if not value:
return default
if isinstance(value, dict): # It's a dict, so assume it's something from TranslatableField
language = (language or get_language())
if language in value: # The language we requested exists, use that
return value[language]
if FALLBACK_LANGUAGE_CODE in value: # An untranslated fallback exists, use that
return value[FALLBACK_LANGUAGE_CODE]
return default # Fall back to the default, then
return value # Return the value itself; it's probably just something untranslated.
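    def _translated_value_demo(self):
        # Editorial sketch (not part of Shoop): exercises the lookup protocol
        # documented above against an assumed TranslatableField-style blob.
        self.config = {"title": {"en": "Hello", FALLBACK_LANGUAGE_CODE: "Hi"}}
        assert self.get_translated_value("title", language="en") == "Hello"
        assert self.get_translated_value("title", language="sv") == "Hi"  # fallback
        assert self.get_translated_value("missing", default="x") == "x"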
@classmethod
def load(cls, identifier):
"""
Get a plugin class based on the identifier from the `xtheme_plugin` provides registry.
:param identifier: Plugin class identifier
:type identifier: str
:return: A plugin class, or None
:rtype: class[Plugin]|None
"""
return get_identifier_to_object_map("xtheme_plugin").get(identifier)
@classmethod
def get_plugin_choices(cls, empty_label=None):
"""
Get a sorted list of 2-tuples (identifier and name) of available Xtheme plugins.
Handy for `<select>` boxen.
:param empty_label: Label for the "empty" choice. If falsy, no empty choice is prepended
:type empty_label: str|None
:return: List of 2-tuples
:rtype: Iterable[tuple[str, str]]
"""
choices = []
if empty_label:
choices.append(("", empty_label))
for plugin in get_provide_objects("xtheme_plugin"):
if plugin.identifier:
choices.append((
plugin.identifier,
getattr(plugin, "name", None) or plugin.identifier
))
choices.sort()
return choices
class TemplatedPlugin(Plugin):
# TODO: Document `TemplatedPlugin` better!
"""
Convenience base class for plugins that just render a "sub-template" with a given context.
"""
    #: The template to render
template_name = ""
#: Variables to copy from the parent context.
inherited_variables = set()
#: Variables to copy from the plugin configuration
config_copied_variables = set()
    engine = None  # template rendering engine
def get_context_data(self, context):
"""
Get a context dictionary from a Jinja2 context.
:param context: Jinja2 rendering context
:type context: jinja2.runtime.Context
:return: Dict of vars
:rtype: dict[str, object]
"""
vars = {"request": context.get("request")}
for key in self.required_context_variables:
vars[key] = context.get(key)
for key in self.inherited_variables:
vars[key] = context.get(key)
for key in self.config_copied_variables:
vars[key] = self.config.get(key)
return vars
def render(self, context): # doccov: ignore
vars = self.get_context_data(context)
if self.engine:
template = self.engine.get_template(self.template_name)
else:
template = get_template(self.template_name)
return template.render(vars, request=context.get("request"))
def templated_plugin_factory(identifier, template_name, **kwargs):
"""
A factory (akin to `modelform_factory`) to quickly create simple plugins.
:param identifier: The unique identifier for the new plugin.
:type identifier: str
:param template_name: The template file path this plugin should render
:type template_name: str
:param kwargs: Other arguments for the `TemplatedPlugin`/`Plugin` classes.
:type kwargs: dict
:return: New `TemplatedPlugin` subclass
:rtype: class[TemplatedPlugin]
"""
ns = {
"identifier": identifier,
"template_name": template_name,
}
ns.update(kwargs)
ns.setdefault("name", space_case(identifier).title())
return type(str("%sPlugin" % identifier), (TemplatedPlugin,), ns)
|
hmcc/price-search
|
scraper/scraper.py
|
Python
|
mit
| 3,591
| 0.003063
|
"""
This module contains a single class that manages the scraping of data
from one or more supermarkets on mysupermarket.co.uk
"""
from datetime import datetime
from os import remove
from os.path import isfile, getmtime
from time import time
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.utils.project import get_project_settings
from app_config import supermarket_names, supermarket_url, supermarket_filename
from .reactor_control import ReactorControl
from .spiders.mysupermarket import MySupermarketSpider
class CachingScraper():
"""
A "crawler manager" that manages scraping mysupermarket.co.uk for one or
more supermarkets. For each supermarket, it checks the cache file then
creates and starts a crawler if appropriate.
"""
def __init__(self, supermarkets=supermarket_names(), force_refresh=False):
"""Create a CachingScraper for the given supermarket(s).
Keyword arguments:
supermarkets -- a list of supermarkets to scrape
force_refresh -- if True, cachefiles will not be used
"""
self.force_refresh = force_refresh
self.supermarkets = supermarkets
self.reactor_control = ReactorControl()
def cache_exists(self, supermarket):
"""Check whether a JSON file already exists for data scraped from
the given supermarket, and if so, whether it was created today.
Note that 'created today' is not the same as 'age < 24 hours'. Prices
are assumed to change overnight so a cachefile created at 9pm
yesterday is considered out of date at 9am today (but a cachefile
created at 9am is not out of date at 9pm).
Keyword arguments:
        supermarket -- the supermarket whose cachefile should be checked
"""
cachefile = supermarket_filename(supermarket)
if not isfile(cachefile):
return False
        mtime = datetime.fromtimestamp(getmtime(cachefile))
        now = datetime.fromtimestamp(time())
        # Compare full dates, not just day-of-month, so months don't collide.
        return mtime.date() == now.date()
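    @staticmethod
    def _cache_freshness_demo():
        # Editorial sketch of the 'created today' rule documented above, on
        # hypothetical timestamps: a file written at 9pm yesterday is stale
        # at 9am today even though it is less than 24 hours old.
        from datetime import datetime
        mtime = datetime(2016, 1, 1, 21, 0)
        now = datetime(2016, 1, 2, 9, 0)
        assert mtime.date() != now.date()  # -> treated as out of date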
def setup_crawler(self, supermarket, reactor_control):
"""Set up the Scrapy cr
|
awler.
See http://doc.scrapy.org/en/latest/topics/practices.html#run-scrapy-from-a-script.
Keyword arguments:
supermarket -- the supermarket whose crawler should be set up
"""
cachefile = supermarket_filename(supermarket)
if isfile(cachefile):
remove(cachefile)
settings = get_project_settings()
url = supermarket_url(supermarket)
settings.set('FEED_URI', supermarket_filename(supermarket))
spider = MySupermarketSpider(url)
crawler = Crawler(settings)
crawler.signals.connect(reactor_control.remove_crawler, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(spider)
crawler.start()
reactor_control.add_crawler()
def get_data(self):
"""Main entry point for the scraper class. Crawl or get data from cache
for the configured supermarkets. Supermarkets are set in __init__.
"""
if self.force_refresh:
supermarkets_to_crawl = self.supermarkets
else:
supermarkets_to_crawl = [x for x in self.supermarkets if not self.cache_exists(x)]
if supermarkets_to_crawl:
reactor_control = ReactorControl()
for supermarket in supermarkets_to_crawl:
self.setup_crawler(supermarket, reactor_control)
reactor_control.start_crawling()
|
pacificIT/mopidy
|
mopidy/file/library.py
|
Python
|
apache-2.0
| 4,786
| 0
|
from __future__ import unicode_literals
import logging
import operator
import os
import sys
import urllib2
from mopidy import backend, exceptions, models
from mopidy.audio import scan, utils
from mopidy.internal import path
logger = logging.getLogger(__name__)
FS_ENCODING = sys.getfilesystemencoding()
class FileLibraryProvider(backend.LibraryProvider):
"""Library for browsing local files."""
# TODO: get_images that can pull from metadata and/or .folder.png etc?
# TODO: handle playlists?
@property
def root_directory(self):
if not self._media_dirs:
return None
elif len(self._media_dirs) == 1:
uri = path.path_to_uri(self._media_dirs[0]['path'])
else:
uri = 'file:root'
return models.Ref.directory(name='Files', uri=uri)
def __init__(self, backend, config):
super(FileLibraryProvider, self).__init__(backend)
self._media_dirs = list(self._get_media_dirs(config))
self._follow_symlinks = config['file']['follow_symlinks']
self._show_dotfiles = config['file']['show_dotfiles']
self._scanner = scan.Scanner(
timeout=config['file']['metadata_timeout'])
def browse(self, uri):
logger.debug('Browsing files at: %s', uri)
result = []
local_path = path.uri_to_path(uri)
if local_path == 'root':
return list(self._get_media_dirs_refs())
if not self._is_in_basedir(os.path.realpath(local_path)):
logger.warning(
'Rejected attempt to browse path (%s) outside dirs defined '
'in file/media_dirs config.', uri)
return []
for dir_entry in os.listdir(local_path):
child_path = os.path.join(local_path, dir_entry)
uri = path.path_to_uri(child_path)
if not self._show_dotfiles and dir_entry.startswith(b'.'):
continue
if os.path.islink(child_path) and not self._follow_symlinks:
logger.debug('Ignoring symlink: %s', uri)
continue
if not self._is_in_basedir(os.path.realpath(child_path)):
logger.debug('Ignoring symlink to outside base dir: %s', uri)
continue
name = dir_entry.decode(FS_ENCODING, 'replace')
if os.path.isdir(child_path):
result.append(models.Ref.directory(name=name, uri=uri))
elif os.path.isfile(child_path):
result.append(models.Ref.track(name=name, uri=uri))
result.sort(key=operator.attrgetter('name'))
return result
def lookup(self, uri):
logger.debug('Looking up file URI: %s', uri)
local_path = path.uri_to_path(uri)
try:
result = self._scanner.scan(uri)
track = utils.convert_tags_to_track(result.tags).copy(
uri=uri, length=result.duration)
except exceptions.ScannerError as e:
logger.warning('Failed looking up %s: %s', uri, e)
track = models.Track(uri=uri)
if not track.name:
filename = os.path.basename(local_path)
name = urllib2.unquote(filename).decode(FS_ENCODING, 'replace')
track = track.copy(name=name)
return [track]
def _get_media_dirs(self, config):
for entry in config['file']['media_dirs']:
media_dir = {}
media_dir_split = entry.split('|', 1)
local_path = path.expand_path(
media_dir_split[0].encode(FS_ENCODING))
if not local_path:
logger.debug(
'Failed expanding path (%s) from file/media_dirs config '
'value.',
media_dir_split[0])
continue
elif not os.path.isdir(local_path):
logger.warning(
'%s is not a directory. Please create the directory or '
'update the file/media_dirs config value.', local_path)
continue
media_dir['path'] = local_path
if len(media_dir_split) == 2:
media_dir['name'] = media_dir_split[1]
else:
                # TODO Mpd client should accept / in dir name
media_dir['name'] = media_dir_split[0].replace(os.sep, '+')
yield media_dir
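    @staticmethod
    def _media_dir_entry_demo():
        # Editorial sketch of the 'path|name' config format parsed above;
        # the entry shown is hypothetical.
        local_path, name = '/home/user/Music|My Music'.split('|', 1)
        assert (local_path, name) == ('/home/user/Music', 'My Music')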
def _get_media_dirs_refs(self):
for media_dir in self._media_dirs:
yield models.Ref.directory(
name=media_dir['name'],
uri=path.path_to_uri(media_dir['path']))
    def _is_in_basedir(self, local_path):
return any(
path.is_path_inside_base_dir(local_path, media_dir['path'])
for media_dir in self._media_dirs)
|
faisaltheparttimecoder/EMEARoster
|
MyRoster/views.py
|
Python
|
mit
| 1,122
| 0.000891
|
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from Core.models import RosterAudit, RosterUser
@login_required
@csrf_exempt
def myroster_rows(request):
"""
Obtain all the rows for the connected user is scheduled on.
"""
# Email address
email = request.user.email
    # Current date
    date_now = datetime.now().date()
# Variables
connected_username = ""
collector = []
    # Get the name of the user based on the email from the roster table.
    # We are not relying on the username obtainable via the request,
    # since anyone can enter any name and it would become hard to manage.
for user in RosterUser.objects.filter(email=email):
connected_username = user.first_name + " " + user.last_name
for audit in RosterAudit.objects.filter(engineer=connected_username).filter(audit_date_field__gte=date_now).order_by('audit_date_field'):
collector.append(audit.audit_date_field)
return JsonResponse(collector, safe=False)
|
elkingtowa/pyrake
|
tests/test_djangoitem/models.py
|
Python
|
mit
| 440
| 0.002273
|
from django.db import models
class Person(models.Model):
    name = models.CharField(max_length=255, default='Robot')
age = models.IntegerField()
class Meta:
app_label = 'test_djangoitem'
class IdentifiedPerson(models.Model):
identifier = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=255)
age = models.IntegerField()
class Meta:
        app_label = 'test_djangoitem'
|
ControCurator/controcurator
|
python_code/anchorGenerator.py
|
Python
|
mit
| 1,229
| 0.026037
|
# anchorGenerator
from models.anchor import *
# main function
if __name__=='__main__':
# TEMP: Wipe existing anchors
# anchors = Anchor.all(size=1000)
# Anchor.delete_all(anchors)
# THIS IS TEMPORARY:
anchors = {'Vaccination', 'Vaccinations', 'Vaccine', 'Vaccines', 'Inoculation', 'Immunization', 'Shot', 'Chickenpox', 'Disease', 'Diseases', 'Hepatitis A', 'Hepatitis B', 'infection', 'infections', 'measles', 'outbreak', 'mumps', 'rabies', 'tetanus', 'virus', 'autism'}
seed = 'vaccination'
for anchor in anchors:
        a = Anchor.getOrCreate(anchor)
a.findInstances()
a.save()
"""
query = {
"size": 0,
"query": {
"filtered": {
"query": {
"query_string": {
"query": "*",
"analyze_wildcard": True
}
}
}
},
"aggs": {
"2": {
"terms": {
"field": "title",
"size": 100,
"order": {
"_count": "desc"
}
}
}
}
}
response = es.search(index="crowdynews"', 'body=query)
retrieved = now()
anchors = {}
# go through each retrieved document
for hit in response['aggregations']['2']['buckets']:
key = hit['key']
if validKey(key):
anchors[key] = hit['doc_count']
addBulk(anchors)
"""
|
Open-E-WEB/django-powerpages
|
powerpages/tests/test_admin.py
|
Python
|
mit
| 7,705
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from powerpages.models import Page
from powerpages.sync import PageFileDumper
from powerpages.admin import website_link, sync_status, save_page
from powerpages.signals import page_edited
from .test_sync import BaseSyncTestCase
class WebsiteLinkTestCase(TestCase):
maxDiff = None
def test_no_object(self):
self.assertIsNone(website_link(None))
def test_empty_url(self):
self.assertEqual(
website_link(Page(url='')),
'<a href="" style="font-weight: normal;"> »</a>'
)
def test_root_url(self):
self.assertEqual(
website_link(Page(url='/')),
'<a href="/" style="font-weight: normal;">/ »</a>'
)
def test_first_level_url(self):
self.assertEqual(
website_link(Page(url='/test/')),
'<a href="/test/" style="font-weight: normal;">'
'/<span style="font-weight: bold">test</span>/'
' »</a>'
)
def test_second_level_url(self):
self.assertEqual(
website_link(Page(url='/nested/test/')),
'<a href="/nested/test/" style="font-weight: normal;">'
'/nested/<span style="font-weight: bold">test</span>/'
' »</a>'
)
def test_file(self):
self.assertEqual(
website_link(Page(url='/robots.txt')),
'<a href="/robots.txt" style="font-weight: normal;">'
'/<span style="font-weight: bold">robots.txt</span>'
' »</a>'
)
def test_nested_file(self):
self.assertEqual(
website_link(Page(url='/nested/robots.txt')),
'<a href="/nested/robots.txt" style="font-weight: normal;">'
'/nested/<span style="font-weight: bold">robots.txt</span>'
' »</a>'
)
class SyncStatusTestCase(BaseSyncTestCase):
maxDiff = None
def test_no_object(self):
self.assertIsNone(sync_status(None))
def test_file_synced(self):
page = Page.objects.create(
url='/test-page/', template='<h1>Test Page</h1>'
)
PageFileDumper(page).save()
self.assertEqual(
sync_status(page),
'<span style="color: green">File is synced</span>'
)
def test_file_content_differs(self):
page = Page.objects.create(
url='/test-page/', template='<h1>Test Page</h1>'
)
PageFileDumper(page).save()
page.title = 'Lorem Ipsum'
page.save()
self.assertEqual(
sync_status(page),
'<span style="color: orange">File content differs</span>'
)
def test_file_is_missing(self):
page = Page.objects.create(
url='/test-page/', template='<h1>Test Page</h1>'
)
self.assertEqual(
sync_status(page),
'<span style="color: red">File is missing</span>'
)
def test_file_content_differs_modified_in_admin(self):
page = Page.objects.create(
url='/test-page/', template='<h1>Test Page</h1>'
)
PageFileDumper(page).save()
page.title = 'Lorem Ipsum'
page.is_dirty = True # modified in Admin
page.save()
self.assertEqual(
sync_status(page),
'<span style="color:black; font-weight:bold">'
'Changed in Admin!</span><br>'
'<span style="color: orange">File content differs</span>'
)
class SavePageTestCase(TestCase):
maxDiff = None
def setUp(self):
def page_edited_test_handler(sender, **kwargs):
self.page_edited_kwargs = kwargs
self.page_edited_kwargs = None
page_edited.connect(
page_edited_test_handler, dispatch_uid='test_page_edited',
weak=False
)
def tearDown(self):
page_edited.disconnect(dispatch_uid='test_page_edited')
self.page_edited_kwargs = None
def test_create_page(self):
page = Page(url='/test-page/')
user = User.objects.create_user('admin-user')
save_page(page=page, user=user, created=True)
self.assertIsNotNone(page.pk)
self.assertTrue(page.is_dirty)
self.assertDictContainsSubset(
{'page': page, 'user': user, 'created': True},
self.page_edited_kwargs
)
def test_modify_page(self):
        page = Page.objects.create(url='/test-page/', title='Lorem')
page.title = 'Ipsum'
user = User.objects.create_user('admin-user')
save_page(page=page, user=user, created=False)
self.assertEqual(Page.objects.get(pk=page.pk).title, 'Ipsum')
self.assertTrue(page.is_dirty)
self.assertDictContainsSubset(
{'page': page, 'user': user, 'created': False},
            self.page_edited_kwargs
)
class SwitchEditModeViewTestCase(TestCase):
maxDiff = None
def setUp(self):
self.url = reverse('switch_edit_mode')
self.staff_member = User.objects.create_user(
'staff_member', password='letmein123', is_staff=True
)
self.super_user = User.objects.create_user(
'super_user', password='letmein123', is_superuser=True
)
self.regular_user = User.objects.create_user(
'regular_user', password='letmein123'
)
Page.objects.create(url='/')
Page.objects.create(url='/test-page/')
def test_enable_edit_mode_staff_member_referrer(self):
self.client.login(username='staff_member', password='letmein123')
response = self.client.get(self.url, HTTP_REFERER='/test-page/')
self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
self.assertRedirects(response, '/test-page/')
def test_disable_edit_mode_staff_member_no_referrer(self):
self.client.login(username='staff_member', password='letmein123')
session = self.client.session
session['WEBSITE_EDIT_MODE'] = True
session.save()
response = self.client.get(self.url)
self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
self.assertRedirects(response, '/')
def test_enable_edit_mode_super_user_no_referrer(self):
self.client.login(username='super_user', password='letmein123')
response = self.client.get(self.url)
self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
self.assertRedirects(response, '/')
def test_disable_edit_mode_super_user_referrer(self):
self.client.login(username='super_user', password='letmein123')
session = self.client.session
session['WEBSITE_EDIT_MODE'] = True
session.save()
response = self.client.get(self.url, HTTP_REFERER='/test-page/')
self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
self.assertRedirects(response, '/test-page/')
def test_access_forbidden_regular_user(self):
self.client.login(username='regular_user', password='letmein123')
response = self.client.get(self.url)
self.assertRedirects(
response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
fetch_redirect_response=False
)
    def test_access_forbidden_anonymous(self):
response = self.client.get(self.url)
self.assertRedirects(
response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
fetch_redirect_response=False
)
|
danalec/dotfiles
|
sublime/.config/sublime-text-3/Packages/anaconda_php/anaconda_php.py
|
Python
|
mit
| 673
| 0
|
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""AnacondaPHP is a PHP linting plugin for Sublime Text 3
"""
from .plugin_version import anaconda_required_version
from .anaconda_lib.anaconda_plugin import anaconda_version
if anaconda_required_version > anaconda_version:
raise RuntimeError(
'AnacondaPHP requires version {} or better of anaconda but {} '
'is installed'.format(
            '.'.join([str(i) for i in anaconda_required_version]),
'.'.join([str(i) for i in anaconda_version])
)
)
from .commands import *
from .listeners import *
|
yarabarla/python-challenge
|
3.py
|
Python
|
mit
| 812
| 0.022167
|
# http://www.pythonchallenge.com/pc/def/equality.html
import re
file_ob = open("3.dat", 'r')
ob_read = file_ob.read()
read_arr = list(ob_read)
word = []
def for_loop(): # Loops through array to find solution
for i in range(len(read_arr)):
        if (i + 8) >= len(read_arr):  # To keep index i + 8 in bounds
break
if not(read_arr[i]).isupper() and (read_arr[i + 1]).isupper() and (read_arr[i + 2]).isupper() and (read_arr[i + 3]).isupper() and(read_arr[i + 4]).islower() and (read_arr[i + 5]).isupper() and (read_arr[i + 6]).isupper() and (read_arr[i + 7]).isupper() and not(read_arr[i + 8]).isupper():
            word.append(read_arr[i + 4])
print "".join(word)
def reg_ex():  # Uses regex to find the pattern
print "".join( re.findall("[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]", ob_read))
# for_loop()
# reg_ex()
|
zwadar/pyqode.python
|
pyqode/python/managers/file.py
|
Python
|
mit
| 2,598
| 0
|
"""
Contains the python specific FileManager.
"""
import ast
import re
from pyqode.core.api import TextBlockHelper
from pyqode.core.managers import FileManager
class PyFileManager(FileManager):
"""
Extends file manager to override detect_encoding. With python, we can
detect encoding by reading the two first lines of a file and extracting its
encoding tag.
"""
#: True to fold import statements on open.
fold_imports = False
#: True to fold docstring on open
fold_docstrings = False
def detect_encoding(self, path):
"""
For the implementation of encoding definitions in Python, look at:
- http://www.python.org/dev/peps/pep-0263/
.. note:: code taken and adapted from
```jedi.common.source_to_unicode.detect_encoding```
"""
with open(path, 'rb') as file:
source = file.read()
# take care of line encodings (not in jedi)
source = source.replace(b'\r', b'')
source_str = str(source).replace('\\n', '\n')
byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
if source.startswith(byte_mark):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
return possible_encoding.group(1)
return 'UTF-8'
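    @staticmethod
    def _encoding_tag_demo():
        # Editorial sketch: the same PEP 263 regex used above, applied to a
        # hypothetical two-line source header.
        source = "#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n"
        match = re.search(r"coding[=:]\s*([-\w.]+)", source)
        assert match.group(1) == 'latin-1'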
def open(self, path, encoding=None, use_cached_encoding=True):
encoding = self.detect_encoding(path)
super(PyFileManager, self).open(
            path, encoding=encoding, use_cached_encoding=use_cached_encoding)
try:
folding_panel = self.editor.panels.get('FoldingPanel')
except KeyError:
pass
else:
            # fold imports and/or docstrings
blocks_to_fold = []
sh = self.editor.syntax_highlighter
if self.fold_imports and sh.import_statements:
blocks_to_fold += sh.import_statements
if self.fold_docstrings and sh.docstrings:
blocks_to_fold += sh.docstrings
for block in blocks_to_fold:
if TextBlockHelper.is_fold_trigger(block):
folding_panel.toggle_fold_trigger(block)
def clone_settings(self, original):
super(PyFileManager, self).clone_settings(original)
self.fold_docstrings = original.fold_docstrings
self.fold_imports = original.fold_imports
|
tryexceptpass/sofi
|
sofi/ui/anchor.py
|
Python
|
mit
| 1,243
| 0.004023
|
from .element import Element
class Anchor(Element):
"""Implements the <a> tag"""
def __init__(self, text=None, href="#", cl=None, ident=None, style=None, attrs=None):
super().__init__(cl=cl, ident=ident, style=style, attrs=attrs)
self.href = href
if text:
self._children.append(text)
def __repr__(self):
return "<Anchor(href='" + self.href + "')>"
def __str__(self):
output = [ "<a" ]
if self.ident:
            output.append(" id=\"")
output.append(self.ident)
output.append("\"")
if self.cl:
output.append(" class=\"")
output.append(self.cl)
output.append("\"")
output.append(' href="')
output.append(self.href)
output.append('"')
if self.style:
output.append(" style=\"")
output.append(self.style)
output.append("\"")
if self.attrs:
for k in self.attrs.keys():
output.append(' ' + k + '="' + self.attrs[k] + '"')
output.append(">")
for child in self._children:
output.append(str(child))
output.append("</a>")
return "".join(output)
|
adrianomargarin/py-notacarioca
|
notacarioca/settings.py
|
Python
|
apache-2.0
| 377
| 0.007958
|
URL = {
    3304557: {
"production": "https://notacarioca.rio.gov.br/WSNacional/nfse.asmx?wsdl",
"sandbox": "https://homologacao.notacarioca.rio.gov.br/WSNacional/nfse.asmx?wsdl"
}
}
TEMPLATES = {
'send_rps': "GerarNfseEnvio.xml",
'status': "ConsultarNfseEnvio.xml",
'get_nfse': "Consult
|
arNfseEnvio.xml",
'cancel': "CancelarNfseEnvio.xml"
}
|
adaptivethreat/Empire
|
lib/modules/powershell/situational_awareness/host/antivirusproduct.py
|
Python
|
bsd-3-clause
| 4,095
| 0.012698
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-AntiVirusProduct',
'Author': ['@mh4x0f', 'Jan Egil Ring'],
'Description': ('Get antivirus product information.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
                'http://blog.powershell.no/2011/06/12/use-windows-powershell-to-get-antivirus-product-information/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
                'Description'   :   'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Computername to run the module on, defaults to localhost.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
script = """
function Get-AntiVirusProduct {
[CmdletBinding()]
param (
[parameter(ValueFromPipeline=$true, ValueFromPipelineByPropertyName=$true)]
[Alias('name')]
$ComputerName=$env:computername )
$Query = 'select * from AntiVirusProduct'
$AntivirusProduct = Get-WmiObject -Namespace 'root\SecurityCenter2' -Query $Query @psboundparameters -ErrorVariable myError -ErrorAction 'SilentlyContinue'
switch ($AntiVirusProduct.productState) {
'262144' {$defstatus = 'Up to date' ;$rtstatus = 'Disabled'}
'262160' {$defstatus = 'Out of date' ;$rtstatus = 'Disabled'}
'266240' {$defstatus = 'Up to date' ;$rtstatus = 'Enabled'}
'266256' {$defstatus = 'Out of date' ;$rtstatus = 'Enabled'}
'393216' {$defstatus = 'Up to date' ;$rtstatus = 'Disabled'}
'393232' {$defstatus = 'Out of date' ;$rtstatus = 'Disabled'}
'393488' {$defstatus = 'Out of date' ;$rtstatus = 'Disabled'}
'397312' {$defstatus = 'Up to date' ;$rtstatus = 'Enabled'}
'397328' {$defstatus = 'Out of date' ;$rtstatus = 'Enabled'}
'397584' {$defstatus = 'Out of date' ;$rtstatus = 'Enabled'}
default {$defstatus = 'Unknown' ;$rtstatus = 'Unknown'}
}
$ht = @{}
$ht.Computername = $ComputerName
$ht.Name = $AntiVirusProduct.displayName
$ht.ProductExecutable = $AntiVirusProduct.pathToSignedProductExe
$ht.'Definition Status' = $defstatus
$ht.'Real-time Protection Status' = $rtstatus
New-Object -TypeName PSObject -Property $ht
}
Get-AntiVirusProduct """
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(self.info["Name"])+' completed!";'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
|
ravenscroftj/freecite
|
setup.py
|
Python
|
mit
| 446
| 0.042601
|
from setuptools import setup, find_packages
setup(
name = "FreeCite",
version = "0.1",
py_modules = ['freecite'],
#install requirements
install_requires = [
'requests==1.1.0'
],
#author details
author = "James Ravenscroft",
author_email = "ravenscroftj@gmail.com",
description = "A wrapper around the FreeCite REST API",
url = "http://wwww.github.com/ravenscroftj/freecite"
)
|
degibenz/vispa-chat
|
src/api/chat_ws.py
|
Python
|
mit
| 5,947
| 0.002097
|
# -*- coding: utf-8 -*-
__author__ = 'degibenz'
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
import json
from aiohttp import web
from core.model import ObjectId
from core.exceptions import *
from models.chat import *
from models.client import Client, Token
__all__ = [
'ChatWS'
]
DEBUG = True
class ChatWS(web.View):
ws = None
response = None
chat = None
client = None
chat_pk = None
client_pk = None
db = None
client_in_chat = None
def __init__(self, request):
try:
self.db = request.app['db']
except(KeyError,):
pass
self.chat_pk = request.match_info.get('id')
super(ChatWS, self).__init__(request)
async def check_receiver(self, receiver: ObjectId):
"""
        Checks that the receiver exists and is in the chat with the sender.
        :param receiver: identifier of the receiver
"""
client = Client(
pk=ObjectId(receiver)
)
if self.db:
client.db = self.db
await client.get()
q = {
'chat': self.chat_pk,
'client': ObjectId(receiver)
}
        if not await self.client_in_chat.get(**q):
            await self.client_in_chat.save(**q)  # save() is a coroutine (cf. msg_obj.save() below), so await it
async def prepare_msg(self):
async for msg in self.socket:
content = json.loads(msg.data)
receiver = content.get('receiver', None)
if receiver:
await self.check_receiver(receiver)
receiver = ObjectId(receiver)
msg_obj = MessagesFromClientInChat(
chat=self.chat_pk,
client=self.client_pk,
msg=content.get('msg'),
receiver_message=receiver
)
if self.db:
msg_obj.db = self.db
await msg_obj.save()
for item in self.agents:
await self.notify(
sender=item.get('client_uid'),
message=msg_obj.message_content,
socket=item.get('socket'),
receiver=receiver,
)
async def check_client(self):
token_in_header = self.request.__dict__.get('headers').get('AUTHORIZATION', None)
if not token_in_header:
raise TokeInHeadersNotFound
else:
token = Token()
token.token = token_in_header
if self.db:
token.db = self.db
self.client = await token.find_client_by_key()
if not self.client:
raise TokenIsNotFound
self.client_pk = ObjectId(self.client.get('client'))
async def notify(self, sender: ObjectId, message: str, socket: web.WebSocketResponse, receiver: ObjectId = None, ):
"""
        Broadcasts a message to all participants in the chat or to a selected user.
        :param sender:
        :param socket:
        :param message: the message text
        :param receiver: identifier of the user who should receive this message
"""
async def _notify():
try:
if not socket.closed:
socket.send_str(
data="{}".format(message)
)
except(Exception,) as error:
error_info = {
'action': 'notify',
'receiver': receiver,
'sender': sender,
'error': '{}'.format(error)
}
log.error(error_info)
if receiver:
message = "@{}: {}".format(receiver, message)
await _notify()
async def mark_client_as_offline(self):
q = {
'chat': self.chat_pk,
'client': self.client_pk
}
await self.client_in_chat.objects.update(
q,
{'$set':
{'online': False}
},
upsert=False
)
@property
def socket(self):
for item in self.agents:
if item.get('client_uid') == self.client_pk:
return item.get('socket')
@property
def agents(self):
result = []
for ws in self.request.app['websockets']:
if ws.get('chat_uid') == self.chat_pk:
result.append(ws)
return result
async def get(self):
try:
self.ws = web.WebSocketResponse()
await self.ws.prepare(self.request)
self.chat_pk = ObjectId(self.chat_pk)
chat = Chat(
pk=self.chat_pk
)
if self.db:
chat.db = self.db
self.chat = await chat.get()
await self.check_client()
self.client_in_chat = ClientsInChatRoom(
chat=self.chat_pk,
client=self.client_pk,
)
if self.db:
self.client_in_chat.db = self.db
await self.client_in_chat.add_person_to_chat()
self.request.app['websockets'].append({
"socket": self.ws,
"client_uid": self.client_pk,
'chat_uid': self.chat_pk
})
for _ws in self.agents:
_ws.get('socket').send_str('%s joined' % self.client_pk)
await self.prepare_msg()
except(Exception,) as error:
self.response = {
'status': False,
'error': "{}".format(error)
}
log.error(self.response)
await self.ws.close()
finally:
return self.ws
|
srittau/python-htmlgen
|
htmlgen/image.py
|
Python
|
mit
| 952
| 0
|
from .attribute import html_attribute
from .element import VoidElement
class Image(VoidElement):
"""An HTML image (<img>) element.
Images must have an alternate text description that describes the
contents of the image, if the image can not be displayed. In some
    cases the alternate text can be empty. For example, if the image just
displays a company logo next to the company's name or if the image just
adds an icon next to a textual description of an action.
Example:
>>> image = Image("whiteboard.jpg",
... "A whiteboard filled with mathematical formulas.")
>>> image.title = "Whiteboards are a useful tool"
"""
def __init__(self, url, alternate_text=""):
        super().__init__("img")
self.url = url
self.alternate_text = alternate_text
url = html_attribute("src")
alternate_text = html_attribute("alt")
title = html_attribute("title")
|
mumax/2
|
tests/reduce.py
|
Python
|
gpl-3.0
| 704
| 0.012784
|
from mumax2 import *
# Standard Problem 4
Nx = 32
Ny = 32
Nz = 1
setgridsize(Nx, Ny, Nz)
setcellsize(500e-9/Nx, 125e-9/Ny, 3e-9/Nz)
load('micromagnetism')
load('solver/rk12')
setv('Msat', 800e3)
setv('demag_acc', 7)
setv('Aex', 1.3e-11)
setv('alpha', 1)
setv('dt', 1e-12)
setv('m_maxerror', 1./1000)
new_maxabs("my_maxtorque", "torque")
new_maxnorm("m
|
axnorm", "torque")
m=[ [[[1]]], [[[1]]], [[[0]]] ]
setarray('m', m)
t1=getv("maxtorque")
t2=getv("my_maxtorque")
t3=getv("maxnorm")
echo("maxtorque:" + str(t1) + " my_maxtorque:" + str(t2) + " maxnorm:" + str(t3))
if t3 != t1:
crash
if t3 < t2:
crash
new_maxabs("maxtorquez", "torque.z")
getv("maxtorquez")
printstats()
savegraph("graph.png")
|
wwj718/ANALYSE
|
pavelib/utils/test/suites/nose_suite.py
|
Python
|
agpl-3.0
| 5,609
| 0
|
"""
Classes used for defining and running nose test suites
"""
import os
from paver.easy import call_task
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites import TestSuite
from pavelib.utils.envs import Env
__test__ = False # do not collect
class NoseTestSuite(TestSuite):
"""
A subclass of TestSuite with extra methods that are specific
to nose tests
"""
def __init__(self, *args, **kwargs):
super(NoseTestSuite, self).__init__(*args, **kwargs)
self.failed_only = kwargs.get('failed_only', False)
self.fail_fast = kwargs.get('fail_fast', False)
self.run_under_coverage = kwargs.get('with_coverage', True)
self.report_dir = Env.REPORT_DIR / self.root
self.test_id_dir = Env.TEST_DIR / self.root
self.test_ids = self.test_id_dir / 'noseids'
def __enter__(self):
super(NoseTestSuite, self).__enter__()
self.report_dir.makedirs_p()
self.test_id_dir.makedirs_p()
def __exit__(self, exc_type, exc_value, traceback):
"""
        Cleans up Mongo after the tests run.
"""
super(NoseTestSuite, self).__exit__(exc_type, exc_value, traceback)
test_utils.clean_mongo()
def _under_coverage_cmd(self, cmd):
"""
If self.run_under_coverage is True, it returns the arg 'cmd'
altered to be run under coverage. It returns the command
unaltered otherwise.
"""
if self.run_under_coverage:
cmd0, cmd_rest = cmd.split(" ", 1)
# We use "python -m coverage" so that the proper python
# will run the importable coverage rather than the
# coverage that OS path finds.
if not cmd0.endswith('.py'):
cmd0 = "`which {}`".format(cmd0)
cmd = (
"python -m coverage run --rcfile={root}/.coveragerc "
"{cmd0} {cmd_rest}".format(
root=self.root,
cmd0=cmd0,
cmd_rest=cmd_rest,
)
)
return cmd
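    # Illustration (hypothetical input): a cmd of
    #     "nosetests common/lib/xmodule"
    # would become, under coverage (assuming self.root == 'lms'),
    #     "python -m coverage run --rcfile=lms/.coveragerc `which nosetests` common/lib/xmodule"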
@property
def test_options_flags(self):
"""
Takes the test options and returns the appropriate flags
for the command.
"""
opts = " "
# Handle "--failed" as a special case: we want to re-run only
# the tests that failed within our Django apps
# This sets the --failed flag for the nosetests command, so this
# functionality is the same as described in the nose documentation
if self.failed_only:
opts += "--failed"
# This makes it so we use nose's fail-fast feature in two cases.
# Case 1: --fail_fast is passed as an arg in the paver command
# Case 2: The environment variable TESTS_FAIL_FAST is set as True
        env_fail_fast_set = (
            'TESTS_FAIL_FAST' in os.environ and os.environ['TESTS_FAIL_FAST']
        )
if self.fail_fast or env_fail_fast_set:
opts += " --stop"
return opts
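    # Illustration: with failed_only=True and fail_fast=True this returns
    # " --failed --stop"; with neither set it returns a single space.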
class SystemTestSuite(NoseTestSuite):
"""
TestSuite for lms and cms nosetests
"""
def __init__(self, *args, **kwargs):
super(SystemTestSuite, self).__init__(*args, **kwargs)
self.test_id = kwargs.get('test_id', self._default_test_id)
self.fasttest = kwargs.get('fasttest', False)
def __enter__(self):
super(SystemTestSuite, self).__enter__()
@property
def cmd(self):
cmd = (
'./manage.py {system} test --verbosity={verbosity} '
'{test_id} {test_opts} --traceback --settings=test'.format(
system=self.root,
                verbosity=self.verbosity,
test_id=self.test_id,
test_opts=self.test_options_flags,
)
)
return self._under_coverage_cmd(cmd)
@property
def _default_test_id(self):
"""
If no test id is provided, we need to limit the test runner
to the Djangoapps we want to test. Otherwise, it will
        run tests on all installed packages. We do this by
        using a default test id.
"""
# We need to use $DIR/*, rather than just $DIR so that
# django-nose will import them early in the test process,
# thereby making sure that we load any django models that are
# only defined in test files.
default_test_id = "{system}/djangoapps/* common/djangoapps/*".format(
system=self.root
)
if self.root in ('lms', 'cms'):
default_test_id += " {system}/lib/*".format(system=self.root)
if self.root == 'lms':
default_test_id += " {system}/tests.py".format(system=self.root)
return default_test_id
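    # Illustration: for root == 'lms' this evaluates to
    #     "lms/djangoapps/* common/djangoapps/* lms/lib/* lms/tests.py"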
class LibTestSuite(NoseTestSuite):
"""
TestSuite for edx-platform/common/lib nosetests
"""
def __init__(self, *args, **kwargs):
super(LibTestSuite, self).__init__(*args, **kwargs)
self.test_id = kwargs.get('test_id', self.root)
self.xunit_report = self.report_dir / "nosetests.xml"
@property
def cmd(self):
cmd = (
"nosetests --id-file={test_ids} {test_id} {test_opts} "
"--with-xunit --xunit-file={xunit_report} "
"--verbosity={verbosity}".format(
test_ids=self.test_ids,
test_id=self.test_id,
test_opts=self.test_options_flags,
xunit_report=self.xunit_report,
verbosity=self.verbosity,
)
)
return self._under_coverage_cmd(cmd)
|
openshine/osweb
|
osweb/projects/__init__.py
|
Python
|
gpl-3.0
| 108
| 0.009259
|
from osweb.projects.ManageProject import ManageProject
from osweb.projects.projects_data import ProjectsData
|
alberthdev/pyradmon
|
pyradmon/dummymp/loghandler.py
|
Python
|
apache-2.0
| 2,314
| 0.008643
|
#!/usr/bin/env python
# DummyMP - Multiprocessing Library for Dummies!
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# DummyMP Library - Logging Redirect Handler
# multiprocessing library for dummies!
# (library for easily running functions in parallel)
#
import logging
import config
import os
class DummyMPLogHandler(logging.Handler):
"""DummyMP logging handler to allow multiprocess logging.
This class is a custom logging handler to allow spawned processes
(from :py:mod:`multiprocessing`) to log without any issues. This
works by intercepting emitted log records, and sending them via
queue to the master process. The master process will process each
record and call :py:meth:`logging.Logger.handle` to emit the
logging record at the master process level.
Note that this class can be used as a general multiprocess logging
handler simply by removing the int_pid attribute.
Attributes:
queue (:py:class:`multiprocessing.Queue`): The Queue object to
forward logging records to.
int_pid (int): The internal PID used to reference the process.
"""
def __init__(self, int_pid, queue):
"""Initializes DummyMPLogHandler with the inputted internal PID
and Queue object."""
logging.Handler.__init__(self)
self.queue = queue
self.int_pid = int_pid
def emit(self, record):
"""Method override to forward logging records to the internal
Queue object."""
try:
# Format: [ [queueMsgID, PID, internal PID], record ]
self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])
        except Exception:
# Something went wrong...
self.handleError(record)
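
# --- master-process sketch (editor addition, hedged) --------------------------
# The docstring above describes the queue protocol; the actual dispatch code
# lives elsewhere in DummyMP. A minimal consumer on the master side might be:
#
#     while not log_queue.empty():
#         (msg_id, pid, int_pid), record = log_queue.get()
#         if msg_id == config.DUMMYMP_LOG_ID:
#             logging.getLogger(record.name).handle(record)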
|
siosio/intellij-community
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_0/_mod0_1_1_0_0_2.py
|
Python
|
apache-2.0
| 128
| 0.007813
|
name0_1_1_0_0_2_0 = None
name0_1_1_0_0_2_1 = None
name0_1_1_0_0_2_2 = None
name0_1_1_0_0_2_3 = None
name0_1_1_0_0_2_4 = None
|