| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
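Each row below appears to be a fill-in-the-middle style record: the prefix, middle, and suffix columns are three consecutive slices of one source file, so concatenating them recovers the original span. A minimal sketch of that reassembly, assuming a row is exposed as a plain dict keyed by the column names above (the example row is illustrative, not taken from the data):

```python
# Minimal sketch: rejoin one record's prefix/middle/suffix columns.
# Field names follow the table header above; the example row is made up.
def reassemble(row: dict) -> str:
    """Concatenate the three slices back into a single source string."""
    return row["prefix"] + row["middle"] + row["suffix"]

example = {
    "prefix": "def add(a, b):\n    return a ",
    "middle": "+ b",
    "suffix": "  # sum of the two arguments\n",
}
print(reassemble(example))
```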
Fakor/congov
|
cli/lib/command.py
|
Python
|
mit
| 530
| 0.001887
|
import collections
class Command:
def __init__(self, template):
self._values = template
def __getitem__(self, key):
value = self._values[key]
if isinstance(value, dict) or isinstance(value, list):
return Command(value)
return self._values[key]
def __setitem__(self, key, value):
try:
self._values[key] = type(self._values[key])(value)
except ValueError:
raise CommandSetValueError
class CommandSetValueError(Exception):
pass
|
twitterdev/data-ads-sample
|
home/frequency.py
|
Python
|
mit
| 1,443
| 0.000693
|
from django.conf import settings
from gnip_search.gnip_search_api import GnipSearchAPI
from gnip_search.gnip_search_api import QueryError as GNIPQueryError
class Frequency:
"""
Class collection for Frequency
"""
DATE_FORMAT = "%Y-%m-%d %H:%M"
def __init__(self, query, sample, start, end):
self.query = query
self.sample = sample
self.start = start
self.end = end
self.freq = self.get(self.get_data())
def get_data(self):
"""
Returns data for frequency in list view
"""
# New gnip client with fresh endpoint (this one sets to counts.json)
g = GnipSearchAPI(settings.GNIP_USERNAME,
settings.GNIP_PASSWORD,
settings.GNIP_SEARCH_ENDPOINT,
paged=True)
timeline = None
try:
timeline = g.query_api(
self.query, self.sample, use_case="wordcount", start=self.start.strftime(
self.DATE_FORMAT), end=self.end.strftime(
self.DATE_FORMAT), csv_flag=False)
except GNIPQueryError as e:
print(e)
result = g.freq.get_tokens(20)
return result
def get(self, data):
response_data = []
for f in data:
response_data.append(f)
response_data = sorted(response_data, key=lambda f: -f[3])
return response_data
|
industrydive/fileflow
|
fileflow/operators/__init__.py
|
Python
|
apache-2.0
| 141
| 0
|
from dive_operator import DiveOperator
from dive_python_operator import DivePythonOperator
__all__ = ['DiveOperator', 'DivePythonOperator']
|
marciocg/palpite-megasena
|
palpite-megasena.py
|
Python
|
gpl-2.0
| 708
| 0.009887
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from bottle import route, get, post, view, run, app, default_app, static_file
from palpite import palpite
@get('/')
@view('index.html')
def index_get():
return dict()
@post('/')
@view('index.html')
def index_post():
gera_palpite = palpite()
return dict(jogo=gera_palpite)
@get('/favicon.ico')
@get('/favicon.png')
def favicon():
return static_file('/static/favicon.png', root='.')
@get('/normalize.css')
def normalizecss():
return static_file('normalize.css', root='static')
@get('/skeleton.css')
def skeletoncss():
arq = 'skeleton.css'
return static_file(arq, root='./static')
app = default_app()
app.run(server='gae',debug=True)
|
c4fcm/CivilServant
|
utils/data_migrations/12.06.2016.updated_page_created_at_to_utc.py
|
Python
|
mit
| 2,850
| 0.022105
|
"""
ONE-TIME UPDATE OF converting SubredditPage.created_at, FrontPage.created_at to utc
Daylight Savings Time = Nov 6 2am
if we see the ET time Nov 6 12am, then it is EDT: EDT-->UTC = +4 = Nov 6 4am
if we see the ET time Nov 6 1-2am, then it is unclear whether it is EDT or EST; assume it is EST
> assumption is because I don't think we really care about this, as long as we are consistent
if we see the ET time Nov 6 2:30am, then it is EST: EST-->UTC = +5 = Nov 6 7:30am
if we see the ET time Nov 6 3am, then it is EST: EST-->UTC = +5 = Nov 6 8am
"""
import re, random, string, sys, math, os, datetime
BASE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../", "../")
sys.path.append(BASE_DIR)
import simplejson as json
ENV = os.environ['CS_ENV']
with open(os.path.join(BASE_DIR, "config") + "/{env}.json".format(env=ENV), "r") as config:
DBCONFIG = json.loads(config.read())
### LOAD SQLALCHEMY SESSION
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.models import Base, SubredditPage, FrontPage
db_engine = create_engine("mysql://{user}:{password}@{host}/{database}".format(
host = DBCONFIG['host'],
user = DBCONFIG['user'],
password = DBCONFIG['password'],
database = DBCONFIG['database']))
Base.metadata.bind = db_engine
DBSession = sessionmaker(bind=db_engine)
db_session = DBSession()
#################
DST_LIMIT_ET = datetime.datetime(2016, 11, 6, 1, 00) # year, month, day, hour, minute
EDT_TO_UTC = datetime.timedelta(hours=4) # +4 hours; EDT = in DST; before Nov 6 1am
EST_TO_UTC = datetime.timedelta(hours=5) # +5 hours; EST = not in DST; after Nov 6 1am
for model in [SubredditPage, FrontPage]:
posts = db_session.query(model)
total_posts = posts.count()
num_updated_posts = 0
last_et_time_utc = datetime.datetime.min
last_edt_time = datetime.datetime.min
num_confusing_et_times = 0
print("Testing {0} posts...".format(total_posts))
for post in posts.all():
if not post.is_utc:
created_at_et = post.created_at
if created_at_et < DST_LIMIT_ET:
# is EDT; in DST; before Nov 6 2am = Daylight Savings Time
created_at_utc = created_at_et - EDT_TO_UTC
last_edt_time = max([last_edt_time, created_at_et])
else:
# is EST; out of DST
if created_at_et < DST_LIMIT_ET + datetime.timedelta(hours=1):
# if between 1am and 2am on Nov 6
num_confusing_et_times += 1
created_at_utc = created_at_et - EST_TO_UTC
post.created_at = created_at_utc
post.is_utc = True
num_updated_posts += 1
last_et_time_utc = max([last_et_time_utc, created_at_utc])
print("Updating created_at for {0} posts; updated created_at to UTC up to time {1}; DST found up to time {2}; num_confusing_et_times: {3}".format(num_updated_posts, last_et_time_utc, last_edt_time, num_confusing_et_times))
db_session.commit()
|
Sirs0ri/PersonalAssistant
|
samantha/plugins/plugin.py
|
Python
|
mit
| 4,002
| 0
|
"""Contains a baseclass for plugins."""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
from collections import Iterable
from functools import wraps
import logging
# related third party imports
# application specific imports
from samantha.core import subscribe_to
__version__ = "1.4.10"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
class Plugin(object):
"""Baseclass, that holds the mandatory methods a plugin must support."""
def __init__(self, name="Plugin", active=False,
logger=None, file_path=None, plugin_type="s"):
"""Set the plugin's attributes, if they're not set already."""
self.name = name
self.uid = "NO_UID"
self.is_active = active
if logger:
self.logger = logger
else:
self.logger = LOGGER
if file_path:
self.path = file_path
else:
self.path = __file__
self.plugin_type = plugin_type
plugin_type_long = "device" if self.plugin_type == "d" else "plugin"
self.logger.info("Initialisation of the %s '%s' complete. "
"The %s is %sactive.",
plugin_type_long,
self.name,
plugin_type_long,
"" if self.is_active else "not ")
def __str__(self):
"""Return a simple string representation of the plugin."""
return "{} '{}', UID {}".format(
("Device" if self.plugin_type == "d" else "Plugin"),
self.name,
self.uid)
def __repr__(self):
"""Return a verbose string representation of the plugin."""
return "{type}\t{name:10}\tUID {uid}\tLoaded from {path}".format(
type=("Device" if self.plugin_type == "d" else "Plugin"),
name=self.name,
uid=self.uid,
path=self.path)
class Device(Plugin):
"""Baseclass, that holds the mandatory methods a device must support."""
def __init__(self, name="Device", active=False,
logger=None, file_path=None, group=None):
"""Set the plugin's attributes, if they're not set already."""
self.name = name
self.is_available = None
self.group = group
self.power_on_keywords = ["turn.on." + self.name.lower()]
self.power_off_keywords = ["turn.off." + self.name.lower()]
if group:
if not isinstance(group, str) and isinstance(group, Iterable):
top_level = []
sub_level = []
words = []
for key in group:
if key[-1] == ".":
sub_level.append(key)
else:
top_level.append(key)
words.append(key)
for sub in sub_level:
for top in top_level:
words.append(sub + top)
for word in words:
self.power_on_keywords.append("turn.on." + word.lower())
self.power_off_keywords.append("turn.off." + word.lower())
else:
self.power_on_keywords.append("turn.on." + group.lower())
self.power_off_keywords.append("turn.off." + group.lower())
# self.logger.info("Initialisation complete")
super(Device, self).__init__(name, active, logger, file_path, "d")
def turn_on(self, func):
@subscribe_to(self.power_on_keywords)
@wraps(func)
def function(*args, **kwargs):
return func(*args, **kwargs)
return function
def turn_off(self, func):
@subscribe_to(self.power_off_keywords)
@wraps(func)
def function(*args, **kwargs):
return func(*args, **kwargs)
return function
|
aptana/Pydev
|
tests/org.python.pydev.refactoring.tests/src/python/visitor/selectionextension/testSelectionExtensionExprFail.py
|
Python
|
epl-1.0
| 286
| 0.027972
|
class A:
def test(self):
print "I##|nitializing A", "test"##|
attribute = "hello"
def my_method(self):
print self.attribute
a = A()
a.test()
##r Should expand to Full String "Initializing A"
# Invalid selection:
# nitializing A", "test"
|
SkyTruth/CrowdProjects
|
Data/FrackFinder/PA/2013/Transformations_and_QAQC/MoorFrog/bin/task2shp.py
|
Python
|
bsd-3-clause
| 22,106
| 0.002171
|
#!/usr/bin/env python
# This document is part of CrowdProjects
# https://github.com/skytruth/CrowdProjects
# =========================================================================== #
#
# Copyright (c) 2014, SkyTruth
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#
# =========================================================================== #
"""
Convert a FrackFinder MoorFrog 2005-2010 JSON export to three layers:
bounding boxes, pond clicks, and well pad points
"""
import os
import sys
import json
from os import sep
from os.path import *
try:
from osgeo import ogr
from osgeo import osr
except ImportError:
import ogr
import osr
#/* ======================================================================= */#
#/* Build Information
#/* ======================================================================= */#
__author__ = 'Kevin Wurster'
__version__ = '0.1-dev'
__release__ = '2014-06-19'
__docname__ = basename(__file__)
__license__ = """
Copyright (c) 2014, SkyTruth
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#/* ======================================================================= */#
#/* Define print_usage() function
#/* ======================================================================= */#
def print_usage():
"""
Command line usage information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Usage: %s [options] task.json task_run.json output/directory
Options:
--help-info -> Print out a list of help related flags
--overwrite -> Overwrite output files
--prefix=str -> Output filename prefix - defaults to 'MoorFrog-'
--wellpad-file-name=str -> Defaults to 'wellpad.shp'
--bbox-file-name=str -> Defaults to 'bbox.shp'
--clicks-file-name=str -> Defaults to 'clicks.shp'
--no-bbox -> Don't generate bounding boxes file
--no-click -> Don't generate clicks file
--no-wellpad -> Don't generate wellpads file
--of=driver -> Output driver name/file type - default='ESRI Shapefile'
--epsg=int -> EPSG code for coordinates in task.json - default='4326'
""" % __docname__)
return 1
#/* ======================================================================= */#
#/* Define print_license() function
#/* ======================================================================= */#
def print_license():
"""
Print out license information
:return: 1 for exit code purposes
:rtype: int
"""
print(__license__)
return 1
#/* ======================================================================= */#
#/* Define print_help() function
#/* ======================================================================= */#
def print_help():
"""
Detailed help information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help: {0}
------{1}
Input is task.json and task_run.json from MoorFrog
Output is a set of bounding boxes, well pad points,
and pond clicks.
""".format(__docname__, '-' * len(__docname__)))
return 1
#/* ======================================================================= */#
#/* Define print_help_info() function
#/* ======================================================================= */#
def print_help_info():
"""
Print a list of help related flags
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help flags:
--help -> More detailed description of this utility
--usage -> Arguments, parameters, flags, options, etc.
--version -> Version and ownership information
--license -> License information
""")
return 1
#/* ======================================================================= */#
#/* Define print_version() function
#/* ======================================================================= */#
def print_version():
"""
Print script version information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
%s version %s - released %s
""" % (__docname__, __version__, __release__))
return 1
#/* ======================================================================= */#
#/* Define create_bboxes() function
#/* ======================================================================= */#
def create_bboxes(tasks, layer):
"""
Add bounding boxes to input layer
:param tasks: tasks from json.load(open('task.json'))
:type tasks: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
# Update user
print("Creating bounding boxes")
# Define fields
print(" Defining bbox fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('site_id', 254, ogr.OFTString),
('location', 254, ogr.OFTString),
('wms_url', 254, ogr.OFTString),
('county', 254, ogr.OFTString),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
num_tasks = len(tasks)
i = 0
print(" Processing %s tasks..." % str(len(tasks)))
for task in tasks:
# Update user
i += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(num_tasks)))
sys.stdout.flush()
# Get field content
location = str(task['info']['latitude']) + str(task['info']['long
|
robwarm/gpaw-symm
|
gpaw/eigensolvers/rmm_diis_old.py
|
Python
|
gpl-3.0
| 4,714
| 0.001697
|
"""Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG
|
litnimax/astconfman
|
astconfman/migrations/versions/2728b7328b78_.py
|
Python
|
agpl-3.0
| 724
| 0.012431
|
"""empty message
Revision ID: 2728b7328b78
Revises: d7c7f3be40a
Create Date: 2015-10-20 13:44:12.129389
"""
# revision identifiers, used by Alembic.
revision = '2728b7328b78'
down_revision = 'd7c7f3be40a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('conference_schedule')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('conference_schedule',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('entry', sa.VARCHAR(length=256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/identitytoolkit/alpha/tenant.py
|
Python
|
apache-2.0
| 10,881
| 0.002022
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import tenant_pb2
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import tenant_pb2_grpc
from typing import List
class Tenant(object):
def __init__(
self,
name: str = None,
display_name: str = None,
allow_password_signup: bool = None,
enable_email_link_signin: bool = None,
disable_auth: bool = None,
enable_anonymous_user: bool = None,
mfa_config: dict = None,
test_phone_numbers: dict = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.display_name = display_name
self.allow_password_signup = allow_password_signup
self.enable_email_link_signin = enable_email_link_signin
self.disable_auth = disable_auth
self.enable_anonymous_user = enable_anonymous_user
self.mfa_config = mfa_config
self.test_phone_numbers = test_phone_numbers
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
request = tenant_pb2.ApplyIdentitytoolkitAlphaTenantRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
request.resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
request.resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
request.resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
request.resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
request.resource.mfa_config.CopyFrom(
TenantMfaConfig.to_proto(self.mfa_config)
)
else:
request.resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
request.resource.test_phone_numbers = Primitive.to_proto(
self.test_phone_numbers
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyIdentitytoolkitAlphaTenant(request)
self.name = Primitive.from_proto(response.name)
self.display_name = Primitive.from_proto(response.display_name)
self.allow_password_signup = Primitive.from_proto(
response.allow_password_signup
)
self.enable_email_link_signin = Primitive.from_proto(
response.enable_email_link_signin
)
self.disable_auth = Primitive.from_proto(response.disable_auth)
self.enable_anonymous_user = Primitive.from_proto(
response.enable_anonymous_user
)
self.mfa_config = TenantMfaConfig.from_proto(response.mfa_config)
self.test_phone_numbers = Primitive.from_proto(response.test_phone_numbers)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
request = tenant_pb2.DeleteIdentitytoolkitAlphaTenantRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
request.resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
request.resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
request.resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
request.resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
request.resource.mfa_config.CopyFrom(
TenantMfaConfig.to_proto(self.mfa_config)
)
else:
request.resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
request.resource.test_phone_numbers = Primitive.to_proto(
self.test_phone_numbers
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteIdentitytoolkitAlphaTenant(request)
@classmethod
def list(self, project, service_account_file=""):
stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
request = tenant_pb2.ListIdentitytoolkitAlphaTenantRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListIdentitytoolkitAlphaTenant(request).items
def to_proto(self):
resource = tenant_pb2.IdentitytoolkitAlphaTenant()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
resource.mfa_config.CopyFrom(TenantMfaConfig.to_proto(self.mfa_config))
else:
resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
resource.test_phone_numbers = Primitive.to_proto(self.test_phone_numbers)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class TenantMfaConfig(object):
def __init__(self, state: str = None, enabled_providers: list = None):
self.state = state
self.enabled_providers = enabled_providers
@
|
LamCiuLoeng/internal
|
tribal/model/__init__.py
|
Python
|
mit
| 6,536
| 0.009945
|
# -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
#===============================================================================
# test by cl
#===============================================================================
import sqlalchemy
from datetime import date, datetime as dt
from sqlalchemy.orm.session import SessionExtension
from sqlalchemy.orm import attributes, object_mapper
DB_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
class LogSessionExtension(SessionExtension):
def before_flush(self, session, flush_context, instances):
print "_^" * 30
print "Come into my log session extension"
print "_*" * 30
log = []
for obj in session.dirty:
obj_mapper = object_mapper(obj)
obj_state = attributes.instance_state(obj)
for om in obj_mapper.iterate_to_root():
for obj_col in om.local_table.c:
try:
prop = obj_mapper.get_property_by_column(obj_col)
except UnmappedColumnError:
continue
try:
need2log = obj_col.info["auto_log"]
except:
continue
else:
if not need2log : continue
if prop.key not in obj_state.dict:
getattr(obj, prop.key)
history = attributes.get_history(obj, prop.key)
if not history.has_changes():continue
a, u, d = history
if d:
attr_old_value = d[0]
elif u:
attr_old_value = u[0]
else:
attr_old_value = ""
attr_new_value = a[0] or ""
if not self._isUpdateReally(obj_col, attr_old_value, attr_new_value) : continue
_old, _new = self._2string(obj_col, attr_old_value, attr_new_value)
log.append((obj_col.info.get("field_name", prop.key), _old, _new))
if log :
print log
def _isUpdateReally(self, col, old_value, new_value):
if not old_value and not new_value : return False
if not (old_value and new_value) : return True
if isinstance(col.type, sqlalchemy.types.Integer): return old_value == int(new_value)
if isinstance(col.type, sqlalchemy.types.Float): return old_value == float(new_value)
if isinstance(col.type, (sqlalchemy.types.Unicode, sqlalchemy.types.String)): return unicode(old_value) == unicode(new_value)
if isinstance(col.type, (sqlalchemy.types.Date, sqlalchemy.types.DateTime)) : return old_value == dt.strptime(new_value, DB_DATE_FORMAT)
# if isinstance(prop.type, sqlalchemy.types.Boolean) : return old_value == bool(new_value)
return False
def _2string(self, col, old_value, new_value):
if isinstance(col.type, sqlalchemy.types.Integer): return (old_value or '', new_value or '')
if isinstance(col.type, sqlalchemy.types.Float): return (old_value or '', new_value or '')
if isinstance(col.type, (sqlalchemy.types.Unicode, sqlalchemy.types.String)): return (old_value or "", new_value or "")
if isinstance(col.type, (sqlalchemy.types.Date, sqlalchemy.types.DateTime)) :
_o = "" if not old_value else old_value.strftime(DB_DATE_FORMAT)
_n = new_value or ""
return (_o, _n)
return (old_value, new_value)
# maker = sessionmaker(autoflush = True, autocommit = False,
# extension = [ LogSessionExtension(), ZopeTransactionExtension(), ])
maker = sessionmaker(autoflush = True, autocommit = False,
extension = ZopeTransactionExtension())
DBSession = scoped_session(maker)
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######
def init_model(engine):
"""Call me before using any of the tables or classes in the model."""
engine.dialect.supports_sane_rowcount = False
DBSession.configure(bind = engine)
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
# global t_reflected
# t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
# mapper(Reflected, t_reflected)
# Import your model modules here.
from tribal.model.auth import User, Group, Permission
from tribal.model.sportsware import *
from tribal.model.orsay import *
from tribal.model.orchestra import *
from tribal.model.sample import *
# from tribal.model.pei import *
from tribal.model.sysutil import *
from tribal.model.dba import *
from tribal.model.bby import *
from tribal.model.tag import *
# from tribal.model.cabelas import *
from tribal.model.lemmi import *
from tribal.model.tmw import *
from tribal.model.mglobalpack import *
from tribal.model.prepress import *
|
joshalbrecht/memdam
|
memdam/server/web/utils.py
|
Python
|
gpl-2.0
| 1,213
| 0.003298
|
import os
import flask
import memdam.blobstore.localfolder
import memdam.eventstore.sqlite
from memdam.server.web import app
def get_archive(username):
"""
:param username: the name of the user for which we should get the event archive
:type username: string
:returns: a new (or cached) archive
:rtype: memdam.eventstore.api.Eventstore
"""
db_file = app.config['DATABASE_FOLDER']
if db_file == ':memory:':
return flask.g._archives[username]
assert db_file != ''
db_file = os.path.join(db_file, username)
if not os.path.exists(db_file):
os.makedirs(db_file)
archive = memdam.eventstore.sqlite.Eventstore(db_file)
return archive
def get_blobstore(username):
"""
:param username: the name of the user for which we should get the blobstore folder.
:type username: string
:returns: a new (or cached) blobstore
:rtype: memdam.blobstore.api.Blobstore
"""
base_folder = app.config['BLOBSTORE_FOLDER']
user_folder = os.path.join(base_folder, username)
if not os.path.exists(user_folder):
os.makedirs(user_folder)
blobstore = memdam.blobstore.localfolder.Blobstore(user_folder)
return blobstore
|
wanderine/nipype
|
nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py
|
Python
|
bsd-3-clause
| 1,433
| 0.020237
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.semtools.utilities.brains import BRAINSInitializedControlPoints
def test_BRAINSInitializedControlPoints_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputLandmarksFile=dict(argstr='--outputLandmarksFile %s',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
permuteOrder=dict(argstr='--permuteOrder %s',
sep=',',
),
splineGridSize=dict(argstr='--splineGridSize %s',
sep=',',
),
terminal_output=dict(nohash=True,
),
)
inputs = BRAINSInitializedControlPoints.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BRAINSInitializedControlPoints_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = BRAINSInitializedControlPoints.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
camsas/qjump-nsdi15-plotting
|
table1/do_hist.py
|
Python
|
bsd-3-clause
| 2,737
| 0.008403
|
#! /usr/bin/python
# Copyright (c) 2015, Matthew P. Grosvenor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import numpy
if len(sys.argv) < 3:
print "Usage: do_hist <input_file> <bins>"
sys.exit(-1)
bins_count = int(sys.argv[2])
nums = []
parse_errors = 0
range_errors = 0
for line in open(sys.argv[1]):
try:
num = float(line)
except:
parse_errors += 1
continue
if num < 0 or num > 1000000000:
range_errors += 1
continue
nums.append(num)
print "Parse errors found: %i" % (parse_errors)
print "Range errors found: %i" % (range_errors)
if len(nums) < 100:
print "Fatal Error! Not enough points (%i)" % len(nums)
sys.exit(-1)
nums.sort()
#min = numpy.min(nums)
#max = numpy.max(nums)
#print min,max
(ys,xs) = numpy.histogram(nums,bins=bins_count, range=[min(nums),max(nums)])
percent = [0,1,10,25,50,75,90,99,100]
print "Total samples %i" % len(nums)
for p in percent:
print "%3i%% - %fms" % (p, numpy.percentile(nums,p * 1.0))
#print "%i" % (numpy.percentile(nums,p * 1.0))
print "Range = %fms" % (numpy.max(nums) - numpy.min(nums))
out = open("hist_out.ssv","w")
for i in range(0,len(ys)):
out.write("%i %f\n" % (xs[i],ys[i] * 100.0 /len(nums)))
|
arizvisa/syringe
|
src/hooktest.py
|
Python
|
bsd-2-clause
| 3,457
| 0.01186
|
import psyco
psyco.full()
import linker.coff
from linker import store
m,n='python26.dll','Py_DecRef'
localname = None,'__imp__<%s!%s>'%(m,n)
if True:
# this should import from python26.lib,Py_DecRef
# this should export ia32.obj,stuff
a = linker.coff.object.open('~/work/syringe/src/ia32.obj')
# imports None,Py_DecRef
# this should import from python26.dll,Py_DecRef
# this should export Py_DecRef
b = linker.coff.library.open('~/python26/libs/python26.lib')
# imports python26.dll,Py_DecRef
# exports None,Py_DecRef
# this should import from whatever
# and export whatever
c = linker.coff.executable.open('~/../../windows/syswow64/python26.dll')
# exports python26.dll,Py_DecRef
d = linker.coff.executable.open('~/../../windows/syswow64/msvcr100.dll')
# raise NotImplementedError("symbol consolidation isn't working")
if True:
z = b
z[store.BaseAddress] = 0x10000000
for x in z.undefined:
z[x] = 0xbbbbbbbb
out = file('blah','wb')
for x in z.segments:
y = z.getsegment(x)
y = z.relocatesegment(x, y)
out.write(y)
out.close()
if False:
#print a
#print c
if True:
z = linker.new()
print a
z.addstore(a)
print b
z.addstore(b)
print c
z.addstore(c)
print d
z.addstore(d)
if False:
m,n='msvcr100.dll','_heapmin'
print True,(None,n) in d.globals
print False,(None,n) in z.globals
print False,(m,n) in d.globals
print True,(m,n) in z.globals
if False:
paths = '~/../../windows/syswow64','~/python26/dlls'
# dlls = 'ntdll.dll','kernel32.dll','python26.dll','msvcr100.dll','shell32.dll','user32.dll','gdi32.dll','pcwum.dll','advapi32.dll','shlwapi.dll','cryptsp.dll','msvcrt.dll','kernelbase.dll','shunimpl.dll','sspicli.dll'
dlls = 'msvcr100.dll',
for filename in dlls:
print 'loading %s'% filename
for p in paths:
try:
z.addstore(linker.coff.executable.open('%s/%s'%(p,filename)))
break
except IOError:
pass
continue
continue
print [(m,n) for m,n in z.undefined if m is None]
if False:
modules = set((m for m,n in z.undefined if m is not None))
print [(m,n) for m,n in z.undefined if m is None]
for filename in modules:
if '-' in filename:
continue
print 'loading %s'% filename
for p in paths:
try:
z.addstore(linker.coff.executable.open('%s/%s'%(p,filename)))
break
except IOError:
pass
continue
continue
if True:
z[store.BaseAddress] = 0x10000000
for x in z.undefined:
z[x] = 0xbbbbbbbb
if True:
print '-'*25
out = file('blah','wb')
for x in z.segments:
y = z.getsegment(x)
y = z.relocatesegment(x, y)
out.write(y)
out.close()
if False:
print '-'*25
for x in a.externals:
a[x] = 0xbbbbbbbb
a[store.BaseAddress] = 0x10000000
b = a.getsegment('.text')
c = a.relocatesegment('.text',b)
# import ptypes
# print ptypes.hexdump(c, a['.text'])
|
Nic30/hwtLib
|
hwtLib/tests/all.py
|
Python
|
mit
| 22,367
| 0.001162
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from unittest import TestLoader, TextTestRunner, TestSuite
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.abstract.busEndpoint_test import BusEndpointTC
from hwtLib.abstract.frame_utils.alignment_utils_test import FrameAlignmentUtilsTC
from hwtLib.abstract.frame_utils.join.test import FrameJoinUtilsTC
from hwtLib.abstract.template_configured_test import TemplateConfigured_TC
from hwtLib.amba.axiLite_comp.buff_test import AxiRegTC
from hwtLib.amba.axiLite_comp.endpoint_arr_test import AxiLiteEndpointArrTCs
from hwtLib.amba.axiLite_comp.endpoint_fromInterfaces_test import \
AxiLiteEndpoint_fromInterfaceTC, AxiLiteEndpoint_fromInterface_arr_TC
from hwtLib.amba.axiLite_comp.endpoint_struct_test import \
AxiLiteEndpoint_arrayStruct_TC, AxiLiteEndpoint_struct_TC
from hwtLib.amba.axiLite_comp.endpoint_test import AxiLiteEndpointTCs
from hwtLib.amba.axiLite_comp.to_axi_test import AxiLite_to_Axi_TC
from hwtLib.amba.axi_comp.cache.cacheWriteAllocWawOnlyWritePropagating_test import AxiCacheWriteAllocWawOnlyWritePropagatingTCs
from hwtLib.amba.axi_comp.cache.pseudo_lru_test import PseudoLru_TC
from hwtLib.amba.axi_comp.interconnect.matrixAddrCrossbar_test import\
AxiInterconnectMatrixAddrCrossbar_TCs
from hwtLib.amba.axi_comp.interconnect.matrixCrossbar_test import \
AxiInterconnectMatrixCrossbar_TCs
from hwtLib.amba.axi_comp.interconnect.matrixR_test import AxiInterconnectMatrixR_TCs
from hwtLib.amba.axi_comp.interconnect.matrixW_test import AxiInterconnectMatrixW_TCs
from hwtLib.amba.axi_comp.lsu.read_aggregator_test import AxiReadAggregator_TCs
from hwtLib.amba.axi_comp.lsu.store_queue_write_propagating_test import AxiStoreQueueWritePropagating_TCs
from hwtLib.amba.axi_comp.lsu.write_aggregator_test import AxiWriteAggregator_TCs
from hwtLib.amba.axi_comp.resize_test import AxiResizeTC
from hwtLib.amba.axi_comp.sim.ag_test import Axi_ag_TC
from hwtLib.amba.axi_comp.slave_timeout_test import AxiSlaveTimeoutTC
from hwtLib.amba.axi_comp.static_remap_test import AxiStaticRemapTCs
from hwtLib.amba.axi_comp.stream_to_mem_test import Axi4_streamToMemTC
from hwtLib.amba.axi_comp.tester_test import AxiTesterTC
from hwtLib.amba.axi_comp.to_axiLite_test import Axi_to_AxiLite_TC
from hwtLib.amba.axi_test import AxiTC
from hwtLib.amba.axis_comp.en_test import AxiS_en_TC
from hwtLib.amba.axis_comp.fifoDrop_test import AxiSFifoDropTC
from hwtLib.amba.axis_comp.fifoMeasuring_test import AxiS_fifoMeasuringTC
from hwtLib.amba.axis_comp.frameGen_test import AxisFrameGenTC
from hwtLib.amba.axis_comp.frame_deparser.test import AxiS_frameDeparser_TC
from hwtLib.amba.axis_comp.frame_join.test import AxiS_FrameJoin_TCs
from hwtLib.amba.axis_comp.frame_parser.footer_split_test import AxiS_footerSplitTC
from hwtLib.amba.axis_comp.frame_parser.test import AxiS_frameParserTC
from hwtLib.amba.axis_comp.resizer_test import AxiS_resizer_TCs
from hwtLib.amba.axis_comp.storedBurst_test import AxiSStoredBurstTC
from hwtLib.amba.axis_comp.strformat_test import AxiS_strFormat_TC
from hwtLib.amba.datapump.interconnect.rStrictOrder_test import \
RStrictOrderInterconnectTC
from hwtLib.amba.datapump.interconnect.wStrictOrderComplex_test import \
WStrictOrderInterconnectComplexTC
from hwtLib.amba.datapump.interconnect.wStrictOrder_test import \
WStrictOrderInterconnectTC, WStrictOrderInterconnect2TC
from hwtLib.amba.datapump.r_aligned_test import Axi_rDatapump_alignedTCs
from hwtLib.amba.datapump.r_unaligned_test import Axi_rDatapump_unalignedTCs
from hwtLib.amba.datapump.w_test import Axi_wDatapumpTCs
from hwtLib.avalon.axiToMm_test import AxiToAvalonMm_TCs
from hwtLib.avalon.endpoint_test import AvalonMmEndpointTCs
from hwtLib.avalon.mm_buff_test import AvalonMmBuff_TC
from hwtLib.avalon.sim.mmAgent_test import AvalonMmAgentTC
from hwtLib.avalon.sim.stAgent_test import AvalonStAgentTC
from hwtLib.cesnet.mi32.axi4Lite_bridges_test import Mi32Axi4LiteBrigesTC
from hwtLib.cesnet.mi32.endpoint_test import Mi32EndpointTCs
from hwtLib.cesnet.mi32.interconnectMatrix_test import Mi32InterconnectMatrixTC
from hwtLib.cesnet.mi32.mi32agent_test import Mi32AgentTC
from hwtLib.cesnet.mi32.sliding_window_test import Mi32SlidingWindowTC
from hwtLib.cesnet.mi32.to_axi4Lite_test import Mi32_to_Axi4LiteTC
from hwtLib.clocking.cdc_test import CdcTC
from hwtLib.common_nonstd_interfaces.addr_data_hs_to_Axi_test import AddrDataHs_to_Axi_TCs
from hwtLib.examples.arithmetic.cntr_test import CntrTC, CntrResourceAnalysisTC
from hwtLib.examples.arithmetic.multiplierBooth_test import MultiplierBoothTC
from hwtLib.examples.arithmetic.privateSignals_test import PrivateSignalsOfStructTypeTC
from hwtLib.examples.arithmetic.selfRefCntr_test import SelfRefCntrTC
from hwtLib.examples.arithmetic.twoCntrs_test import TwoCntrsTC
from hwtLib.examples.arithmetic.vhdl_vector_auto_casts import VhdlVectorAutoCastExampleTC
from hwtLib.examples.arithmetic.widthCasting import WidthCastingExampleTC
from hwtLib.examples.axi.debugbusmonitor_test import DebugBusMonitorExampleAxiTC
from hwtLib.examples.axi.oooOp.counterArray_test import OooOpExampleCounterArray_TCs
from hwtLib.examples.axi.oooOp.counterHashTable_test import OooOpExampleCounterHashTable_TCs
from hwtLib.examples.axi.simpleAxiRegs_test import SimpleAxiRegsTC
from hwtLib.examples.builders.ethAddrUpdater_test import EthAddrUpdaterTCs
from hwtLib.examples.builders.handshakedBuilderSimple import \
HandshakedBuilderSimpleTC
from hwtLib.examples.builders.hsBuilderSplit_test import HsBuilderSplit_TC
from hwtLib.examples.builders.hwException_test import HwExceptionCatch_TC
from hwtLib.examples.builders.pingResponder_test import PingResponderTC
from hwtLib.examples.emptyUnitWithSpi import EmptyUnitWithSpiTC
from hwtLib.examples.errors.combLoops import CombLoopAnalysisTC
from hwtLib.examples.errors.errors_test import ErrorsTC
from hwtLib.examples.hdlComments_test import HdlCommentsTC
from hwtLib.examples.hdlObjLists.listOfInterfaces0 import ListOfInterfacesSample0TC
from hwtLib.examples.hdlObjLists.listOfInterfaces1 import ListOfInterfacesSample1TC
from hwtLib.examples.hdlObjLists.listOfInterfaces2 import ListOfInterfacesSample2TC
from hwtLib.examples.hdlObjLists.listOfInterfaces3 import ListOfInterfacesSample3TC
from hwtLib.examples.hdlObjLists.listOfInterfaces4 import ListOfInterfacesSample4TC
from hwtLib.examples.hierarchy.hierarchySerialization_test import \
HierarchySerializationTC
from hwtLib.examples.hierarchy.simpleSubunit2 import SimpleSubunit2TC
from hwtLib.examples.hierarchy.simpleSubunit3 import SimpleSubunit3TC
from hwtLib.examples.hierarchy.simpleSubunit_test import SimpleSubunitTC
from hwtLib.examples.hierarchy.unitToUnitConnection import \
UnitToUnitConnectionTC
from hwtLib.examples.hierarchy.unitWrapper_test import UnitWrapperTC
from hwtLib.examples.mem.avalonmm_ram_test import AvalonMmBram_TC
from hwtLib.examples.mem.axi_ram_test import Axi4BRam_TC
from hwtLib.examples.mem.bram_wire import BramWireTC
from hwtLib.examples.mem.ram_test import RamResourcesTC, \
SimpleAsyncRamTC, SimpleSyncRamTC
from hwtLib.examples.mem.reg_test import DRegTC, RegSerializationTC, \
DoubleRRegTC, DReg_asyncRstTC
from hwtLib.examples.mem.rom_test import SimpleRomTC, SimpleSyncRomTC, \
RomResourcesTC
from hwtLib.examples.operators.cast_test import CastTc
from hwtLib.examples.operators.concat_test import ConcatTC
from hwtLib.examples.operators.indexing_test import IndexingTC
from hwtLib.examples.parametrization_test import ParametrizationTC
from hwtLib.examples.rtlLvl.rtlLvl_test import RtlLvlTC
from hwtLib.examples.showcase0_test import Showcase0TC
from hwtLib.examples.simple2withNonDirectIntConnection import \
Simple2withNonDirectIntConnectionTC
from hwtLib.examples.simpleAxiStream_test import SimpleUnitAxiStream_TC
from hwtLib.examples.simpleWithNonDirectIntConncetion import \
SimpleWithNonDirectIntConncetionTC
from hwtLib.examples.simpleWithParam import SimpleUnitWithParamTC
from hwtLib.examples.simple_test import SimpleTC
from hwtLib.examples.specialIntfTypes.intfWithArray import InterfaceWithArrayTypesTC
from hwtLib.examples.statements.
|
244xiao/blender-java
|
blender-java/src/resources/release/scripts/ui/properties_data_metaball.py
|
Python
|
gpl-2.0
| 3,814
| 0.000524
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from rna_prop_ui import PropertyPanel
class DataButtonsPanel():
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
return context.meta_ball
class DATA_PT_context_metaball(DataButtonsPanel, bpy.types.Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
ob = context.object
mball = context.meta_ball
space = context.space_data
if ob:
layout.template_ID(ob, "data", unlink="None")
elif mball:
layout.template_ID(space, "pin_id", unlink="None")
class DATA_PT_metaball(DataButtonsPanel, bpy.types.Panel):
bl_label = "Metaball"
def draw(self, context):
layout = self.layout
mball = context.meta_ball
split = layout.split()
col = split.column()
col.label(text="Resolution:")
sub = col.column(align=True)
sub.prop(mball, "resolution", text="View")
sub.prop(mball, "render_resolution", text="Render")
col = split.column()
col.label(text="Settings:")
col.prop(mball, "threshold", text="Threshold")
layout.label(text="Update:")
layout.prop(mball, "update_method", expand=True)
class DATA_PT_metaball_element(DataButtonsPanel, bpy.types.Panel):
bl_label = "Active Element"
@classmethod
def poll(cls, context):
return (context.meta_ball and context.meta_ball.elements.active)
def draw(self, context):
layout = self.layout
metaelem = context.meta_ball.elements.active
layout.prop(metaelem, "type")
split = layout.split()
col = split.column(align=True)
col.label(text="Settings:")
col.prop(metaelem, "stiffness", text="Stiffness")
col.prop(metaelem, "use_negative", text="Negative")
col.prop(metaelem, "hide", text="Hide")
col = split.column(align=True)
if metaelem.type in ('CUBE', 'ELLIPSOID'):
col.label(text="Size:")
col.prop(metaelem, "size_x", text="X")
col.prop(metaelem, "size_y", text="Y")
col.prop(metaelem, "size_z", text="Z")
elif metaelem.type == 'TUBE':
col.label(text="Size:")
col.prop(metaelem, "size_x", text="X")
elif metaelem.type == 'PLANE':
col.label(text="Size:")
col.prop(metaelem, "size_x", text="X")
col.prop(metaelem, "size_y", text="Y")
class DATA_PT_custom_props_metaball(DataButtonsPanel, PropertyPanel, bpy.types.Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
_context_path = "object.data"
_property_type = bpy.types.MetaBall
def register():
pass
def unregister():
pass
if __name__ == "__main__":
register()
|
ldu4/criu
|
test/inhfd/pipe.py
|
Python
|
lgpl-2.1
| 215
| 0.037209
|
import os
def create_fds():
(fd1, fd2) = os.pipe()
return (os.fdopen(fd2, "w"), os.fdopen(fd1, "r"))
def filename(pipef):
return 'pipe:[%d]' % os.fstat(pipef.fileno()).st_ino
def dump_opts(sockf):
return [ ]
|
teeple/pns_server
|
work/install/Python-2.7.4/Lib/plat-mac/MiniAEFrame.py
|
Python
|
gpl-2.0
| 6,519
| 0.004295
|
"""MiniAEFrame - A minimal AppleEvent Application framework.
There are two classes:
AEServer -- a mixin class offering nice AE handling.
MiniApplication -- a very minimal alternative to FrameWork.py,
only suitable for the simplest of AppleEvent servers.
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the MiniAEFrame module is removed.", stacklevel=2)
import traceback
import MacOS
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon.Events import *
from Carbon import Menu
from Carbon import Win
from Carbon.Windows import *
from Carbon import Qd
import aetools
import EasyDialogs
kHighLevelEvent = 23 # Not defined anywhere for Python yet?
class MiniApplication:
"""A minimal FrameWork.Application-like class"""
def __init__(self):
self.quitting = 0
# Initialize menu
self.appleid = 1
self.quitid = 2
Menu.ClearMenuBar()
self.applemenu = applemenu = Menu.NewMenu(self.appleid, "\024")
applemenu.AppendMenu("%s;(-" % self.getaboutmenutext())
if MacOS.runtimemodel == 'ppc':
applemenu.AppendResMenu('DRVR')
applemenu.InsertMenu(0)
self.quitmenu = Menu.NewMenu(self.quitid, "File")
self.quitmenu.AppendMenu("Quit")
self.quitmenu.SetItemCmd(1, ord("Q"))
self.quitmenu.InsertMenu(0)
Menu.DrawMenuBar()
def __del__(self):
self.close()
def close(self):
pass
def mainloop(self, mask = everyEvent, timeout = 60*60):
while not self.quitting:
self.dooneevent(mask, timeout)
def _quit(self):
self.quitting = 1
def dooneevent(self, mask = everyEvent, timeout = 60*60):
got, event = Evt.WaitNextEvent(mask, timeout)
if got:
self.lowlevelhandler(event)
def lowlevelhandler(self, event):
what, message, when, where, modifiers = event
h, v = where
if what == kHighLevelEvent:
msg = "High Level Event: %r %r" % (code(message), code(h | (v<<16)))
try:
AE.AEProcessAppleEvent(event)
except AE.Error, err:
print 'AE error: ', err
print 'in', msg
traceback.print_exc()
return
elif what == keyDown:
c = chr(message & charCodeMask)
if modifiers & cmdKey:
if c == '.':
raise KeyboardInterrupt, "Command-period"
if c == 'q':
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
return
elif what == mouseDown:
partcode, window = Win.FindWindow(where)
if partcode == inMenuBar:
result = Menu.MenuSelect(where)
id = (result>>16) & 0xffff # Hi word
item = result & 0xffff # Lo word
if id == self.appleid:
if item == 1:
EasyDialogs.Message(self.getabouttext())
elif item > 1 and hasattr(Menu, 'OpenDeskAcc'):
name = self.applemenu.GetMenuItemText(item)
Menu.OpenDeskAcc(name)
elif id == self.quitid and item == 1:
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
Menu.HiliteMenu(0)
return
# Anything not handled is passed to Python/SIOUX
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
else:
print "Unhandled event:", event
def getabouttext(self):
return self.__class__.__name__
def getaboutmenutext(self):
return "About %s\311" % self.__class__.__name__
class AEServer:
def __init__(self):
self.ae_handlers = {}
def installaehandler(self, classe, type, callback):
AE.AEInstallEventHandler(classe, type, self.callback_wrapper)
self.ae_handlers[(classe, type)] = callback
def close(self):
for classe, type in self.ae_handlers.keys():
AE.AERemoveEventHandler(classe, type)
def callback_wrapper(self, _request, _reply):
_parameters, _attributes = aetools.unpackevent(_request)
_class = _attributes['evcl'].type
_type = _attributes['evid'].type
if (_class, _type) in self.ae_handlers:
_function = self.ae_handlers[(_class, _type)]
elif (_class, '****') in self.ae_handlers:
_function = self.ae_handlers[(_class, '****')]
elif ('****', '****') in self.ae_handlers:
_function = self.ae_handlers[('****', '****')]
else:
raise 'Cannot happen: AE callback without handler', (_class, _type)
# XXXX Do key-to-name mapping here
_parameters['_attributes'] = _attributes
_parameters['_class'] = _class
_parameters['_type'] = _type
if '----' in _parameters:
_object = _parameters['----']
del _parameters['----']
# The try/except that used to be here can mask programmer errors.
# Let the program crash, the programmer can always add a **args
# to the formal parameter list.
rv = _function(_object, **_parameters)
else:
#Same try/except comment as above
rv = _function(**_parameters)
if rv is None:
aetools.packevent(_reply, {})
else:
aetools.packevent(_reply, {'----':rv})
def code(x):
"Convert a long int to the 4-character code it really is"
s = ''
for i in range(4):
x, c = divmod(x, 256)
s = chr(c) + s
return s
class _Test(AEServer, MiniApplication):
"""Mini test application, handles required events"""
def __init__(self):
MiniApplication.__init__(self)
AEServer.__init__(self)
self.installaehandler('aevt', 'oapp', self.open_app)
self.installaehandler('aevt', 'quit', self.quit)
self.installaehandler('****', '****', self.other)
self.mainloop()
def quit(self, **args):
self._quit()
def open_app(self, **args):
pass
def other(self, _object=None, _class=None, _type=None, **args):
print 'AppleEvent', (_class, _type), 'for', _object, 'Other args:', args
if __name__ == '__main__':
_Test()
|
endlessm/chromium-browser
|
third_party/llvm/lldb/test/API/commands/expression/import-std-module/weak_ptr/TestWeakPtrFromStdModule.py
|
Python
|
bsd-3-clause
| 944
| 0.003178
|
"""
Test basic std::weak_ptr functionality.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSharedPtr(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
        self.runCmd("settings set target.import-std-module true")
self.expect("expr (int)*w.lock()", substrs=['(int) $0 = 3'])
self.expect("expr (int)(*w.lock() = 5)", substrs=['(int) $1 = 5'])
self.expect("expr (int)*w.lock()", substrs=['(int) $2 = 5'])
self.expect("expr w.use_count()", substrs=['(long) $3 = 1'])
self.expect("expr w.reset()")
self.expect("expr w.use_count()", substrs=['(long) $4 = 0'])
|
postlund/home-assistant
|
homeassistant/components/alexa/auth.py
|
Python
|
apache-2.0
| 5,485
| 0.000365
|
"""Support for Alexa skill auth."""
import asyncio
from datetime import timedelta
import json
import logging
import aiohttp
import async_timeout
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.util import dt
_LOGGER = logging.getLogger(__name__)
LWA_TOKEN_URI = "https://api.amazon.com/auth/o2/token"
LWA_HEADERS = {"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
PREEMPTIVE_REFRESH_TTL_IN_SECONDS = 300
STORAGE_KEY = "alexa_auth"
STORAGE_VERSION = 1
STORAGE_EXPIRE_TIME = "expire_time"
STORAGE_ACCESS_TOKEN = "access_token"
STORAGE_REFRESH_TOKEN = "refresh_token"
class Auth:
"""Handle authentication to send events to Alexa."""
def __init__(self, hass, client_id, client_secret):
"""Initialize the Auth class."""
self.hass = hass
self.client_id = client_id
self.client_secret = client_secret
self._prefs = None
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._get_token_lock = asyncio.Lock()
async def async_do_auth(self, accept_grant_code):
"""Do authentication with an AcceptGrant code."""
# access token not retrieved yet for the first time, so this should
# be an access token request
lwa_params = {
"grant_type": "authorization_code",
"code": accept_grant_code,
"client_id": self.client_id,
"client_secret": self.client_secret,
}
_LOGGER.debug(
"Calling LWA to get the access token (first time), with: %s",
json.dumps(lwa_params),
)
return await self._async_request_new_token(lwa_params)
@callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
self._prefs[STORAGE_ACCESS_TOKEN] = None
async def async_get_access_token(self):
"""Perform access token or token refresh request."""
async with self._get_token_lock:
if self._prefs is None:
await self.async_load_preferences()
if self.is_token_valid():
_LOGGER.debug("Token still valid, using it.")
return self._prefs[STORAGE_ACCESS_TOKEN]
if self._prefs[STORAGE_REFRESH_TOKEN] is None:
_LOGGER.debug("Token invalid and no refresh token available.")
return None
lwa_params = {
"grant_type": "refresh_token",
"refresh_token": self._prefs[STORAGE_REFRESH_TOKEN],
"client_id": self.client_id,
"client_secret": self.client_secret,
}
_LOGGER.debug("Calling LWA to refresh the access token.")
return await self._async_request_new_token(lwa_params)
@callback
def is_token_valid(self):
"""Check if a token is already loaded and if it is still valid."""
if not self._prefs[STORAGE_ACCESS_TOKEN]:
return False
expire_time = dt.parse_datetime(self._prefs[STORAGE_EXPIRE_TIME])
preemptive_expire_time = expire_time - timedelta(
seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS
)
return dt.utcnow() < preemptive_expire_time
async def _async_request_new_token(self, lwa_params):
try:
session = aiohttp_client.async_get_clientsession(self.hass)
with async_timeout.timeout(10):
response = await session.post(
LWA_TOKEN_URI,
headers=LWA_HEADERS,
data=lwa_params,
allow_redirects=True,
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout calling LWA to get auth token.")
return None
_LOGGER.debug("LWA response header: %s", response.headers)
_LOGGER.debug("LWA response status: %s", response.status)
if response.status != 200:
_LOGGER.error("Error calling LWA to get auth token.")
return None
response_json = await response.json()
_LOGGER.debug("LWA response body : %s", response_json)
access_token = response_json["access_token"]
refresh_token = response_json["refresh_token"]
expires_in = response_json["expires_in"]
expire_time = dt.utcnow() + timedelta(seconds=expires_in)
await self._async_update_preferences(
access_token, refresh_token, expire_time.isoformat()
)
return access_token
    async def async_load_preferences(self):
"""Load preferences with stored tokens."""
self._prefs = await self._store.async_load()
if self._prefs is None:
self._prefs = {
STORAGE_ACCESS_TOKEN: None,
STORAGE_REFRESH_TOKEN: None,
STORAGE_EXPIRE_TIME: None,
}
async def _async_update_preferences(self, access_token, refresh_token, expire_time):
"""Update user preferences."""
if self._prefs is None:
await self.async_load_preferences()
if access_token is not None:
self._prefs[STORAGE_ACCESS_TOKEN] = access_token
if refresh_token is not None:
self._prefs[STORAGE_REFRESH_TOKEN] = refresh_token
if expire_time is not None:
self._prefs[STORAGE_EXPIRE_TIME] = expire_time
await self._store.async_save(self._prefs)
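The refresh decision above hinges on PREEMPTIVE_REFRESH_TTL_IN_SECONDS: is_token_valid() treats a stored token as stale once it is within five minutes of its real expiry, so a new token is requested before Alexa can ever be handed an expired one. A standalone sketch of that rule (assumptions: plain datetime instead of homeassistant.util.dt, illustrative values only):

from datetime import datetime, timedelta

PREEMPTIVE_REFRESH_TTL_IN_SECONDS = 300  # same constant as above

def should_refresh(expire_time_iso, now=None):
    # Mirror of the is_token_valid() comparison, inverted: True -> refresh now.
    now = now or datetime.utcnow()
    expire_time = datetime.fromisoformat(expire_time_iso)
    return now >= expire_time - timedelta(seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS)

in_four_minutes = (datetime.utcnow() + timedelta(minutes=4)).isoformat()
in_ten_minutes = (datetime.utcnow() + timedelta(minutes=10)).isoformat()
print(should_refresh(in_four_minutes))  # True  - inside the 300 s window
print(should_refresh(in_ten_minutes))   # False - still comfortably valid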
|
mrnohr/pi-temperature-firebase
|
firebase_log_roll.py
|
Python
|
mit
| 1,702
| 0.006463
|
import requests
import time
import json
# ------------ Do data need to roll
def roll_data_if_needed(secondsAllowed):
# calculate last time
last_roll_time = get_status()
now = int(time.time())
time_since_roll = now - last_roll_time
# do I need to roll?
if time_since_roll > secondsAllowed:
roll_data()
else:
print 'do not roll'
# ------------ Get current status
def get_status():
response = firebaseGet('status.json')
json = response.json()
return json.get('lastRollTime')
# ------------ Roll the data
def roll_data():
# delete backup
    firebaseDelete('temperatures-backup.json')
# update status time
data = '{{"lastRollTime": {:d}}}'.format(int(time.time()))
firebasePut('status.json', data)
# get current values
response = firebaseGet('temperatures.json')
current_values = response.text
# add to backup
    firebasePut('temperatures-backup.json', current_values)
# delete current values
firebaseDelete('temperatures.json')
# ------------ Firebase calls
def firebaseGet(path):
return requests.get(getFirebaseUrl(path), params=getFirebaseQueryParams())
def firebasePut(path, data):
requests.put(getFirebaseUrl(path), params=getFirebaseQueryParams(), data=data)
def firebaseDelete(path):
return requests.delete(getFirebaseUrl(path), params=getFirebaseQueryParams())
def getFirebaseQueryParams():
return {'auth': config.get('auth')}
def getFirebaseUrl(path):
return '{}/{}/{}'.format(config.get('base_url'), config.get('pi_name'), path)
# ------------ Data setup
config = json.load(open("/home/pi/config.json"))
# ------------ Let's do this
roll_data_if_needed(60*60*24)
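getFirebaseUrl() and getFirebaseQueryParams() expect /home/pi/config.json to carry three keys: auth, base_url and pi_name. A hypothetical config (placeholder values, not real credentials) and the URL it produces:

import json

# Assumed contents for /home/pi/config.json (placeholders only):
example_config = {
    "auth": "FIREBASE_DB_SECRET",
    "base_url": "https://example-project.firebaseio.com",
    "pi_name": "living-room-pi",
}
print(json.dumps(example_config, indent=4))

# With that file in place the helpers above expand paths like this:
#   getFirebaseUrl('temperatures.json')
#   -> https://example-project.firebaseio.com/living-room-pi/temperatures.json
# and the auth secret is appended as the ?auth= query parameter by requests.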
|
ESOedX/edx-platform
|
openedx/core/lib/tests/test_course_tab_api.py
|
Python
|
agpl-3.0
| 653
| 0
|
"""
Tests for the plugin API
"""
from __future__ import absolute_import
from django.test import TestCase
from openedx.core.lib.plugins import PluginError
from openedx.core.lib.course_tabs import CourseTabPluginManager
class TestCourseTabApi(TestCase):
"""
    Unit tests for the course tab plugin API
"""
def test_get_plugin(self):
"""
Verify that get_plugin works as expected.
"""
tab_type = CourseTabPluginManager.get_plugin("instructor")
self.assertEqual(tab_type.title, "Instructor")
with self.assertRaises(PluginError):
CourseTabPluginManager.get_plugin("no_such_type")
|
tanaes/decontaminate
|
decontaminate_unitary.py
|
Python
|
mit
| 40,011
| 0.006148
|
#!/usr/bin/env python
# File created on 09 Aug 2012
from __future__ import division
__author__ = "Jon Sanders"
__copyright__ = "Copyright 2014, Jon Sanders"
__credits__ = ["Jon Sanders"]
__license__ = "GPL"
__version__ = "1.9.1"
__maintainer__ = "Jon Sanders"
__email__ = "jonsan@gmail.com"
__status__ = "Development"
from qiime.util import load_qiime_config, parse_command_line_parameters,\
get_options_lookup, make_option
from qiime.parse import parse_qiime_parameters, parse_taxonomy, parse_mapping_file_to_dict
from qiime.filter import sample_ids_from_metadata_description
from bfillings.uclust import get_clusters_from_fasta_filepath
from bfillings.usearch import usearch_qf
from scipy.stats import spearmanr
import os.path
from biom import load_table
import numpy as np
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """
A script to filter sequences by potential contaminants"""
script_info['script_description'] = """
This script performs a series of filtering steps on a sequence file with the
intent of removing contaminant sequences. It requires input of an OTU table, a
sample map, an OTU map, a sequence FASTA file, and an output directory.
There are two primary approaches the script can take: (1) comparing sequence
abundances in blank control sequence libraries to those in sample libraries,
where sequences present in blanks are presumed to be contaminants, and (2)
comparing sequences in sample libraries to a database of known contaminants.
In approach (1), OTUs (or unique sequences, if OTU table and map are defined at
100% identity) are tested for their maximum and mean presence in blank and
sample libraries, and excluded if they satisfy the given criteria. For example,
if you want to exclude any sequences whose maximum abundance in a blank sample
is more than 10% the maximum abundance in a sample (maxB > 0.1 * maxS), you
would choose '--removal_stat_blank maxB --removal_stat_sample maxS
--removal_differential 0.1'. For this approach, you must also provide a column
in your mapping file that indicates which samples to use as blanks, and pass
this information to the script with the 'valid states' option (e.g.
'Blank:True')
In approach (2), you must provide a fasta library of putative contaminants.
These may be previously clustered OTUs from the blank samples, commonly
sequenced contaminants (if known), or another fasta file. Sequences will be
clustered against this fasta file using Uclust-Ref, and any that match within
a given percent similarity (using the '-c' or '--contaminant_similarity' option)
will be marked as putative contaminants.
When using approach (2), it is possible to remove 'real' sequences from samples
that just happen to be similar to contaminants. This may be detectable when
using unique sequence OTU tables/maps as input, if the 'real' sequences are
nonetheless slightly different from contaminants. In this case, it may be
desirable to reinstate those unique sequences that are present in samples but
not in blanks. You may do this using criteria of relative abundance (similar to
approach [1], where a sequence is reinstated if its max presence in a sample is
greater than its max presence in a blank, i.e. maxS > X * maxB) or of incidence
in non-blank samples (i.e. reinstated if present in two or more samples). If
both criteria are provided, you must choose to reinstate either the intersection
of the criteria (i.e. BOTH more abundant in samples AND present in 2 or more)
or the union (i.e. EITHER more abundant in samples OR present in 2 or more).
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""", """
The following steps are performed by the command below:
1. Calculate max relative abundance of each sequence in samples and blanks
2. Identify sequences whose maximum abundance in blanks is more than 10% their
maximum abundance in samples.
3. Output OTU maps of sequences for which above is true, and for which above is
false.
""", """
decontaminate.py -i unique_seqs_otu_table.biom -o filter_out_dir
-m metadata_mapping_file.txt -f unique_seqs_rep_set.fna
-M unique_seqs_otus.txt -s 'Blank:True' --removal_stat_blank maxB
--removal_stat_sample maxS --removal_differential 0.1
"""))
script_info['output_description'] = """
This script will output a tab-delimited summary table, indicating the relative
abundance stats for each sequence considered, along with its fate at each step
of the process.
It will also output an OTU map for each category of sequences identified (e.g.
those never identified as contaminants, those identified as reference-based
contaminants, those identified as abundance-based contaminants, and those
reinstated). These OTU maps can then be used to filter in the input FASTA file.
Output file naming:
contamination_summary.txt -- tab-delimited per-sequence summary file
passed_otu_map.txt -- OTU map of non-contaminant sequences
ref_contaminants_otu_map.txt -- OTU map of reference contaminant sequences
abund_contaminants_otu_map.txt -- OTU map of abundance contaminant sequences
reinstated_contaminants_otu_map.txt -- OTU map of reinstated sequences
"""
script_info['required_options'] = [
options_lookup["output_dir"]
]
script_info['optional_options'] = [
options_lookup["otu_table_as_primary_input"],
make_option('--mothur_counts_fp',
type='existing_filepath',
help='path to mothur counts table as input'),
options_lookup["mapping_fp"],
make_option('-M', '--otu_map_fp', type="existing_filepath",
help='the input OTU map file'),
make_option('-s',
'--valid_states', type='string',
help="Column header:value pair in mapping file identifying blank samples"),
make_option('--blank_id_fp',
type='existing_filepath',
help='path to file listing blank sample ids'),
options_lookup["input_fasta"],
make_option('--contaminant_db_fp', type="existing_filepath",
help='A FASTA file of potential contaminant sequences'),
make_option('-c', '--contaminant_similarity', type='float', default=0.97,
help=('Sequence similarity threshold for contaminant matches')),
make_option('-r', '--max_correlation', type='float',
help=('Maximum Spearman correlation for contaminant identification')),
make_option('--correlate_header', type='string',
help=('Column header in mapping file with correlation data')),
make_option('--min_relabund_threshold', type="float",
help='discard sequences below this relative abundance threshold'),
make_option('--prescreen_threshold', type="float",
help='prescreen libraries that lose more than this proportion of sequences'),
make_option('--removal_stat_blank', type="choice", choices=["maxB", "avgB"],
help='blank statistic to be used for removal (maxB, avgB)'),
make_option('--removal_stat_sample', type="choice", choices=["maxS", "avgS"],
help='sample statistic to be used for removal (maxS, avgS)'),
make_option('--removal_differential', type="float",
help='differential proportion for removal (maxB > X * maxS)'),
make_option('--reinstatement_stat_blank', type="choice", choices=["maxB", "avgB"],
help='blank statistic to be used for reinstatement (maxB, avgB)'),
make_option('--reinstatement_stat_sample', type="choice", choices=["maxS", "avgS"],
help='sample statistic to be used for reinstatement (maxS, avgS)'),
make_option('--reinstatement_differential', type="float",
help='differential proportion for reinstatement (maxS > X * maxB)'),
make_option('--reinstatement_sample_number', type="int",
help='minimum number of samples necessary for reinstatement'),
make_option('--reinstatement_method', type="choice", choices=["union", "intersection"],
help='method to rectify reinstatement criteria'),
make_option('--drop_lib_threshold', type="float",
help='read loss threshold to drop libraries from
|
catapult-project/catapult
|
devil/devil/android/tools/device_recovery.py
|
Python
|
bsd-3-clause
| 9,284
| 0.010448
|
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A script to recover devices in a known bad state."""
import argparse
import glob
import logging
import os
import signal
import sys
import psutil
if __name__ == '__main__':
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil.android import device_denylist
from devil.android import device_errors
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.tools import device_status
from devil.android.tools import script_common
from devil.utils import logging_common
from devil.utils import lsusb
# TODO(jbudorick): Resolve this after experimenting w/ disabling the USB reset.
from devil.utils import reset_usb # pylint: disable=unused-import
logger = logging.getLogger(__name__)
from py_utils import modules_util
# Script depends on features from psutil version 2.0 or higher.
modules_util.RequireVersion(psutil, '2.0')
def KillAllAdb():
def get_all_adb():
for p in psutil.process_iter():
try:
# Retrieve all required process infos at once.
pinfo = p.as_dict(attrs=['pid', 'name', 'cmdline'])
if pinfo['name'] == 'adb':
pinfo['cmdline'] = ' '.join(pinfo['cmdline'])
yield p, pinfo
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
for p, pinfo in get_all_adb():
try:
pinfo['signal'] = sig
logger.info('kill %(signal)s %(pid)s (%(name)s [%(cmdline)s])', pinfo)
p.send_signal(sig)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for _, pinfo in get_all_adb():
try:
logger.error('Unable to kill %(pid)s (%(name)s [%(cmdline)s])', pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
def TryAuth(device):
"""Uses anything in ~/.android/ that looks like a key to auth with the device.
Args:
device: The DeviceUtils device to attempt to auth.
Returns:
True if device successfully authed.
"""
possible_keys = glob.glob(os.path.join(adb_wrapper.ADB_HOST_KEYS_DIR, '*key'))
if len(possible_keys) <= 1:
logger.warning('Only %d ADB keys available. Not forcing auth.',
len(possible_keys))
return False
KillAllAdb()
adb_wrapper.AdbWrapper.StartServer(keys=possible_keys)
new_state = device.adb.GetState()
if new_state != 'device':
logger.error('Auth failed. Device %s still stuck in %s.', str(device),
new_state)
return False
# It worked! Now register the host's default ADB key on the device so we don't
# have to do all that again.
pub_key = os.path.join(adb_wrapper.ADB_HOST_KEYS_DIR, 'adbkey.pub')
if not os.path.exists(pub_key): # This really shouldn't happen.
logger.error('Default ADB key not available at %s.', pub_key)
return False
with open(pub_key) as f:
pub_key_contents = f.read()
try:
device.WriteFile(adb_wrapper.ADB_KEYS_FILE, pub_key_contents, as_root=True)
except (device_errors.CommandTimeoutError, device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception('Unable to write default ADB key to %s.', str(device))
return False
return True
def RecoverDevice(device, denylist, should_reboot=lambda device: True):
if device_status.IsDenylisted(device.adb.GetDeviceSerial(), denylist):
logger.debug('%s is denylisted, skipping recovery.', str(device))
return
if device.adb.GetState() == 'unauthorized' and TryAuth(device):
logger.info('Successfully authed device %s!', str(device))
return
if should_reboot(device):
should_restore_root = device.HasRoot()
try:
device.WaitUntilFullyBooted(retries=0)
except (device_errors.CommandTimeoutError, device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception(
'Failure while waiting for %s. '
'Attempting to recover.', str(device))
try:
try:
device.Reboot(block=False, timeout=5, retries=0)
except device_errors.CommandTimeoutError:
logger.warning(
'Timed out while attempting to reboot %s normally.'
'Attempting alternative reboot.', str(device))
# The device drops offline before we can grab the exit code, so
# we don't check for status.
try:
device.adb.Root()
finally:
# We are already in a failure mode, attempt to reboot regardless of
# what device.adb.Root() returns. If the sysrq reboot fails an
            # exception will be thrown at that level.
device.adb.Shell(
'echo b > /proc/sysrq-trigger',
expect_status=None,
timeout=5,
retries=0)
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception('Failed to reboot %s.', str(device))
        if denylist:
          denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_failure')
except device_errors.CommandTimeoutError:
logger.exception('Timed out while rebooting %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_timeout')
try:
device.WaitUntilFullyBooted(
retries=0, timeout=device.REBOOT_DEFAULT_TIMEOUT)
if should_restore_root:
device.EnableRoot()
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.exception('Failure while waiting for %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_failure')
except device_errors.CommandTimeoutError:
logger.exception('Timed out while waiting for %s.', str(device))
if denylist:
denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_timeout')
def RecoverDevices(devices, denylist, enable_usb_reset=False):
"""Attempts to recover any inoperable devices in the provided list.
Args:
devices: The list of devices to attempt to recover.
denylist: The current device denylist, which will be used then
reset.
"""
statuses = device_status.DeviceStatus(devices, denylist)
should_restart_usb = set(
status['serial'] for status in statuses
if (not status['usb_status'] or status['adb_status'] in ('offline',
'missing')))
should_restart_adb = should_restart_usb.union(
set(status['serial'] for status in statuses
if status['adb_status'] == 'unauthorized'))
should_reboot_device = should_restart_usb.union(
set(status['serial'] for status in statuses if status['denylisted']))
logger.debug('Should restart USB for:')
for d in should_restart_usb:
logger.debug(' %s', d)
logger.debug('Should restart ADB for:')
for d in should_restart_adb:
logger.debug(' %s', d)
logger.debug('Should reboot:')
for d in should_reboot_device:
logger.debug(' %s', d)
if denylist:
denylist.Reset()
if should_restart_adb:
KillAllAdb()
adb_wrapper.AdbWrapper.StartServer()
for serial in should_restart_usb:
try:
# TODO(crbug.com/642194): Resetting may be causing more harm
# (specifically, kernel panics) than it does good.
if enable_usb_reset:
reset_usb.reset_android_usb(serial)
else:
logger.warning('USB reset disabled for %s (crbug.com/642914)', serial)
except IOError:
logger.exception('Unable to reset USB for %s.', serial)
if denylist:
denylist.Extend([serial], reason='USB failure')
except device_errors.DeviceUnreachableError:
logger.exception('Unable to reset USB for %s.', serial)
if denylist:
denylist.Extend([serial], reason='offline')
device_utils.DeviceUtils.parallel(devices).pMap(
RecoverDevice,
denylist,
should_reboot=lambda device: dev
|
tanglu-org/laniakea
|
src/lighthouse/lighthouse/events_receiver.py
|
Python
|
gpl-3.0
| 3,808
| 0.002101
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import zmq
import json
import logging as log
from zmq.eventloop import ioloop, zmqstream
from laniakea.msgstream import verify_event_message, event_message_is_valid_and_signed
class EventsReceiver:
'''
Lighthouse module handling event stream submissions,
registering them and publishing them to the world.
'''
def __init__(self, endpoint, pub_queue):
from glob import glob
from laniakea.localconfig import LocalConfig
        from laniakea.msgstream import keyfile_read_verify_key
self._socket = None
self._ctx = zmq.Context.instance()
self._pub_queue = pub_queue
self._endpoint = endpoint
self._trusted_keys = {}
# TODO: Implement auto-reloading of valid keys list if directory changes
for keyfname in glob(os.path.join(LocalConfig().trusted_curve_keys_dir, '*')):
signer_id, verify_key = keyfile_read_verify_key(keyfname)
if signer_id and verify_key:
self._trusted_keys[signer_id] = verify_key
def _event_message_received(self, socket, msg):
data = str(msg[1], 'utf-8', 'replace')
try:
event = json.loads(data)
except json.JSONDecodeError as e:
# we ignore invalid requests
log.info('Received invalid JSON message from sender: %s (%s)', data if len(data) > 1 else msg, str(e))
return
# check if the message is actually valid and can be processed
if not event_message_is_valid_and_signed(event):
# we currently just silently ignore invalid submissions
return
signatures = event.get('signatures')
signature_checked = False
for signer in signatures.keys():
key = self._trusted_keys.get(signer)
if not key:
continue
try:
verify_event_message(signer, event, key, assume_valid=True)
except Exception as e:
log.info('Invalid signature on event ({}): {}'.format(str(e), str(event)))
return
# if we are here, we verified a signature without issues, which means
# the message is legit and we can sign it ourselves and publish it
signature_checked = True
if not signature_checked:
log.info('Unable to verify signature on event: {}'.format(str(event)))
return
# now publish the event to the world
self._pub_queue.put([bytes(event['tag'], 'utf-8'),
bytes(data, 'utf-8')])
def run(self):
if self._socket:
log.warning('Tried to run an already running event receiver again.')
return
self._socket = self._ctx.socket(zmq.ROUTER)
self._socket.bind(self._endpoint)
server_stream = zmqstream.ZMQStream(self._socket)
server_stream.on_recv_stream(self._event_message_received)
ioloop.IOLoop.instance().start()
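For context, a rough sketch of what a submitter on the other side of this ROUTER socket might look like. Everything here is assumed for illustration (the endpoint, the tag, the payload fields): the only facts taken from the code above are that the JSON document arrives as the frame after the ROUTER-prepended identity, and that it must carry a signatures map from a key listed in trusted_curve_keys_dir, which this sketch does not actually produce.

import json
import zmq

ctx = zmq.Context.instance()
submit = ctx.socket(zmq.DEALER)
submit.connect('tcp://localhost:5570')  # assumed endpoint

event = {
    'tag': '_lk.jobs.build-finished',   # assumed tag; republished as the PUB topic
    'data': {'status': 'success'},
    'signatures': {},                   # must be filled via laniakea.msgstream signing
}
submit.send(json.dumps(event).encode('utf-8'))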
|
mumrah/kafka-python
|
kafka/structs.py
|
Python
|
apache-2.0
| 801
| 0.008739
|
from __future__ import absolute_import
from collections import namedtuple
# Other useful structs
TopicPartition = namedtuple("TopicPartition",
["topic", "partition"])
BrokerMetadata = namedtuple("BrokerMetadata",
    ["nodeId", "host", "port", "rack"])
PartitionMetadata = namedtuple("PartitionMetadata",
["topic", "partition", "leader", "replicas", "isr", "error"])
OffsetAndMetadata = namedtuple("OffsetAndMetadata",
# TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata)
["offset", "metadata"])
OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
["offset", "timestamp"])
# Define retry policy for async producer
# Limit value: int >= 0, 0 means no retries
RetryOptions = namedtuple("RetryOptions",
["limit", "backoff_ms", "retry_on_timeouts"])
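Because these are plain namedtuples they compare and hash by value, which is what makes them convenient as dictionary keys and as lightweight return values. A brief illustration with made-up values:

tp = TopicPartition(topic='events', partition=3)
committed = {tp: OffsetAndMetadata(offset=42, metadata='')}

# A freshly constructed key with the same fields finds the same entry.
print(committed[TopicPartition('events', 3)].offset)   # 42

# Fields also unpack positionally, like any tuple.
topic, partition = tp

# limit=0 disables retries, per the comment above.
no_retries = RetryOptions(limit=0, backoff_ms=100, retry_on_timeouts=False)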
|
bcwaldon/changeling-client
|
changeling_client/core.py
|
Python
|
apache-2.0
| 386
| 0
|
import argparse
import changeling_client.api
import changeling_client.commands
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', required=True)
subparsers = parser.add_subparsers()
changeling_client.commands.register(subparsers)
def main():
args = parser.parse_args()
service = changeling_client.api.Service(args.endpoint)
args.func(service, args)
|
aouyar/PyMunin
|
pysysinfo/util.py
|
Python
|
gpl-3.0
| 13,551
| 0.006937
|
"""Implements generic utilities for monitoring classes.
"""
import sys
import re
import subprocess
import urllib, urllib2
import socket
import telnetlib
__author__ = "Ali Onur Uyar"
__copyright__ = "Copyright 2011, Ali Onur Uyar"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.12"
__maintainer__ = "Ali Onur Uyar"
__email__ = "aouyar at gmail.com"
__status__ = "Development"
buffSize = 4096
timeoutHTTP = 10
def parse_value(val, parsebool=False):
"""Parse input string and return int, float or str depending on format.
@param val: Input string.
@param parsebool: If True parse yes / no, on / off as boolean.
@return: Value of type int, float or str.
"""
try:
return int(val)
except ValueError:
pass
try:
return float(val)
except:
pass
if parsebool:
if re.match('yes|on', str(val), re.IGNORECASE):
return True
elif re.match('no|off', str(val), re.IGNORECASE):
return False
return val
def safe_sum(seq):
"""Returns the sum of a sequence of numbers. Returns 0 for empty sequence
and None if any item is None.
@param seq: Sequence of numbers or None.
"""
if None in seq:
return None
else:
return sum(seq)
def socket_read(fp):
"""Buffered read from socket. Reads all data available from socket.
@fp: File pointer for socket.
@return: String of characters read from buffer.
"""
response = ''
oldlen = 0
newlen = 0
while True:
response += fp.read(buffSize)
newlen = len(response)
if newlen - oldlen == 0:
break
else:
oldlen = newlen
return response
def exec_command(args, env=None):
"""Convenience function that executes command and returns result.
@param args: Tuple of command and arguments.
@param env: Dictionary of environment variables.
(Environment is not modified if None.)
@return: Command output.
"""
try:
cmd = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=buffSize,
env=env)
except OSError, e:
raise Exception("Execution of command failed.\n",
" Command: %s\n Error: %s" % (' '.join(args), str(e)))
out, err = cmd.communicate(None)
if cmd.returncode != 0:
raise Exception("Execution of command failed with error code: %s\n%s\n"
% (cmd.returncode, err))
return out
def get_url(url, user=None, password=None, params=None, use_post=False):
if user is not None and password is not None:
pwdmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
pwdmgr.add_password(None, url, user, password)
auth_handler = urllib2.HTTPBasicAuthHandler(pwdmgr)
opener = urllib2.build_opener(auth_handler)
else:
opener = urllib2.build_opener()
if params is not None:
req_params = urllib.urlencode(params)
if use_post:
req_url = url
data = req_params
else:
req_url = "%s?%s" % (url, req_params)
data = None
else:
req_url = url
data = None
try:
if sys.version_info[:2] < (2,6):
resp = opener.open(req_url, data)
else:
resp = opener.open(req_url, data, timeoutHTTP)
except urllib2.URLError, e:
raise Exception("Retrieval of URL failed.\n"
" url: %s\n Error: %s" % (url, str(e)))
return socket_read(resp)
class NestedDict(dict):
"""Dictionary class facilitates creation of nested dictionaries.
This works:
NestedDict d
d[k1][k2][k3] ... = v
"""
def __getitem__(self, key):
"""x.__getitem__(y) <==> x[y]"""
try:
return dict.__getitem__(self, key)
except KeyError:
value = self[key] = type(self)()
return value
def set_nested(self, klist, value):
"""D.set_nested((k1, k2,k3, ...), v) -> D[k1][k2][k3] ... = v"""
keys = list(klist)
if len(keys) > 0:
curr_dict = self
last_key = keys.pop()
for key in keys:
if not curr_dict.has_key(key) or not isinstance(curr_dict[key],
NestedDict):
curr_dict[key] = type(self)()
curr_dict = curr_dict[key]
curr_dict[last_key] = value
class SoftwareVersion(tuple):
"""Class for parsing, storing and comparing versions.
All standard operations for tuple class are supported.
"""
def __init__(self, version):
"""Initialize the new instance of class.
@param version: Version must either be a string or a tuple of integers
or strings representing integers.
Version strings must begin with integer numbers separated by dots and
may end with any string.
"""
self._versionstr = '.'.join([str(v) for v in self])
def __new__(cls, version):
"""Static method for creating a new instance which is a subclass of
immutable tuple type. Versions are parsed and stored as a tuple of
integers internally.
@param cls: Class
@param version: Version must either be a string or a tuple of integers
or strings representing integers.
Version strings must begin with integer numbers separated by dots and
may end with any string.
"""
if isinstance(version, basestring):
mobj = re.match('(?P<version>\d+(\.\d+)*)(?P<suffix>.*)$', version)
if mobj:
version = [int(i) for i in mobj.groupdict()['version'].split('.')]
return tuple.__new__(cls, version)
else:
raise ValueError('Invalid version string format.')
else:
try:
return tuple.__new__(cls, [int(v) for v in version])
except:
raise TypeError("Version must either be a string or an iterable"
" of integers.")
def __str__(self):
"""Returns string representation of version.
"""
return self._versionstr
class TableFilter:
"""Class for filtering rows of tables based on filters on values of columns.
The tables are represented as nested lists (list of lists of columns.)
"""
def __init__(self):
"""Initialize Filter."""
self._filters = {}
def registerFilter(self, column, patterns, is_regex=False,
ignore_case=False):
"""Register filter on a column of table.
@param column: The column name.
@param patterns: A single pattern or a list of patterns used for
matching column values.
@param is_regex: The patterns will be treated as regex if True, the
column values will be tested for equality with the
patterns otherwise.
@param ignore_case: Case insensitive matching will be used if True.
"""
if isinstance(patterns, basestring):
patt_list = (patterns,)
elif isinstance(patterns, (tuple, list)):
patt_list = list(patterns)
else:
            raise ValueError("The patterns parameter must either be a string "
"or a tuple / list of strings.")
if is_regex:
if ignore_case:
flags = re.IGNORECASE
else:
flags = 0
patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
else:
if ignore_case:
patt_exprs = [pattern.lower() fo
|
Eagles2F/sync-engine
|
migrations/versions/115_eas_twodevices_turn.py
|
Python
|
agpl-3.0
| 3,146
| 0.003179
|
"""EAS two-devices turn
Revision ID: 17dc9c049f8b
Revises: ad7b856bcc0
Create Date: 2014-10-21 20:38:14.311747
"""
# revision identifiers, used by Alembic.
revision = '17dc9c049f8b'
down_revision = 'ad7b856bcc0'
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
from inbox.ignition import main_engine
engine = main_engine()
if not engine.has_table('easaccount'):
return
from inbox.models.session import session_scope
Base = sa.ext.declarative.declarative_base()
Base.metadata.reflect(engine)
class EASAccount(Base):
__table__ = Base.metadata.tables['easaccount']
primary_device = sa.orm.relationship(
'EASDevice', primaryjoin='and_(EASAccount.primary_device_id == EASDevice.id, '
'EASDevice.deleted_at.is_(None))', uselist=False)
secondary_device = sa.orm.relationship(
'EASDevice', primaryjoin='and_(EASAccount.secondary_device_id == EASDevice.id, '
'EASDevice.deleted_at.is_(None))', uselist=False)
class EASDevice(Base):
__table__ = Base.metadata.tables['easdevice']
with session_scope(versioned=False) as \
db_session:
accts = db_session.query(EASAccount).all()
for a in accts:
# Set both to filtered=False, //needed// for correct deploy.
primary = EASDevice(created_at=datetime.utcnow(),
updated_at=datetime.utcnow(),
filtered=False,
eas_device_id=a._eas_device_id,
eas_device_type=a._eas_device_type,
eas_policy_key=a.eas_policy_key,
eas_sync_key=a.eas_account_sync_key)
secondary = EASDevice(created_at=datetime.utcnow(),
updated_at=datetime.utcnow(),
filtered=False,
eas_device_id=a._eas_device_id,
eas_device_type=a._eas_device_type,
eas_policy_key=a.eas_policy_key,
eas_sync_key=a.eas_account_sync_key)
a.primary_device = primary
a.secondary_device = secondary
db_session.add(a)
db_session.commit()
conn = op.get_bind()
acct_device_map = dict(
(id_, device_id) for id_, device_id in conn.execute(text(
"""SELECT id, secondary_device_id from easaccount""")))
print 'acct_device_map: ', acct_device_map
for acct_id, device_id in acct_device_map.iteritems():
conn.execute(text("""
UPDATE easfoldersyncstatus
SET device_id=:device_id
WHERE account_id=:acct_id
"""), device_id=device_id, acct_id=acct_id)
conn.execute(text("""
UPDATE easuid
SET device_id=:device_id
WHERE easaccount_id=:acct_id
"""), device_id=device_id, acct_id=acct_id)
def downgrade():
raise Exception('!')
|
Wicloz/AnimeNotifierBot
|
bot.py
|
Python
|
mit
| 8,060
| 0.005335
|
import asyncio
import pip
import pickle
import os.path
try:
import discord
except:
pip.main(['install', 'git+https://github.com/Rapptz/discord.py@async#egg=discord.py'])
try:
import win_unicode_console
except:
0
try:
from lxml import html
except ImportError:
pip.main(['install', 'lxml'])
try:
from bs4 import BeautifulSoup
except ImportError:
pip.main(['install', 'BeautifulSoup4'])
from config import Config
from kissanimeConnector import KissDownloader
class User(object):
def __init__(self, userId, userUrl):
self.discordUser = discord.User()
self.discordUser.id = userId
self.id = userId
self.kissUrl = userUrl
self.malUrl = ''
self.ttsChannel = ''
class AnimeBot(discord.Client):
def __init__(self, config_file='config/options.txt', user_file='config/users.txt'):
super().__init__()
self.config = Config(config_file)
self.kissAnime = KissDownloader()
self.users = []
if os.path.isfile(user_file):
with open(user_file, 'rb') as file:
self.users = pickle.load(file)
def run(self):
return super().run(self.config.username, self.config.password)
async def event_loop(self):
await asyncio.sleep(1)
while True:
for user in self.users:
try:
await self.check_for_user(user)
except Exception as e:
print(e)
print('Could not check updates for %s' % user.id)
await asyncio.sleep(300)
async def on_ready(self):
try:
win_unicode_console.enable()
except:
0
await self.change_status(game=discord.Game(name='with Eruru\'s tail'))
print('Connected!\n')
print('Username: %s' % self.user.name)
print('Bot ID: %s' % self.user.id)
print()
print('Command prefix is %s'% self.config.command_prefix)
print()
print('--Connected Servers List--')
if self.servers:
[print(s) for s in self.servers]
else:
print('No servers have been joined yet.')
print()
print('--Users Registered--')
if len(self.users) > 0:
for user in self.users:
print(user.id + ' - ' + user.kissUrl)
else:
print('No users have registered yet.')
print()
print('--Log--')
handler = getattr(self, 'event_loop', None)
await handler()
async def on_message(self, message):
if (message.channel.is_private) and (message.author != self.user) and (message.content.startswith(self.config.command_prefix)):
command = message.content[:message.content.find(' ')].replace(self.config.command_prefix, '')
data = message.content[message.content.find(' ')+1:]
if command == 'register':
if data.startswith('https://kissanime.to/MyList/'):
self.handle_register_user(message.author.id, data)
elif data.isdigit():
self.handle_register_user(message.author.id, 'https://kissanime.to/MyList/' + data)
if command == 'settts':
self.handle_set_tts(message.author.id, data)
def handle_register_user(self, userId, userUrl):
if self.get_user(userId) == 0:
user = User(userId, userUrl)
            self.users.append(user)
print('Added user \'%s\' with url \'%s\'' % (userId, userUrl))
else:
            self.get_user(userId).kissUrl = userUrl
print('Updated bookmark url for user \'%s\'' % userId)
with open('config/users.txt', 'wb') as file:
pickle.dump(self.users, file)
def handle_set_tts(self, userId, channel):
user = self.get_user(userId)
if not user == 0:
user.ttsChannel = channel
print('Updated tts channel for \'%s\'' % userId)
with open('config/users.txt', 'wb') as file:
pickle.dump(self.users, file)
def get_user(self, userId):
for user in self.users:
if user.id == userId:
return user
return 0
async def check_for_user(self, user):
print('Checking bookmarks for \'%s\'...' % user.id)
cachedFilePath = 'cache/%s.dat' % user.id
kissDomain = 'https://kissanime.to'
colonId = '(*:*)'
# Download the users bookmark page
if os.path.isfile('bookmarkpage.html'):
with open('bookmarkpage.html', 'r') as file:
bookmarkPage = file.read()
else:
bookmarkPage = self.kissAnime.downloadPage(user.kissUrl).replace('\\r\\n', '')
#with open('bookmarkpage.html', 'w') as file:
# file.write(bookmarkPage)
# Turn the page into a list
newList = self.kiss_list_from_bookmarks(bookmarkPage)
# Load the old list from the file
oldList = {}
if os.path.isfile(cachedFilePath):
for line in open(cachedFilePath, 'r'):
try:
key, value = line.strip().split(': ')
key = key.replace(colonId, ':')
oldList[key] = tuple(value.replace('\'', '').replace('(', '').replace(')', '').split(', '))
except:
0 #best code evah
# Compare the lists and send messages
for key, newValue in newList.items():
try:
oldValue = oldList[key]
except:
oldValue = (newValue[0], '')
if oldValue[0] != newValue[0]:
await self.send_message(user.discordUser, 'The anime **%s** has just aired episode %s!\n%s' % (key, newValue[0], kissDomain + newValue[1]))
if (user.ttsChannel) and not (user.ttsChannel == ''):
channel = self.get_channel_class(user.ttsChannel.split('/')[0], user.ttsChannel.split('/')[1])
if not channel == 0:
message = 'The any may %s has just aired episode %s!' % (key.replace('.', '').replace('!', '').replace(',', '').replace(':', '').replace(';', ''), newValue[0])
await self.send_message(channel, message, tts=True)
# Save the new list into the file
with open(cachedFilePath, 'w') as file:
for key, value in newList.items():
file.write('%s: %s\n' % (key.replace(':', colonId), value))
print('Done checking bookmarks for \'%s\'!' % user.id)
def get_channel_class(self, serverId, channelId):
for server in self.servers:
if server.id == serverId:
for channel in server.channels:
if channel.id == channelId:
return channel
return 0
def kiss_list_from_bookmarks(self, content):
dataList = {}
table = content[content.find('<table class="listing">'):content.find('</table>')]
table = table[table.find('<tr class="trAnime'):table.find('</tbody>')]
rows = table.split('</tr>')
del rows[-1]
for row in rows:
try:
row += '</tr>'
soup = BeautifulSoup(row, 'html.parser')
key = soup.find_all('a')[1].string.strip()
episode = soup.find_all('a')[2].string.replace('Episode', '').replace('(', '[').replace(')', ']').strip()
link = soup.find_all('a')[1].get('href')
dataList[key] = (episode, link)
except:
0
return dataList
def kiss_latest_episode(self, content):
bowl = BeautifulSoup(content, 'html.parser').table
soup = BeautifulSoup(str(bowl), 'html.parser')
episode = soup.find_all('a')[0].string[-3:]
return episode
if __name__ == '__main__':
bot = AnimeBot()
bot.run()
|
trec-dd/trec-dd-simulation-harness
|
trec_dd/__init__.py
|
Python
|
mit
| 272
| 0
|
'''trec_dd.* namespace package can have several subpackages, see
http://github.com/trec-dd for more info
.. This software is released under an MIT/X11 open source license.
Copyright 2015 Diffeo, Inc.
'''
import pkg_resources
pkg_resources.declare_namespace(__name__)
|
CanalTP/navitia
|
source/jormungandr/jormungandr/scenarios/journey_filter.py
|
Python
|
agpl-3.0
| 32,453
| 0.00265
|
# Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
import itertools
import datetime
import abc
import six
from jormungandr.scenarios.utils import compare, get_or_default
from navitiacommon import response_pb2
from jormungandr.utils import pb_del_if, ComposedFilter, portable_min
from jormungandr.fallback_modes import FallbackModes
from jormungandr.scenarios.qualifier import get_ASAP_journey
def delete_journeys(responses, request):
if request.get('debug', False):
return
nb_deleted = 0
for r in responses:
nb_deleted += pb_del_if(r.journeys, lambda j: to_be_deleted(j))
if nb_deleted:
logging.getLogger(__name__).info('filtering {} journeys'.format(nb_deleted))
def to_be_deleted(journey):
return 'to_delete' in journey.tags
def mark_as_dead(journey, is_debug, *reasons):
journey.tags.append('to_delete')
if is_debug:
journey.tags.extend(('deleted_because_' + reason for reason in reasons))
@six.add_metaclass(abc.ABCMeta)
class SingleJourneyFilter(object):
"""
Interface to implement for filters applied to a single journey (no comparison)
"""
@abc.abstractmethod
def filter_func(self, journey):
"""
:return: True if the journey is valid, False otherwise
"""
pass
@abc.abstractproperty
def message(self):
"""
:attribute: a one-word, snake_case unicode to be used to explain why a journey is filtered
"""
pass
def filter_wrapper(filter_obj=None, is_debug=False):
"""
Wraps a SingleJourneyFilter instance to automatically deal with debug-mode, logging and tagging
If filtered, the journey is tagged 'to_delete' (regardless of the debug-mode)
In debug-mode, we deactivate filtering, only add a tag with the reason why it's deleted
The main purpose of debug mode is to have all journeys generated (even the ones that should be filtered)
and log ALL the reasons why they are filtered (if they are)
:param filter_obj: a SingleJourneyFilter to be wrapped (using its message and filter_func attributes)
:param is_debug: True if we are in debug-mode
:return: a function to be called on a journey, returning True or False,tagging it if it's deleted
"""
filter_func = filter_obj.filter_func
message = filter_obj.message
def wrapped_filter(journey):
logger = logging.getLogger(__name__)
res = filter_func(journey)
if not res:
logger.debug("We delete: {}, cause: {}".format(journey.internal_id, message))
mark_as_dead(journey, is_debug, message)
return res or is_debug
return wrapped_filter
def filter_journeys(responses, instance, request):
"""
Filter by side effect the list of pb responses's journeys
"""
is_debug = request.get('debug', False)
# DEBUG
if is_debug:
logger = logging.getLogger(__name__)
logger.debug("All journeys:")
[_debug_journey(j) for j in get_all_journeys(responses)]
logger.debug("Qualified journeys:")
[_debug_journey(j) for j in get_qualified_journeys(responses)]
# build filters
min_bike = request.get('_min_bike', None)
min_car = request.get('_min_car', None)
min_taxi = request.get('_min_taxi', None)
min_ridesharing = request.get('_min_ridesharing', None)
orig_modes = request.get('origin_mode', [])
dest_modes = request.get('destination_mode', [])
min_nb_transfers = request.get('min_nb_transfers', 0)
max_waiting_duration = request.get('max_waiting_duration')
if not max_waiting_duration:
max_waiting_duration = instance.max_waiting_duration
filters = [
FilterTooShortHeavyJourneys(
min_bike=min_bike, min_car=min_car, min_taxi=min_taxi, min_ridesharing=min_ridesharing
),
FilterTooLongWaiting(max_waiting_duration=max_waiting_duration),
FilterMinTransfers(min_nb_transfers=min_nb_transfers),
]
# TODO: we should handle this better....
if (request.get('_override_scenario') or instance._scenario_name) == 'distributed':
filters.append(FilterTooLongDirectPath(instance=instance, request=request))
# we add more filters in some special cases
max_successive = request.get('_max_successive_physical_mode', 0)
if max_successive != 0:
limited_mode_id = instance.successive_physical_mode_to_limit_id # typically : physical_mode:bus
filters.append(
FilterMaxSuccessivePhysicalMode(
successive_physical_mode_to_limit_id=limited_mode_id, max_successive_physical_mode=max_successive
)
)
dp = request.get('direct_path', 'indifferent')
    if dp != 'indifferent':
filters.append(FilterDirectPath(dp=dp))
dp_mode = request.get('direct_path_mode', [])
if dp_mode:
filters.append(FilterDirectPathMode(dp_mode))
# compose filters
composed_filter = ComposedFilter()
for f in filters:
composed_filter.add_filter(filter_wrapper(is_debug=is_debug, filter_obj=f))
journey_generator = get_qualified_journeys
if is_debug:
journey_generator = get_all_journeys
return composed_filter.compose_filters()(journey_generator(responses))
class FilterTooShortHeavyJourneys(SingleJourneyFilter):
message = 'too_short_heavy_mode_fallback'
def __init__(self, min_bike=None, min_car=None, min_taxi=None, min_ridesharing=None):
self.min_bike = min_bike
self.min_car = min_car
self.min_taxi = min_taxi
self.min_ridesharing = min_ridesharing
def filter_func(self, journey):
"""
We filter the journeys that use an "heavy" mode as fallback for a short time.
Typically you don't take your car for only 2 minutes.
Heavy fallback modes are Bike and Car, BSS is not considered as one.
We also filter too short direct path except for Bike
"""
def _exceed_min_duration(min_duration, total_duration):
return total_duration < min_duration
def _is_bike_direct_path(journey):
return len(journey.sections) == 1 and journey.sections[0].street_network.mode == response_pb2.Bike
# We do not filter direct_path bike journeys
if _is_bike_direct_path(journey=journey):
return True
on_bss = False
for s in journey.sections:
if s.type == response_pb2.BSS_RENT:
on_bss = True
elif s.type == response_pb2.BSS_PUT_BACK:
on_bss = False
elif s.type != response_pb2.STREET_NETWORK:
continue
min_mode = None
total_duration = 0
if s.street_network.mode == response_pb2.Car:
min_mode = self.min_car
total_duration = journey.durations.car
elif s.street_network.mod
|
lirui0081/depotwork
|
depotwork/questions/forms.py
|
Python
|
mit
| 1,000
| 0.023
|
from django import forms
from depotwork.questions.models import Question, Answer
class QuestionForm(forms.ModelForm):
title = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),
max_length=255)
description = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control'}),
max_length=2000)
tags = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),
max_length=255,
required=False,
help_text='Use spaces to separate the tags, such as "asp.net mvc5 javascript"')
class Meta:
model = Question
fields = ['title', 'description', 'tags']
class AnswerForm(forms.ModelForm):
question = forms.ModelChoiceField(widget=forms.HiddenInput(), queryset=Question.objects.all())
description = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control', 'rows':'4'}),
max_length=2000)
class Meta:
model = Answer
fields = ['question', 'description']
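A hedged usage sketch (assuming a configured Django project with this app installed): the widget attrs above only affect rendering, so bound-form validation works as for any ModelForm.

form = QuestionForm(data={
    'title': 'How do I profile a slow Django view?',
    'description': 'The view takes about three seconds; where should I start looking?',
    'tags': 'django profiling performance',
})
if form.is_valid():
    question = form.save(commit=False)  # fill in any remaining model fields before saving
else:
    print(form.errors)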
|
tylerbrockett/reddit-bot-buildapcsales
|
src/bot_modules/reddit_handler.py
|
Python
|
mit
| 3,944
| 0.002535
|
"""
==========================================
Author: Tyler Brockett
Username: /u/tylerbrockett
Description: Alert Bot (Formerly sales__bot)
Date Created: 11/13/2015
Date Last Edited: 12/20/2016
Version: v2.0
==========================================
"""
import praw
import traceback
from utils.logger import Logger
from utils.color import Color
from utils import output
from prawcore.exceptions import Redirect
from prawcore.exceptions import Forbidden
class RedditHandler:
def __init__(self, credentials):
output.startup_message(credentials)
self.credentials = credentials
self.reddit = self.connect()
self.NUM_POSTS = 20
def connect(self):
try:
reddit = praw.Reddit(
client_id=self.credentials['client_id'],
client_secret=self.credentials['client_secret'],
password=self.credentials['password'],
user_agent=self.credentials['user_agent'],
username=self.credentials['username'])
return reddit
except:
raise RedditHelperException('Error connecting to Reddit\n\n' + traceback.format_exc())
def disconnect(self):
self.reddit = None
def reset(self):
try:
self.disconnect()
self.reddit = self.connect()
except:
raise RedditHelperException(RedditHelperException.RESET_EXCEPTION + '\n\n' + traceback.format_exc())
def get_instance(self):
return self.reddit
def get_unread(self):
ret = []
unread = self.reddit.inbox.unread(limit=None)
for message in unread:
ret.append(message)
ret.reverse()
return ret
def get_message(self, message_id):
        return self.reddit.inbox.message(message_id)
def send_message(self, redditor, subject, body):
try:
self.reddit.redditor(redditor).message(subject, body)
except:
Logger.log(traceback.format_exc(), Color.RED)
raise RedditHelperException(RedditHelperException.SEND_MESSAGE_EXCEPTION)
def get_submissions(self, subreddit):
submissions = []
posts = 200 if (subreddit == 'all') else self.NUM_POSTS
try:
subs = self.reddit.subreddit(subreddit).new(limit=posts)
for submission in subs:
submissions.append(submission)
except Forbidden as e:
Logger.log(traceback.format_exc(), Color.RED)
return []
except Exception as e:
Logger.log(traceback.format_exc(), Color.RED)
raise RedditHelperException(RedditHelperException.GET_SUBMISSIONS_EXCEPTION)
return submissions
def get_original_message_id(self, received_message, database):
message = received_message
while message.parent_id and len(database.get_subscriptions_by_message_id(str(message.author), message.id)) == 0:
message = self.reddit.inbox.message(message.parent_id[3:])
return message.id
def check_invalid_subreddits(self, subreddits):
invalid = []
for subreddit in subreddits:
try:
for submission in self.reddit.subreddit(subreddit).new(limit=1):
print('subreddit is valid')
except Redirect: # was praw.errors.InvalidSubreddit without 'len()' around call in the try block
Logger.log(traceback.format_exc(), Color.RED)
invalid.append(subreddit)
return invalid
class RedditHelperException(Exception):
SEND_MESSAGE_EXCEPTION = 'Error sending message'
RESET_EXCEPTION = 'Error resetting connection to Reddit'
GET_SUBMISSIONS_EXCEPTION = 'Error getting submissions'
def __init__(self, error_args):
Exception.__init__(self, 'Reddit Exception: {0}'.format(error_args))
self.errorArgs = error_args
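A minimal, hypothetical way to drive this handler: the dictionary keys mirror what connect() reads, and the values here are placeholders (the real ones come from the bot's configuration).

credentials = {
    'client_id': 'CLIENT_ID',
    'client_secret': 'CLIENT_SECRET',
    'username': 'BOT_USERNAME',
    'password': 'BOT_PASSWORD',
    'user_agent': 'alert bot (by /u/tylerbrockett)',
}
reddit = RedditHandler(credentials)
for submission in reddit.get_submissions('buildapcsales'):
    print(submission.title)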
|
Wosser1sProductions/gr-lora
|
examples/lora-whitening/createWhiteningValues.py
|
Python
|
gpl-3.0
| 3,178
| 0.011328
|
#!/usr/bin/python2
import collections
import os
from loranode import RN2483Controller
# from ../_examplify.py import Examplify
import os
os.sys.path.append(os.path.dirname(os.path.abspath('.')))
from _examplify import Examplify
import lora, pmt, osmosdr
from gnuradio import gr, blocks
class ReceiveWhitening:
def __init__(self, sf = 7, output_file = './test_out.csv'):
self.target_freq = 868.1e6
self.sf = sf
self.samp_rate = 1e6
self.capture_freq = 868.0e6
self.offset = -(self.capture_freq - self.target_freq)
self.inputFile = './'
self.outputFile = output_file
self.tempFile = '/tmp/whitening_out'
self.tb = None
def captureSequence(self, inputFile):
self.inputFile = inputFile
if os.path.isfile(self.inputFile):
self.tb = gr.top_block()
self.file_source = blocks.file_source(gr.sizeof_gr_complex*1, self.inputFile, False) # Repeat input: True/False
self.lora_lora_receiver_0 = lora.lora_receiver(self.samp_rate, self.capture_freq, self.offset, self.sf, self.samp_rate)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, self.samp_rate, True)
self.tb.connect( (self.file_source, 0), (self.blocks_throttle_0, 0))
self.tb.connect( (self.blocks_throttle_0, 0), (self.lora_lora_receiver_0, 0))
self.tb.run()
self.tb = None
if os.path.isfile(self.tempFile):
if os.path.isfile(self.outputFile):
inf = open(self.tempFile, 'r')
seq = inf.read()
# print(seq)
out = open(self.outputFile, 'a')
out.write(seq)
out.close()
inf.close()
else:
raise Exception("[ReceiveWhitening] Outputfile '" + self.outputFile + "' does not exist!")
else:
raise Exception("[ReceiveWhitening] Tempfile '" + self.tempFile + "' does not exist!")
else:
raise Exception("[ReceiveWhitening] Inputfile '" + self.inputFile + "' does not exist!")
if __name__ == '__main__':
ofile = '/tmp/tmp_whitening.cfile'
testset = [ (7, "4/6"), (7, "4/7"), (8, "4/5"), (12, "4/6"), (9, "4/5"), (10, "4/5"), (11, "4/5"), (6, "4/5")]
for settings in testset:
dataf = './test_out_SF{0:d}_CR{1:s}.csv'.format(settings[0], '-'.join(settings[1].split('/')))
out = open(dataf, 'a')
out.close()
examplifr = Examplify(settings[0], settings[1], gains = [32, 38, 38])
        whitening = ReceiveWhitening(settings[0], dataf)
for i in range(8):
print("Sample {0:d} of 16".format(i))
examplifr.transmitToFile(['0' * 256] * 4, ofile)
            whitening.captureSequence(ofile)
for i in range(8):
print("Sample {0:d} of 16".format(i + 8))
examplifr.transmitToFile(['0' * 256] * 8, ofile)
whitening.captureSequence(ofile)
examplifr = None
whitening = None
|
exaile/exaile
|
xl/externals/sigint.py
|
Python
|
gpl-2.0
| 2,472
| 0.002427
|
#
# Allows GTK 3 python applications to exit when CTRL-C is raised
# From https://bugzilla.gnome.org/show_bug.cgi?id=622084
#
# Author: Simon Feltman
# License: Presume same as pygobject
#
import sys
import signal
from typing import ClassVar, List
from gi.repository import GLib
class InterruptibleLoopContext:
"""
Context Manager for GLib/Gtk based loops.
Usage of this context manager will install a single GLib unix signal handler
and allow for multiple context managers to be nested using this single handler.
"""
#: Global stack context loops. This is added to per InterruptibleLoopContext
#: instance and allows for context nesting using the same GLib signal handler.
_loop_contexts: ClassVar[List['InterruptibleLoopContext']] = []
#: Single source id for the unix signal handler.
_signal_source_id = None
@classmethod
def _glib_sigint_handler(cls, user_data):
context = cls._loop_contexts[-1]
context._quit_by_sigint = True
context._loop_exit_func()
# keep the handler around until we explicitly remove it
return True
def __init__(self, loop_exit_func):
self._loop_exit_func = loop_exit_func
self._quit_by_sigint = False
def __enter__(self):
# Only use unix_signal_add if this is not win32 and there has
# not already been one.
if sys.platform != 'win32' and not InterruptibleLoopContext._loop_contexts:
# Add a glib signal handler
source_id = GLib.unix_signal_add(
GLib.PRIORITY_DEFAULT, signal.SIGINT, self._glib_sigint_handler, None
)
InterruptibleLoopContext._signal_source_id = source_id
InterruptibleLoopContext._loop_contexts.append(self)
def __exit__(self, exc_type, exc_value, traceback):
context = InterruptibleLoopContext._loop_contexts.pop()
assert self == context
# if the context stack is empty and we have a GLib signal source,
# remove the source from GLib and clear out the variable.
if (
not InterruptibleLoopContext._loop_contexts
and InterruptibleLoopContext._signal_source_id is not None
):
GLib.source_remove(InterruptibleLoopContext._signal_source_id)
InterruptibleLoopContext._signal_source_id = None
if self._quit_by_sigint:
# caught by _glib_sigint_handler()
raise KeyboardInterrupt
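# --- Illustrative usage sketch (editor addition, not part of the original
# module). It shows the intended pattern: wrap a blocking GLib main loop so
# that Ctrl-C quits the loop and is then re-raised as KeyboardInterrupt.
#
# loop = GLib.MainLoop()
# with InterruptibleLoopContext(loop.quit):
#     loop.run()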
|
skosukhin/spack
|
var/spack/repos/builtin/packages/texlive/package.py
|
Python
|
lgpl-2.1
| 3,446
| 0.00058
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Texlive(Package):
"""TeX Live is a free software distribution for the TeX typesetting
    system. Heads up, it is not a reproducible installation."""
homepage = "http://www.tug.org/texlive"
# Install from specific site because the texlive mirrors do not
# all update in synchrony.
#
# BEWARE: TexLive updates their installs frequently (probably why
# they call it *Live*...). There is no good way to provide a
# repeatable install of the package.
#
# We're now pulling the installation bits from tug.org's repo of
# historic bits. This means that the checksum for the installer
# itself is stable. Don't let that fool you though, it's still
# installing TeX **LIVE** from e.g. ctan.math.... below, which is
# not reproducible.
version('live', '8f8fc301514c08a89a2e97197369c648',
url='ftp://tug.org/historic/systems/texlive/2017/install-tl-unx.tar.gz')
# There does not seem to be a complete list of schemes.
# Examples include:
# full scheme (everything)
# medium scheme (small + more packages and languages)
# small scheme (basic + xetex, metapost, a few languages)
# basic scheme (plain and latex)
# minimal scheme (plain only)
# See:
# https://www.tug.org/texlive/doc/texlive-en/texlive-en.html#x1-25025r6
variant(
'scheme',
default='small',
values=('minimal', 'basic', 'small', 'medium', 'full'),
description='Package subset to install'
)
depends_on('perl', type='build')
def install(self, spec, prefix):
# Using texlive's mirror system leads to mysterious problems,
# in lieu of being able to specify a repository as a variant, hardwire
# a particular (slow, but central) one for now.
_repository = 'http://ctan.math.washington.edu/tex-archive/systems/texlive/tlnet/'
env = os.environ
env['TEXLIVE_INSTALL_PREFIX'] = prefix
perl = which('perl')
scheme = spec.variants['scheme'].value
perl('./install-tl', '-scheme', scheme,
'-repository', _repository,
'-portable', '-profile', '/dev/null')
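    # Illustrative usage note (editor addition, not part of the original
    # recipe): with this package file in a Spack repository, the 'scheme'
    # variant declared above is picked on the spec command line, e.g.
    #     spack install texlive scheme=medium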
|
jeffrimko/Qprompt
|
examples/ask_2.py
|
Python
|
mit
| 177
| 0
|
import os
import qprompt
path = qprompt.ask_str("Enter path to file", vld=lambda x: os.path.isfile(x))
size = qprompt.ask_int("Enter number less than 10", vld=lambda x: x < 10)
|
jomolinare/kobocat
|
onadata/apps/main/tests/test_form_show.py
|
Python
|
bsd-2-clause
| 18,586
| 0
|
import os
from unittest import skip
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from onadata.apps.main.views import show, form_photos, update_xform, profile,\
enketo_preview
from onadata.apps.logger.models import XForm
from onadata.apps.logger.views import download_xlsform, download_jsonform,\
download_xform, delete_xform
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.viewer.views import export_list, map_view
from onadata.libs.utils.logger_tools import publish_xml_form
from onadata.libs.utils.user_auth import http_auth_string
from test_base import TestBase
class TestFormShow(TestBase):
def setUp(self):
TestBase.setUp(self)
self._create_user_and_login()
self._publish_transportation_form()
self.url = reverse(show, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
def test_show_form_name(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.xform.id_string)
def test_hide_from_anon(self):
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 302)
def test_hide_from_not_user(self):
self._create_user_and_login("jo")
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_show_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 200)
def test_dl_xlsx_xlsform(self):
self._publish_xlsx_file()
response = self.client.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': 'exp_one'
}))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['Content-Disposition'],
"attachment; filename=exp_one.xlsx")
def test_dl_xls_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_xls_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_json_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_jsonp_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
callback = 'jsonpCallback'
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), {'callback': callback})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.startswith(callback + '('), True)
self.assertEqual(response.content.endswith(')'), True)
def test_dl_json_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_json_for_cors_options(self):
response = self.anon.options(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
allowed_headers = ['Accept', 'Origin', 'X-Requested-With',
'Authorization']
control_headers = response['Access-Control-Allow-Headers']
provided_headers = [h.strip() for h in control_headers.split(',')]
self.assertListEqual(allowed_headers, provided_headers)
self.assertEqual(response['Access-Control-Allow-Methods'], 'GET')
self.assertEqual(response['Access-Control-Allow-Origin'], '*')
def test_dl_xform_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_xform_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_xform_for_authenticated_non_owner(self):
self._create_user_and_login('alice', 'alice')
response = self.client.get(reverse(download_xform, kwargs={
'username': 'bob',
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_show_private_if_shared_but_not_data(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertContains(response, 'PRIVATE')
def test_show_link_if_shared_and_data(self):
self.xform.shared = True
self.xform.shared_data = True
self.xform.save()
self._submit_transport_instance()
response = self.anon.get(self.url)
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
}))
def test_show_link_if_owner(self):
self._submit_transport_instance()
response = self.client.get(self.url)
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
}))
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
}))
self.assertNotContains(response, reverse(map_view, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
# check that a form with geopoints has the map url
response = self._publish_xls_file(
os.path.join(
os.path.dirname(__file__), "fixtures", "gps", "gps.xls"))
self.assertEqual(response.status_code, 200)
        self.xform = XForm.objects.latest('date_created')
show_url = reverse(show, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
map_url = reverse(map_view, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
response = self.client.get(show_url)
# check that map url doesnt show before we have submissions
self.assertNotContains(response, map_url)
# make a submission
self._make_submission(
os.path.join(
os.path.dirname(__file__), "fixtures", "gps", "instances",
"gps_1980-01-23_20-52-08.xml")
)
self.assertEqual(self.resp
|
ceph/ceph-docker
|
maint-lib/stagelib/git.py
|
Python
|
apache-2.0
| 1,889
| 0.004764
|
# Copyright (c) 2017 SUSE LLC
import logging
import re
import subprocess
# Run a command, and return the result in string format, stripped. Return None if command fails.
def _run_cmd(cmd_array):
try:
return subprocess.check_output(cmd_array).decode("utf-8").strip()
except subprocess.CalledProcessError as c:
logging.warning('Command {} return error code [{}]:'.format(c.cmd, c.returncode))
return None
def get_repo():
"""Returns the current git repo; or 'Unknown repo' if there is an error."""
repo = _run_cmd(['git', 'ls-remote', '--get-url', 'origin'])
return 'Unknown repo' if repo is None else repo
def get_branch():
"""Returns the current git branch; or 'Unknown branch' if there is an error."""
branch = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
return 'Unknown branch' if branch is None else branch
def get_hash():
"""Returns the current git commit hash; or 'Unknown commit hash' if there is an error."""
commithash = _run_cmd(['git', 'rev-parse', '--verify', 'HEAD'])
return 'Unknown commit hash' if commithash is None else commithash
def file_is_dirty(file_path):
"""If a file is new, modified, or deleted in git's tracking return True. False otherwise."""
file_status_msg = _run_cmd(['git', 'status', '--untracked-files=all', str(file_path)])
# git outputs filename on a line prefixed by whitespace if the file is new/modified/deleted
if re.match(r'^\s*' + file_path + '$', file_status_msg):
return True
return False
def branch_is_dirty():
"""
If any files are new, modified, or deleted in git's tracking return True. False otherwise.
"""
branch_status_msg = _run_cmd(['git', 'status', '--untracked-files=all', '--porcelain'])
# --porcelain returns no output if no changes
if branch_status_msg:
return True
return False
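# --- Illustrative usage sketch (editor addition, not part of the original
# module). Each helper degrades to an 'Unknown ...' string (or just logs a
# warning) when git is unavailable, so the demo is safe to run anywhere.
if __name__ == '__main__':
    print('repo:   {0}'.format(get_repo()))
    print('branch: {0}'.format(get_branch()))
    print('commit: {0}'.format(get_hash()))
    print('dirty:  {0}'.format(branch_is_dirty()))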
|
brunetto/MasterThesisCode
|
master_code/otherMiscCode/CF_CCF-code-template/serial.py
|
Python
|
mit
| 14,251
| 0.008841
|
#!/usr/bin/env python
##########################
# Modules import.
##########################
import f_dist # to prevent segmentation fault
import os
import logging
import sys, argparse
import numpy as np
import time
from shutil import copy as shcopy
import modules as mod
####################################################################################
# MAIN #
####################################################################################
def main(argv=None):
###########################################
# Parameters and arguments parsing
###########################################
tt_glob = time.time()
# Parameters load from config file.
v = mod.var('../config.txt')
if argv is None:
# Check for CLI variables.
argv = sys.argv
parser = argparse.ArgumentParser()
parser.add_argument('-f1', '-file_1', '--file_1', action='store', dest='file_1', default=v.file_1,
help='Filename of the first set')
parser.add_argument('-f2', '-file_2', '--file_2', action='store', dest='file_2', default=v.file_2,
help='Filename of the second set')
parser.add_argument('-l', '--leafsize', action='store', dest='leafsize', type=int, default=v.leafsize,
help='Max number of particles in a leaf')
parser.add_argument('-n', '--num_part', action='store', dest='num_part', type=int, default=v.n_sel,
help='Number of particles to be selected')
parser.add_argument('-m', '-m_factor', '--m_factor', action='store', dest='m_factor', default=1,
help='How many randoms respect to the data?')
parser.add_argument('-s', '--strategy', action='store', dest='strategy', default=v.strategy,
help='Counting strategy: log_nosqrt_sort, log_nosqrt_nosort')
parser.add_argument('-t', '--test', action='store_true', dest='test', default=None,
help='Test run.')
parser.add_argument('-log', '-lg', '--log', action='store', dest='log_file', default=None,
help='Log file basename')
parser.add_argument('-c', '--console', action='store_true', dest='console', default=False,
help='Add a console output')
parser.add_argument('-sl', '--slicing', action='store_true', dest='slicing', default=False,
help='Activate the slicing on the second set.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
cli = parser.parse_args()
# Overwrite the config file parameters
v.file_1 = cli.file_1
v.file_2 = cli.file_2
v.leafsize = cli.leafsize
v.n_sel = cli.num_part
m_factor = int(cli.m_factor)
v.strategy = cli.strategy
v.log_file = cli.log_file
v.test = cli.test
v.console = cli.console
slicing = cli.slicing
elif isinstance(argv, dict):
        # Reading variables passed to Main as a function (Guido docet
# http://www.artima.com/weblogs/viewpost.jsp?thread=4829).
for i in argv.keys():
if i in ("-f1", "--file_1"):
v.file_1 = argv[i]
elif i in ("-f2", "--file_2"):
v.file_2 = argv[i]
elif i in ("-l", "--leafsize"):
v.leafsize = argv[i]
elif i in ("-n", "--num_part"):
v.n_sel = argv[i]
elif i in ("-s", "--strategy"):
v.strategy = argv[i]
elif i in ("-log", "-lg", "--log"):
v.strategy = argv[i]
elif i in ("-t", "--test"):
v.test = True
elif i in ("-c", "--console"):
v.console = argv[i]
elif i in ("-sl", "--slicing"):
slicing = argv[i]
else:
print "Wrong parameter passed to main function, exit!!!"
sys.exit(1)
##################
# Logger
##################
# Create logger.
logger = logging.getLogger("Main_log")
logging.captureWarnings(True)
logger.setLevel(logging.DEBUG)
# Create file handler which logs even debug messages.
fh = logging.FileHandler(v.log_file+".log", 'w')
fh.setLevel(logging.DEBUG)
# Create formatter.
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
# Add the handlers to the logger.
logger.addHandler(fh)
if v.console is True:
# Create console handler.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Start logging.
logger.info("Log started.")
logger.info("Process PID is %s", os.getpid())
logger.info("Copying proc status file at the beginning.")
status_src = '/proc/'+str(os.getpid())+'/status'
status_dest = '../logs/status'+str(os.getpid())+'@beginning'
shcopy(status_src, status_dest)
#################
# Test
#################
# Check for test run.
if v.test is not None:
logger.info("Selected test %s", cli.test)
if v.test == True:
stats_array = mod.trav_test(v, True)
logger.info("plotting test data...")
mod.plot_trav_dat(v.log_file+"_trav"+".dat", v.log_file+"_dist"+".dat")
logger.info("End test.")
logger.info( "Wall time for all %s", time.time()-tt_glob)
return 42
else:
logger.error("Something wrong in test selection.")
else:
logger.info("This is not a test!")
# Dictionary for statistics.
stats = {}
#######################################
# Data reading or creation
######################################
# Retrieve the positions.
p_1 = mod.hdf5_data(v.path, v.file_1, v.n_sel)
# Binary gif files.
#p_1 = mod.gif2(v.file_1)
#p_2 = mod.gif2(v.file_2)
# Random data for test.
#p_1 = mod.random_data(10000, b_size_1, 0)
#p_2 = mod.random_data(10000, b_size_1, 0)
    # Useful spatial info about the first set.
min_1 = np.amin(p_1, 0)
max_1 = np.amax(p_1, 0)
b_size_1 = max_1 - min_1
offset_1 = np.array([min_1[0],0,0])
if slicing == False:
logger.info("slicing is off.")
p_2 = mod.hdf5_data(v.path, v.file_2, v.n_sel)
elif slicing == True:
logger.info("slicing is on.")
p_2 = mod.hdf5_data_slice(v.path, v.file_2, min_1[0],
max_1[0], v.r_max, v.n_sel)
else:
print "problem with slicing choice"
    # Useful spatial info about the second set.
min_2 = np.amin(p_2, 0)
max_2 = np.amax(p_2, 0)
b_size_2 = max_2 - min_2
offset_2 = np.array([min_2[0],0,0])
logger.info("First set limits %s %s", min_1, max_1)
logger.info("Second set limits %s %s", min_2, max_2)
stats['Set 1'] = p_1.shape[0]
stats['Set 2'] = p_2.shape[0]
logger.info("path is %s", v.path)
logger.info("filenames %s, %s", v.file_1, v.file_2)
# Check for data self correlation.
if p_1.size == p_2.size:
self_corr = (p_1==p_2).all()
else:
self_corr = False
logger.info("Data self correlation is %s", self_corr)
logger.info("We are goingo to create random %s * dim_data.", m_factor)
# Generate random.
random_1 = mod.random_data(p_1.shape[0]*m_factor, b_size_1, offset_1) #fof
if self_corr == True:
random_2 = random_1
else:
random_2 = mod.random_data(p_2.shape[0]*m_factor, b_size_2, offset_2) #particles
# Create the result files.
result_file = open(v.log_file+'-result.dat', 'a')
###################
# Binning
###################
# Generate binning.
logger.info("Binning...")
shell, r = mod.binning(v.r_min, v.r_max, v.r_step, v.strategy)
# Save binning.
result_file.write("Shells ")
np.savetxt(result_file, shell[np.newaxis,:])
result_file.write("Radii ")
np.savetxt(result_file, r[np.newaxis,:])
result_file.flus
|
devnull5475/SI_ORAWSV_POC
|
src/test/py/test.py
|
Python
|
gpl-2.0
| 206
| 0.004854
|
#!/bin/python
import suds
from suds.client import Client
u = 'http://owsx:owsx_user@localhost:8080/orawsv/OWSX/OWSX_UTL/PAY_RAISE'
h = {'User-Agent':'Mozilla/4.0'}
client = Client(u)
print(client)
|
xeroz/admin-django
|
apps/players/migrations/0007_auto_20170806_1435.py
|
Python
|
mit
| 561
| 0.001783
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-06 14:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('players', '0006_statistics'),
]
operations = [
migrations.AlterField(
model_name='statistics',
name='player',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='statistic', to='players.Player'),
),
]
|
federicotdn/piter
|
src/colors.py
|
Python
|
gpl-3.0
| 477
| 0.069182
|
import curses
from pygments import token
class ColorProfile:
def __init__(self):
self._map = {
token.Text : 4,
token.Keyword : 3,
token.Comment : 2,
token.Number : 5,
token.Name : 6,
token.Error : 7,
token.Punctuation : 8,
token.Whitespace : 0
}
def color_for(self, attr):
#this should return a curses color_pair depending
        #on 'attr', which is a Pygments token type
if not attr in self._map:
return 1
else:
return self._map[attr]
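# --- Illustrative usage sketch (editor addition, not part of the original
# module). color_for() only returns a pair index; registering the matching
# curses color pairs is left to the caller.
if __name__ == '__main__':
    profile = ColorProfile()
    print(profile.color_for(token.Keyword))   # mapped type -> 3
    print(profile.color_for(token.Operator))  # unmapped type -> fallback 1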
|
0909023/Dev6B_English_Website
|
DjangoWebProject1/DjangoWebProject1/app/models.py
|
Python
|
mit
| 1,227
| 0.00978
|
"""
Definition of models.
"""
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# python manage.py makemigrations app
progress = models.IntegerField(default=0)
progress_v1 = models.IntegerField(default=0)
progress_v2 = models.IntegerField(default=0)
progress_v3 = models.IntegerField(default=0)
progress_g1 = models.IntegerField(default=0)
progress_g2 = models.IntegerField(default=0)
progress_g3 = models.IntegerField(default=0)
progress_v1.contribute_to_class(User, 'progress_v1')
progress_v2.contribute_to_class(User, 'progress_v2')
progress_v3.contribute_to_class(User, 'progress_v3')
progress_g1.contribute_to_class(User, 'progress_g1')
progress_g2.contribute_to_class(User, 'progress_g2')
progress_g3.contribute_to_class(User, 'progress_g3')
progress.contribute_to_class(User, 'progress')
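# --- Illustrative usage sketch (editor addition, not part of the original
# module). After the contribute_to_class() calls above (plus a migration for
# the auth app), the counters live directly on User instances, e.g.:
#
# user = User.objects.get(username='alice')
# user.progress_v1 += 1
# user.save()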
class exersize(models.Model):
category = models.CharField(max_length = 50, default=0)
question = models.CharField(max_length = 150, default=0)
level = models.CharField(max_length = 2, default=0)
class exersizeAnswerJoin(models.Model):
answer = models.CharField(max_length = 30, default=0)
ex_id = models.ForeignKey(exersize, on_delete = models.CASCADE)
|
DanielAndreasen/SWEET-Cat
|
checkDuplicates.py
|
Python
|
mit
| 4,561
| 0.000658
|
import pandas as pd
import numpy as np
import warnings
from clint.textui import colored
warnings.simplefilter("ignore")
class Sweetcat:
"""Load SWEET-Cat database"""
def __init__(self):
# self.fname_sc = 'WEBSITE_online_EU-NASA_full_database.rdb'
self.fname_sc = 'WEBSITE_online_EU-NASA_full_database_clean.rdb'
# Loading the SweetCat database
self.readSC()
def readSC(self):
# TODO: Use the ra and dec, and match with coordinates instead of name
# stored in self.coordinates.
# Read the current version of SWEET-Cat
names_ = ['name', 'hd', 'ra', 'dec', 'V', 'Verr', 'p', 'perr',
'pflag', 'Teff', 'Tefferr', 'logg', 'logger',
'n1', 'n2', 'vt', 'vterr', 'feh', 'feherr', 'M', 'Merr',
'author', 'link', 'source', 'update', 'comment', 'database',
'n3']
# SC = pd.read_csv('WEBSITE_online.rdb', delimiter='\t', names=names_)
SC = pd.read_csv(self.fname_sc, delimiter='\t', names=names_)
# Clean star names
self.sc_names = [x.lower().replace(' ', '').replace('-', '') for x in SC.name]
self.sc_names = list(map(str.strip, self.sc_names))
# Original star names
self.sc_names_orig = [x.strip() for x in SC.name]
# Coordinates of the stars in SWEET-Cat
self.coordinates = SC.loc[:, ['ra', 'dec']]
# SWEET-Cat (used to automatically update the database label)
self.SC = SC
if __name__ == '__main__':
# Loading SWEET Cat
sc = Sweetcat()
# Check for duplicates, subset of columns can be changed
print('\nChecking for possible duplicates ...')
print(colored.green('Same RA/DEC'))
print(sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)][['name',
'hd',
'ra',
'dec']])
print(colored.green('\nSame HD number'))
print(sc.SC[sc.SC.duplicated(['hd'],
keep=False)].dropna(subset=['hd'])[['name',
'hd',
'ra',
'dec']])
    print(colored.green('\nApproximate RA/DEC ...'))
# Remove the characters after the . in the coordinates
ra_sc = sc.SC['ra'].values.tolist()
ra_approx = list(map(lambda i: i[:i.find('.')], ra_sc))
dec_sc = sc.SC['dec'].values.tolist()
dec_approx = list(map(lambda i: i[:i.find('.')], dec_sc))
# Check for similar RA/DEC
idx_duplicate = []
for idx, (ra, dec) in enumerate(zip(ra_approx, dec_approx)):
dupli = list(np.where((np.array(ra_approx) == ra) &
(np.array(dec_approx) == dec))[0])
if len(dupli) > 1:
idx_duplicate.append(dupli)
# Print possible duplicates
print(colored.green('RA/DEC are similar: possible duplicates\n'))
unique_duplicate = set([tuple(t) for t in idx_duplicate])
for idx in unique_duplicate:
print(sc.SC.iloc[list(idx)][['name', 'hd', 'ra', 'dec']])
# Remove the -1.0 in microturbulence and its error
sc.SC[sc.SC['vt'] < 0.0][['name', 'hd', 'ra', 'dec',
'vt', 'vterr', 'author', 'link']]
# Change the value of a given cell
# sc.SC.at[9, 'vt'] = 1.44
# sc.SC.at[9, 'vterr'] = np.nan
# Uncomment some of the following lines to remove duplicates
# Indexes of the duplicates
# indexes = sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)].index
# Remove a row (HD21749)
# new_sc = sc.SC.drop([2728])
# # Write the new file
# # Convert Tefferr column to integers
# new_sc['Tefferr'] = new_sc['Tefferr'].fillna('-111111')
# new_sc['Tefferr'] = new_sc['Tefferr'].astype(int).replace(-111111, 'NULL')
# # Replace NaN by NULL
# new_sc.fillna(value='NULL', inplace=True)
# new_sc.to_csv('WEBSITE_online_EU-NASA_full_database_clean_09-03-2020.rdb',
# sep='\t', index=False, header=False)
# # Select only the EU data
# sc_EU = new_sc[new_sc['database'].str.contains('EU')]
# # Drop the database column
# sc_like_old = sc_EU.drop(columns=['database'])
# sc_like_old.to_csv('WEBSITE_online_EU-updated_09-03-2020.rdb',
# sep='\t', index=False, header=False)
|
imminent-tuba/thesis
|
server/chatterbot/chatterbot/adapters/logic/no_knowledge_adapter.py
|
Python
|
mit
| 659
| 0
|
from .logic import LogicAdapter
class NoKnowledgeAdapter(LogicAdapter):
"""
This is a system adapter that is automatically added
    to the list of logic adapters during initialization.
This adapter is placed at the beginning of the list
to be given the highest priority.
"""
def process(self, statement):
"""
        If there are no known responses in the database,
then a confidence of 1 should be returned with
the input statement.
Otherwise, a confidence of 0 should be returned.
"""
if self.context.storage.count():
return 0, statement
return 1, statement
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/AlignIO/NexusIO.py
|
Python
|
gpl-2.0
| 7,881
| 0.002665
|
# Copyright 2008-2010 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.AlignIO support for the "nexus" file format.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
See also the Bio.Nexus module (which this code calls internally),
as this offers more than just accessing the alignment or its
sequences as SeqRecord objects.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
from Bio.SeqRecord import SeqRecord
from Bio.Nexus import Nexus
from Bio.Align import MultipleSeqAlignment
from Bio.AlignIO.Interfaces import AlignmentWriter
from Bio import Alphabet
__docformat__ = "restructuredtext en"
# You can get a couple of example files here:
# http://www.molecularevolution.org/resources/fileformats/
# This is a generator function!
def NexusIterator(handle, seq_count=None):
"""Returns SeqRecord objects from a Nexus file.
    This uses the Bio.Nexus module to do the hard work.
You are expected to call this function via Bio.SeqIO or Bio.AlignIO
(and not use it directly).
NOTE - We only expect ONE alignment matrix per Nexus file,
meaning this iterator will only yield one MultipleSeqAlignment.
"""
n = Nexus.Nexus(handle)
if not n.matrix:
# No alignment found
raise StopIteration
# Bio.Nexus deals with duplicated names by adding a '.copy' suffix.
# The original names and the modified names are kept in these two lists:
assert len(n.unaltered_taxlabels) == len(n.taxlabels)
if seq_count and seq_count != len(n.unaltered_taxlabels):
raise ValueError("Found %i sequences, but seq_count=%i"
% (len(n.unaltered_taxlabels), seq_count))
# TODO - Can we extract any annotation too?
records = (SeqRecord(n.matrix[new_name], id=new_name,
name=old_name, description="")
for old_name, new_name
in zip(n.unaltered_taxlabels, n.taxlabels))
# All done
yield MultipleSeqAlignment(records, n.alphabet)
class NexusWriter(AlignmentWriter):
"""Nexus alignment writer.
Note that Nexus files are only expected to hold ONE alignment
matrix.
You are expected to call this class via the Bio.AlignIO.write() or
Bio.SeqIO.write() functions.
"""
def write_file(self, alignments):
"""Use this to write an entire file containing the given alignments.
Arguments:
- alignments - A list or iterator returning MultipleSeqAlignment objects.
This should hold ONE and only one alignment.
"""
align_iter = iter(alignments) # Could have been a list
try:
first_alignment = next(align_iter)
except StopIteration:
first_alignment = None
if first_alignment is None:
# Nothing to write!
return 0
# Check there is only one alignment...
try:
second_alignment = next(align_iter)
except StopIteration:
second_alignment = None
if second_alignment is not None:
raise ValueError("We can only write one Alignment to a Nexus file.")
# Good. Actually write the single alignment,
self.write_alignment(first_alignment)
return 1 # we only support writing one alignment!
def write_alignment(self, alignment):
# Creates an empty Nexus object, adds the sequences,
# and then gets Nexus to prepare the output.
if len(alignment) == 0:
raise ValueError("Must have at least one sequence")
columns = alignment.get_alignment_length()
if columns == 0:
raise ValueError("Non-empty sequences are required")
minimal_record = "#NEXUS\nbegin data; dimensions ntax=0 nchar=0; " \
+ "format datatype=%s; end;" \
% self._classify_alphabet_for_nexus(alignment._alphabet)
n = Nexus.Nexus(minimal_record)
n.alphabet = alignment._alphabet
for record in alignment:
n.add_sequence(record.id, str(record.seq))
# For smaller alignments, don't bother to interleave.
        # For larger alignments, interleave to avoid very long lines
# in the output - something MrBayes can't handle.
# TODO - Default to always interleaving?
n.write_nexus_data(self.handle, interleave=(columns > 1000))
def _classify_alphabet_for_nexus(self, alphabet):
"""Returns 'protein', 'dna', 'rna' based on the alphabet (PRIVATE).
Raises an exception if this is not possible."""
# Get the base alphabet (underneath any Gapped or StopCodon encoding)
a = Alphabet._get_base_alphabet(alphabet)
"""condition loop below was edited by Ambuj Kumar in order to make
it align with ConCat"""
if 'Alphabet.Alphabet' not in str(type(a)) and 'Alphabet.ProteinAlphabet' not in str(type(a)) and 'Alphabet.DNAAlphabet' not in str(type(a)) and 'Alphabet.RNAAlphabet' not in str(type(a)) and 'Alphabet.Gapped' not in str(type(a)):
raise TypeError("Invalid alphabet")
elif 'Protein' in str(type(a)):
return "protein"
elif 'DNA' in str(type(a)):
return "dna"
elif 'RNA' in str(type(a)):
return "rna"
else:
# Must be something like NucleotideAlphabet or
# just the generic Alphabet (default for fasta files)
raise ValueError("Need a DNA, RNA or Protein alphabet")
if __name__ == "__main__":
from Bio._py3k import StringIO
print("Quick self test")
print("")
print("Repeated names without a TAXA block")
handle = StringIO("""#NEXUS
[TITLE: NoName]
begin data;
dimensions ntax=4 nchar=50;
format interleave datatype=protein gap=- symbols="FSTNKEYVQMCLAWPHDRIG";
matrix
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ----
ALEU_HORVU MAHARVLLLA LAVLATAAVA VASSSSFADS NPIRPVTDRA ASTLESAVLG
CATH_HUMAN ------MWAT LPLLCAGAWL LGV------- -PVCGAAELS VNSLEK----
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ---X
;
end;
""")
for a in NexusIterator(handle):
print(a)
for r in a:
print("%r %s %s" % (r.seq, r.name, r.id))
print("Done")
print("")
print("Repeated names with a TAXA block")
handle = StringIO("""#NEXUS
[TITLE: NoName]
begin taxa
CYS1_DICDI
ALEU_HORVU
CATH_HUMAN
CYS1_DICDI;
end;
begin data;
dimensions ntax=4 nchar=50;
format interleave datatype=protein gap=- symbols="FSTNKEYVQMCLAWPHDRIG";
matrix
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ----
ALEU_HORVU MAHARVLLLA LAVLATAAVA VASSSSFADS NPIRPVTDRA ASTLESAVLG
CATH_HUMAN ------MWAT LPLLCAGAWL LGV------- -PVCGAAELS VNSLEK----
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ---X
;
end;
""")
for a in NexusIterator(handle):
print(a)
for r in a:
print("%r %s %s" % (r.seq, r.name, r.id))
print("Done")
print("")
print("Reading an empty file")
assert 0 == len(list(NexusIterator(StringIO())))
print("Done")
print("")
print("Writing...")
handle = StringIO()
NexusWriter(handle).write_file([a])
handle.seek(0)
print(handle.read())
handle = StringIO()
try:
NexusWriter(handle).write_file([a, a])
assert False, "Should have rejected more than one alignment!"
except ValueError:
pass
|
dimagi/commcare-hq
|
corehq/sql_db/__init__.py
|
Python
|
bsd-3-clause
| 5,334
| 0.002437
|
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.db import connections as django_connections, DEFAULT_DB_ALIAS, router
from corehq.sql_db.exceptions import PartitionValidationError
@checks.register('settings')
def custom_db_checks(app_configs, **kwargs):
errors = []
custom_db_settings = [
'SYNCLOGS_SQL_DB_ALIAS'
]
for setting in custom_db_settings:
default = getattr(settings, setting) == DEFAULT_DB_ALIAS
custom = not default and getattr(settings, setting) in settings.DATABASES
if not (default or custom):
errors.append(
checks.Error('settings.{} should either be "default" for a default database'
'or a valid database defined in settings.DATABASES'.format(setting))
)
return errors
@checks.register('settings')
def check_plproxy_config(app_configs, **kwargs):
allowed_keys = {'PROXY_FOR_STANDBYS', 'PROXY', 'SHARDS', 'PLPROXY_HOST'}
messages = []
for db, config in settings.DATABASES.items():
if 'PLPROXY' in config:
unknown_keys = set(config['PLPROXY']) - allowed_keys
if unknown_keys:
messages.append(checks.Warning(
f'Unrecognised PLPROXY settings: {unknown_keys}'
))
try:
from corehq.sql_db.config import plproxy_config, _get_standby_plproxy_config
if plproxy_config:
_get_standby_plproxy_config(plproxy_config)
except PartitionValidationError as e:
messages.append(checks.Error(f'Error in PLPROXY standby configuration: {e}'))
return messages
@checks.register('settings')
def check_standby_configs(app_configs, **kwargs):
standby_to_master = {
db: config.get('STANDBY', {}).get('MASTER')
for db, config in settings.DATABASES.items()
if config.get('STANDBY', {}).get('MASTER')
}
all_masters = {
db for db, config in settings.DATABASES.items()
if 'STANDBY' not in config and 'HQ_ACCEPTABLE_STANDBY_DELAY' not in config
}
errors = []
custom_db_settings = [
'REPORTING_DATABASES',
'LOAD_BALANCED_APPS'
]
for setting_name in custom_db_settings:
setting = getattr(settings, setting_name)
if not setting:
continue
for key, config in setting.items():
if 'READ' in config:
read_dbs = {db for db, weight in config['READ']}
masters = read_dbs & all_masters
standby_masters = {
standby_to_master[db]
for db in read_dbs
if db in standby_to_master
}
if len(masters | standby_masters) > 1:
errors.append(checks.Error(
'"settings.{}.{}" refers to multiple master databases. All READ database'
'must be refer to the same master database.'.format(setting_name, key)
))
return errors
@checks.register(checks.Tags.database, deploy=True)
def check_standby_databases(app_configs, **kwargs):
from corehq.sql_db.util import get_standby_databases
standbys = {
db
for db, config in settings.DATABASES.items()
if 'STANDBY' in config or 'HQ_ACCEPTABLE_STANDBY_DELAY' in config
}
confirmed_standbys = get_standby_databases()
badly_configured = standbys - confirmed_standbys
if badly_configured:
return [
checks.Error("Some databases configured as STANDBY are not in recovery mode: {}".format(
', '.join(badly_configured)
))
]
return []
@checks.register(checks.Tags.database, deploy=True)
def check_db_tables(app_configs, **kwargs):
from corehq.sql_db.models import PartitionedModel
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
errors = []
    # some apps only apply to specific envs
env_specific_apps = {
'icds_reports': settings.ICDS_ENVS,
'aaa': ('none',),
}
ignored_models = [
'DeprecatedXFormAttachmentSQL'
]
def _check_model(model_class, using=None):
db = using or router.db_for_read(model_class)
try:
with django_connections[db].cursor() as cursor:
cursor.execute("SELECT %s::regclass", [model_class._meta.db_table])
except Exception as e:
errors.append(checks.Error('checks.Error querying model on database "{}": "{}.{}": {}.{}({})'.format(
using or DEFAULT_DB_ALIAS,
model_class._meta.app_label, model_class.__name__,
e.__class__.__module__, e.__class__.__name__,
e
)))
for model in apps.get_models():
app_label = model._meta.app_label
enabled_envs = env_specific_apps.get(app_label)
if enabled_envs and settings.SERVER_ENVIRONMENT not in enabled_envs:
continue
if model.__name__ in ignored_models or not model._meta.managed:
continue
if issubclass(model, PartitionedModel):
for db in get_db_aliases_for_partitioned_query():
_check_model(model, using=db)
else:
_check_model(model)
return errors
|
marineam/nagcat
|
python/snapy/netsnmp/unittests/test_netsnmp.py
|
Python
|
apache-2.0
| 4,936
| 0.003241
|
# snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import time
from twisted.trial import unittest
from snapy.netsnmp.unittests import TestCase
from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID
class Result(object):
"""Container for async results"""
value = None
def set_result(value, result):
result.value = value
class TestSessionV1(TestCase):
version = "1"
bulk = False
basics = [
(OID(".1.3.6.1.4.2.1.1"), 1),
(OID(".1.3.6.1.4.2.1.2"), -1),
(OID(".1.3.6.1.4.2.1.3"), 1),
(OID(".1.3.6.1.4.2.1.4"), "test value"),
]
def setUpSession(self, address):
self.session = Session(
version=self.version,
community="public",
peername=address,
_use_bulk=self.bulk)
self.session.open()
def tearDownSession(self):
self.session.close()
def test_sget(self):
result = self.session.sget([x for x,v in self.basics])
self.assertEquals(result, self.basics)
return self.finishGet()
def test_get_small(self):
result = Result()
self.session.get([x for x,v in self.basics], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishGet()
def test_get_big(self):
oids = []
for i in xrange(1, 100):
oids.append(OID((1,3,6,1,4,2,4,i)))
result = Result()
self.session.get(oids, set_result, result)
self.session.wait()
result = dict(result.value)
for oid in oids:
assert oid in result
assert result[oid] == "data data data data"
return self.finishGet()
def test_walk_tree(self):
result = Result()
self.session.walk([".1.3.6.1.4.2.1"], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishWalk()
def test_walk_leaf(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result)
self.session.wait()
self.assertEquals(result.value, [(oid, 1)])
return self.finishGet()
def test_walk_strict(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result, strict=True)
self.session.wait()
self.assertEquals(result.value, [])
return self.finishStrictWalk()
def test_sysDescr(self):
result = self.session.sget([OID("SNMPv2-MIB::sysDescr.0")])
self.assert_(result)
self.assertIsInstance(result[0][1], str)
        self.assert_(len(result[0][1]) > 0)
return self.finishGet()
class TestSessionV2c(TestSessionV1):
version = "2c"
def test_hrSystemDate(self):
# This is a special string that gets formatted using the
# MIB's DISPLAY-HINT value. Also, strip off everything
        # other than the date and hour to avoid a race condition.
# And one more quirk, these dates are not zero padded
# so we must format the date manually, whee...
now = time.localtime()
now = "%d-%d-%d,%d" % (now[0], now[1], now[2], now[3])
result = self.session.sget([OID(".1.3.6.1.2.1.25.1.2.0")])
self.assert_(result)
value = result[0][1].split(':', 1)[0]
self.assertEquals(value, now)
return self.finishGet()
class TestSessionV2cBulk(TestSessionV2c):
bulk = True
class TestTimeoutsV1(unittest.TestCase):
version = "1"
def setUp(self):
self.session = Session(
version=self.version,
community="public",
peername="udp:127.0.0.1:9",
retries=0, timeout=0.1)
self.session.open()
def test_sget(self):
self.assertRaises(SnmpError, self.session.sget, [".1.3.6.1.4.2.1.1"])
def test_get(self):
result = Result()
self.session.get([".1.3.6.1.4.2.1.1"], set_result, result)
self.session.wait()
assert isinstance(result.value, SnmpTimeout)
def tearDown(self):
self.session.close()
class TestTimeoutsV2c(TestTimeoutsV1):
version = "2c"
class TestOID(unittest.TestCase):
def test_oid_name(self):
oid = OID("1.3.6.1.2.1.1.1.0")
self.assertEquals(oid, OID("SNMPv2-MIB::sysDescr.0"))
self.assertEquals(oid, OID("sysDescr.0"))
|
neuropower/neuropower
|
neuropower/apps/blog/views.py
|
Python
|
mit
| 393
| 0.015267
|
from __future__ import unicode_literals
import sys
sys.path = sys.path[1:]
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response
from django.core.mail import send_mail
from django.conf import settings
import os
def blog(request):
context = {}
    context['thanks'] = True
return render(request, "blog/blog.html", context)
|
CodeHuntersLab/RaspberryPi
|
WEB/Raspberry/manage.py
|
Python
|
gpl-3.0
| 807
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Raspberry.settings")
try:
        from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
lachouettecoop/chouette-odoo
|
addons/chouette/pos_meal_voucher/models/__init__.py
|
Python
|
gpl-2.0
| 168
| 0
|
from . import account_journal
from . import barcode_rule
from . import pos_config
from . import pos_order
from . import product_category
from . import product_template
|
fhqgfss/MoHa
|
moha/system/state.py
|
Python
|
mit
| 741
| 0.017544
|
import numpy as np
from moha.system.basis import SlaterDeterminant
class state(object):
def __init__(self):
pass
class WaveFunction(state):
def __init__(self):
pass
class HFWaveFunction(WaveFunction):
"""
"""
def __init__(self,dim,occ,coefficient,density,fock,eorbitals,Eelec,Etot):
self.dim = dim
self.occ = occ
self.coefficient = coefficient
self.density = density
self.fock = fock
self.eorbitals = eorbitals
self.Eelec = Eelec
self.Etot = Etot
@property
def configuration(self):
c = {}
for spin in self.occ:
c[spin] = [1]*self.occ[spin] + [0]*(self.dim - self.occ[spin])
return c
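# --- Illustrative usage sketch (editor addition, not part of the original
# module). The 'configuration' property expands the per-spin occupation
# counts into occupation-number vectors of length dim; arguments the example
# does not exercise are passed as None.
if __name__ == '__main__':
    wfn = HFWaveFunction(dim=4, occ={'alpha': 2, 'beta': 2},
                         coefficient=None, density=None, fock=None,
                         eorbitals=None, Eelec=None, Etot=None)
    print(wfn.configuration)  # {'alpha': [1, 1, 0, 0], 'beta': [1, 1, 0, 0]}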
|
davidkleiven/PLOD
|
setup.py
|
Python
|
mit
| 534
| 0.001873
|
from setuptools import setup
'''
The packages subprocess and tkinter are also required from the standard library
'''
setup(
name='PLOD',
version='1.0',
description='Matplotlib plot designer',
author='David Kleiven',
licence='MIT',
    author_email='davidkleiven446@gmail.com',
install_requires=['numpy', 'matplotlib'],
url='https://github.com/davidkleiven/PLOD',
classifiers=[
'Programming Language :: Python :: 3',
],
#py_modules=['plotHandler', 'controlGUI'],
packages=['PLOD']
)
|
chrfrantz/op-papers
|
oosd/week02/blackjack/hand.py
|
Python
|
gpl-3.0
| 952
| 0.003151
|
import deck
class Hand:
def __init__(self):
self.cards = []
    def add(self, card):
self.cards.append(card)
def size(self):
return len(self.cards)
def score(self):
        # separate cards into aces and others
regular_cards = [c for c in self.cards if c.value != "A"]
aces = [c for c in self.cards if c.value == "A"]
#tally up regular card values
points = 0
for c in regular_cards:
if isinstance(c.value, basestring):
points += 10
else:
points += c.value
# now add in aces
for c in aces:
if points + 11 <= 21:
points += 11
else:
points += 1
return points
def __repr__(self):
hand_string = ""
for card in self.cards:
hand_string += str(card)
hand_string += " "
return hand_string
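# --- Illustrative usage sketch (editor addition, not part of the original
# module). deck.Card's constructor is not shown here, so a minimal stand-in
# exposing the same .value attribute is used; face cards count 10 and aces
# count 11 or 1, exactly as score() decides above.
if __name__ == '__main__':
    class _Card(object):
        def __init__(self, value):
            self.value = value
        def __repr__(self):
            return str(self.value)
    hand = Hand()
    for value in ("A", "K", 9):
        hand.add(_Card(value))
    print hand, '->', hand.score()  # A K 9  -> 20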
|
googleapis/python-dialogflow-cx
|
google/cloud/dialogflowcx_v3/services/flows/transports/base.py
|
Python
|
apache-2.0
| 9,463
| 0.001585
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3.types import flow
from google.cloud.dialogflowcx_v3.types import flow as gcdc_flow
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class FlowsTransport(abc.ABC):
"""Abstract transport class for Flows."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_flow: gapic_v1.method.wrap_method(
self.create_flow, default_timeout=None, client_info=client_info,
),
self.delete_flow: gapic_v1.method.wrap_method(
self.delete_flow, default_timeout=None, client_info=client_info,
),
self.list_flows: gapic_v1.method.wrap_method(
                self.list_flows, default_timeout=None, client_info=client_info,
),
self.get_flow: gapic_v1.method.wrap_method(
self.get_flow, default_timeout=None, client_info=client_info,
),
self.update_flow: gapic_v1.method.wrap_method(
self.update_flow, default_timeout=None, client_info=client_info,
),
self.train_flow: gapic_v1.method.wrap_method(
                self.train_flow, default_timeout=None, client_info=client_info,
),
self.validate_flow: gapic_v1.method.wrap_method(
self.validate_flow, default_timeout=None, client_info=client_info,
),
self.get_flow_validation_result: gapic_v1.method.wrap_method(
self.get_flow_validation_result,
default_timeout=None,
client_info=client_info,
),
self.import_flow: gapic_v1.method.wrap_method(
self.import_flow, default_timeout=None, client_info=client_info,
),
self.export_flow: gapic_v1.method.wrap_method(
self.export_flow, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_flow(
self,
) -> Callable[
[gcdc_flow.CreateFlowRequest], Union[gcdc_flow.Flow, Awaitable[gcdc_flow.Flow]]
]:
raise NotImplementedError()
@property
def delete_flow(
self,
) -> Callable[
[flow.DeleteFlowRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
]:
raise NotImplementedError()
@property
def list_flows(
self,
) -> Callable[
[flow.ListFlowsRequest],
Union[flow.ListFlowsResponse, Awaitable[flow.ListFlowsResponse]],
]:
raise NotImplementedError()
@property
def get_flow(
self,
) -> Callable[[flow.GetFlowRequest], Union[flow.Flow, Awaitable[flow.Flow]]]:
raise NotImplementedError()
@property
def update_flow(
self,
) -> Callable[
[gcdc_flow.UpdateFlowReque
|
HelsinkiHacklab/urpobotti
|
python/motorctrl.py
|
Python
|
mit
| 3,257
| 0.00522
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
from zmq.eventloop import ioloop as ioloop_mod
import zmqdecorators
import time
SERVICE_NAME = "urpobot.motor"
SERVICE_PORT = 7575
SIGNALS_PORT = 7576
# How long to wait for new commands before stopping automatically
COMMAND_GRACE_TIME = 0.250
class motorserver(zmqdecorators.service):
    def __init__(self, service_name, service_port, serialport):
super(motorserver, self).__init__(service_name, service_port)
self.serial_port = serialport
self.input_buffer = ""
        self.evthandler = ioloop_mod.IOLoop.instance().add_handler(self.serial_port.fileno(), self.handle_serial_event, ioloop_mod.IOLoop.instance().READ)
self.last_command_time = time.time()
self.pcb = ioloop_mod.PeriodicCallback(self.check_data_reveived, COMMAND_GRACE_TIME)
self.pcb.start()
def check_data_reveived(self, *args):
if (time.time() - self.last_command_time > COMMAND_GRACE_TIME):
self._setspeeds(0,0)
def _setspeeds(self, m1speed, m2speed):
self.serial_port.write("S%04X%04X\n" % ((m1speed & 0xffff), (m2speed & 0xffff)))
@zmqdecorators.method()
def setspeeds(self, resp, m1speed, m2speed):
self.last_command_time = time.time()
#print("Got speeds %s,%s" % (m1speed, m2speed))
self._setspeeds(m1speed, m2speed)
# TODO: actually handle ACK/NACK somehow (we need to read it from the serialport but we can't block while waiting for it...)
resp.send("ACK")
def handle_serial_event(self, fd, events):
# Copied from arbus that was thread based
if not self.serial_port.inWaiting():
# Don't try to read if there is no data, instead sleep (yield) a bit
time.sleep(0)
return
data = self.serial_port.read(1)
if len(data) == 0:
return
#print("DEBUG: data=%s" % data)
        # Put the data into input buffer and check for CRLF
self.input_buffer += data
# Trim prefix NULLs and linebreaks
self.input_buffer = self.input_buffer.lstrip(chr(0x0) + "\r\n")
#print "input_buffer=%s" % repr(self.input_buffer)
if ( len(self.input_buffer) > 1
and self.input_buffer[-2:] == "\r\n"):
# Got a message, parse it (sans the CRLF) and empty the buffer
self.message_received(self.input_buffer[:-2])
self.input_buffer = ""
def message_received(self, message):
#print("DEBUG: msg=%s" % message)
try:
# Currently we have no incoming messages from this board
pass
except Exception as e:
print "message_received exception: Got exception %s" % repr(e)
# Ignore indexerrors, they just mean we could not parse the command
pass
pass
def cleanup(self):
print("Cleanup called")
self._setspeeds(0,0)
def run(self):
print("Starting motorserver")
super(motorserver, self).run()
if __name__ == "__main__":
import serial
import sys,os
port = serial.Serial(sys.argv[1], 115200, xonxoff=False, timeout=0.01)
instance = motorserver(SERVICE_NAME, SERVICE_PORT, port)
instance.run()
|
anithag/Shield
|
Authenticate.py
|
Python
|
gpl-2.0
| 851
| 0.025852
|
# Include the Dropbox SDK
import dropbox
class DropBoxSDK():
# Get your app key and secret from the Dropbox developer website
app_key = ''
app_secret = ''
def authorize(self):
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(self.app_key, self.app_secret)
authorize_url = flow.start()
        # Have the user sign in and authorize this token
authorize_url = flow.start()
print '1. Go to: ' + authorize_url
print '2. Click "Allow" (you might have to log in first)'
print '3. Copy the authorization code.'
code = raw_input("Enter the authorization code here: ").strip()
# This will fail if the user enters an invalid authorization code
self.access_token, user_id = flow.finish(code)
client = dropbox.client.DropboxClient(self.access_token)
#print 'linked account: ', client.account_info()
return client
|
|
alculquicondor/psqlparse
|
setup.py
|
Python
|
bsd-3-clause
| 1,405
| 0
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import os.path
import subprocess
import sys
libpg_query = os.path.join('.', 'libpg_query')
class PSqlParseBuildExt(build_ext):
def run(self):
return_code = subprocess.call(['make', '-C', libpg_query, 'build'])
if return_code:
sys.stderr.write('''
An error occurred during extension building.
Make sure you have bison and flex installed on your system.
''')
sys.exit(return_code)
build_ext.run(self)
USE_CYTHON = bool(os.environ.get('USE_CYTHON'))
ext = '.pyx' if USE_CYTHON else '.c'
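# With USE_CYTHON unset, the pre-generated psqlparse/parser.c is compiled directly;
# setting it regenerates that C source from parser.pyx via cythonize() below.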
libraries = ['pg_query']
extensions = [
Extension('psqlparse.parser',
['psqlparse/parser' + ext],
libraries=libraries,
include_dirs=[libpg_query],
library_dirs=[libpg_query])
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(name='psqlparse',
version='1.0-rc7',
url='https://github.com/alculquicondor/psqlparse',
author='Aldo Culquicondor',
author_email='aldo@amigocloud.com',
      description='Parse SQL queries using the PostgreSQL query parser',
install_requires=['six'],
license='BSD',
cmdclass={'build_ext': PSqlParseBuildExt},
      packages=['psqlparse', 'psqlparse.nodes'],
ext_modules=extensions)
|
koonsolo/MysticMine
|
monorail/koon/app.py
|
Python
|
mit
| 4,111
| 0.016784
|
import sys
import gc
import pygame
from pygame.locals import *
from input import *
import snd
TICKS_PER_SECOND = 25
GAMETICKS = 1000 / TICKS_PER_SECOND
def set_game_speed( slowdown ):
global TICKS_PER_SECOND
global GAMETICKS
TICKS_PER_SECOND = int( 25 * slowdown )
GAMETICKS = 1000 / TICKS_PER_SECOND
class Game:
def __init__( self, name, configuration ):
self.config = configuration
self.name = name
def init_pygame( self ):
snd.pre_init()
# Init the display
pygame.init()
self.userinput = UserInput()
if not self.config.is_fullscreen:
pygame.display.set_mode( self.config.resolution )
else:
pygame.display.set_mode( self.config.resolution, pygame.FULLSCREEN )
pygame.display.set_caption( self.name )
# Init the input
pygame.mouse.set_visible( False )
pygame.event.set_grab( False )
snd.init()
def deinit_pygame( self ):
snd.deinit()
pygame.quit()
def before_gameloop( self ):
pass
def after_gameloop( self ):
pass
def run( self ):
try:
self.init_pygame()
self.before_gameloop()
self.fps = 0
frame_count = 0
next_game_tick = pygame.time.get_ticks()
next_half_second = pygame.time.get_ticks()
# main loop
self.game_is_done = False
while not self.game_is_done:
# events
self.handle_events()
# game tick
loop_count = 0
while pygame.time.get_ticks() > next_game_tick and loop_count < 4:
x, y = pygame.mouse.get_pos()
self.userinput.mouse.feed_pos( Vec2D(x, y) )
self.do_tick( self.userinput )
self.userinput.update()
next_game_tick += GAMETICKS
loop_count += 1
## gc.collect()
if loop_count >= 4: # don't overdo the ticks
next_game_tick = pygame.time.get_ticks()
# render
time_sec = pygame.time.get_ticks() * 0.001
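                # interpol is the fraction of the current game tick that has already
                # elapsed; the renderer can use it to blend between the previous and
                # current state for smooth motion between ticks.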
interpol = 1 - ((next_game_tick - pygame.time.get_ticks()) / float(GAMETICKS))
self.render(pygame.display.get_surface(), interpol, time_sec )
pygame.display.flip()
frame_count += 1
if pygame.time.get_ticks() > next_half_second:
self.fps = 2 * frame_count
frame_count = 0
next_half_second += 500
self.after_gameloop()
self.deinit_pygame()
except:
self.deinit_pygame()
print "Unexpected error:", sys.exc_info()[0]
raise
def handle_events( self ):
for event in pygame.event.get():
if event.type == QUIT:
self.game_is_done = True
elif event.type == KEYDOWN:
self.userinput.key.feed_down( event.key )
self.userinput.key.feed_char( event.unicode )
elif event.type == KEYUP:
                self.userinput.key.feed_up( event.key )
elif event.type == MOUSEBUTTONDOWN:
self.userinput.mouse.feed_down( event.button )
self.state.mouse_down( event.button )
elif event.type == MOUSEBUTTONUP:
self.userinput.mouse.feed_up( event.button )
elif event.type == JOYBUTTONDOWN:
self.userinput.joys[event.joy].feed_down( event.button )
elif event.type == JOYBUTTONUP:
self.userinput.joys[event.joy].feed_up( event.button )
def draw_fps( self, surface ):
font = pygame.font.Font( None, 20 )
render_text = font.render( str(self.fps), 0, (255,255,255) )
surface.blit( render_text, (10,10) )
|
apophys/err
|
errbot/plugin_wizard.py
|
Python
|
gpl-3.0
| 4,848
| 0.001444
|
#!/usr/bin/env python
import errno
import os
import re
import sys
from configparser import ConfigParser
import jinja2
from errbot.version import VERSION
def new_plugin_wizard(directory=None):
"""
Start the wizard to create a new plugin in the current working directory.
"""
if directory is None:
print("This wizard will crea
|
te a new plugin for you in the c
|
urrent directory.")
directory = os.getcwd()
else:
print(f'This wizard will create a new plugin for you in "{directory}".')
if os.path.exists(directory) and not os.path.isdir(directory):
print(f'Error: The path "{directory}" exists but it isn\'t a directory')
sys.exit(1)
name = ask(
"What should the name of your new plugin be?",
validation_regex=r"^[a-zA-Z][a-zA-Z0-9 _-]*$",
).strip()
module_name = name.lower().replace(" ", "_")
directory_name = name.lower().replace(" ", "-")
class_name = "".join([s.capitalize() for s in name.lower().split(" ")])
description = ask(
"What may I use as a short (one-line) description of your plugin?"
)
python_version = "3"
errbot_min_version = ask(
f"Which minimum version of errbot will your plugin work with? "
f"Leave blank to support any version or input CURRENT to select "
f"the current version {VERSION}."
).strip()
if errbot_min_version.upper() == "CURRENT":
errbot_min_version = VERSION
errbot_max_version = ask(
f"Which maximum version of errbot will your plugin work with? "
f"Leave blank to support any version or input CURRENT to select "
f"the current version {VERSION}."
).strip()
if errbot_max_version.upper() == "CURRENT":
errbot_max_version = VERSION
plug = ConfigParser()
plug["Core"] = {
"Name": name,
"Module": module_name,
}
plug["Documentation"] = {
"Description": description,
}
plug["Python"] = {
"Version": python_version,
}
if errbot_max_version != "" or errbot_min_version != "":
plug["Errbot"] = {}
if errbot_min_version != "":
plug["Errbot"]["Min"] = errbot_min_version
if errbot_max_version != "":
plug["Errbot"]["Max"] = errbot_max_version
plugin_path = directory
plugfile_path = os.path.join(plugin_path, module_name + ".plug")
pyfile_path = os.path.join(plugin_path, module_name + ".py")
try:
os.makedirs(plugin_path, mode=0o700)
except IOError as e:
if e.errno != errno.EEXIST:
raise
if os.path.exists(plugfile_path) or os.path.exists(pyfile_path):
path = os.path.join(directory, f"{module_name}.{{py,plug}}")
ask(
f"Warning: A plugin with this name was already found at {path}\n"
f"If you continue, these will be overwritten.\n"
f'Press Ctrl+C to abort now or type in "overwrite" to confirm overwriting of these files.',
valid_responses=["overwrite"],
)
with open(plugfile_path, "w") as f:
plug.write(f)
with open(pyfile_path, "w") as f:
f.write(render_plugin(locals()))
print(f"Success! You'll find your new plugin at '{plugfile_path}'")
print(
"(Don't forget to include a LICENSE file if you are going to publish your plugin)."
)
def ask(question, valid_responses=None, validation_regex=None):
"""
Ask the user for some input. If valid_responses is supplied, the user
must respond with something present in this list.
"""
response = None
print(question)
while True:
response = input("> ")
if valid_responses is not None:
assert isinstance(valid_responses, list)
if response in valid_responses:
break
else:
print(f'Bad input: Please answer one of: {", ".join(valid_responses)}')
elif validation_regex is not None:
m = re.search(validation_regex, response)
if m is None:
print(
f"Bad input: Please respond with something matching this regex: {validation_regex}"
)
else:
break
else:
break
return response
def render_plugin(values):
"""
Render the Jinja template for the plugin with the given values.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "templates")
),
auto_reload=False,
keep_trailing_newline=True,
autoescape=True,
)
template = env.get_template("new_plugin.py.tmpl")
return template.render(**values)
if __name__ == "__main__":
try:
new_plugin_wizard()
except KeyboardInterrupt:
sys.exit(1)
|
clearclaw/xxpaper
|
xxpaper/contents.py
|
Python
|
gpl-3.0
| 590
| 0.030508
|
#! /usr/bin/env python
import logging, logtool
from .page import Page
from .xlate_frame import XlateFrame
LOG = logging.getLogger (__name__)
class Contents:
@logtool.log_call
def __init__ (self, canvas, objects):
self.canvas = canvas
self.objects = objects
@logtool.log_call
def render (self):
with Page (self.canvas) as pg:
for obj in self.objects:
coords = pg.next (obj.asset)
        with XlateFrame (self.canvas, obj.tile_type, *coords,
                         inset_by = "margin"):
# print ("Obj: ", obj.asset)
obj.render ()
|
UVicRocketry/Hybrid-Test-Stand-Control
|
client/hybrid_test_backend.py
|
Python
|
gpl-3.0
| 4,879
| 0.005124
|
from PyQt5 import QtWidgets, uic, QtCore
import sys
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
# Load the UI Page. uic is the thing that lets us use a .ui file
# This only works if the .ui file is in the same directory
super(MainWindow, self).__init__(*args, **kwargs)
uic.loadUi('hybrid_test_gui.ui', self)
# This will connect all the buttons to methods that are called when the
# buttons are clicked by the user.
self.setup_buttons_etc()
## STATES ##
# These will get updated by the GUI if the user clicks a button or
# if the server changes something.
self.state_connected = False
self.state_igniter = False
self.state_MEV = False
self.state_N2OV = False
self.state_N2O = False
self.state_N2 = False
self.state_NCV = False
self.state_RV = False
self.state_VV = False
self.state_abort = False
self.state_run = False
def setup_buttons_etc(self):
# Alright so basically since there is not a "loop" to put methods in that
# you want to update based on things that have changed in the GUI,
# PyQt has these things called signals and slots. They let you connect
# changes or "signals" of objects in the GUI to methods.
# For instance we can connect_btn is a QPushButton from QT Designer,
# which has a signal "clicked". We can "connect" this to a method that
# we want to run when the button is clicked.
self.connect_btn.clicked.connect(self._connect_btn)
# We do this for every button in the GUI. Each button gets a corresponding
# method that has the same name as the button but with an _ in front
# To add a new button:
# Add the button to the GUI in QT Designer
# Give it a nice name
# Add a new line to this method in the form "self.button_name.clicked.connect(self._button_name)"
# Add a new method in below setup_button_etc of the form "def _button_name(self):"
# Any code in that method will be run when the button is clicked!
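        # For example, a hypothetical purge button (names invented here purely to
        # illustrate the pattern, not part of this GUI) would be wired up as:
        #   self.purge_btn.clicked.connect(self._purge_btn)
        # with a matching handler defined below:
        #   def _purge_btn(self):
        #       self.state_purge = True
        #       print(self.state_purge)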
self.disconnect_btn.clicked.connect(self._disconnect_btn)
self.igniter_btn_toggle.clicked.connect(self._igniter_btn_toggle)
self.MEV_btn_off.clicked.connect(self._MEV_btn_off)
self.MEV_btn_on.clicked.connect(self._MEV_btn_on)
self.N2OV_btn_off.clicked.connect(self._N2OV_btn_off)
self.N2OV_btn_on.clicked.connect(self._N2OV_btn_on)
self.N2O_btn_off.clicked.connect(self._N2O_btn_off)
self.N2O_btn_on.clicked.connect(self._N2O_btn_on)
self.N2_btn_off.clicked.connect(self._N2_btn_off)
self.N2_btn_on.clicked.connect(self._N2_btn_on)
self.NCV_btn_off.clicked.connect(self._NCV_btn_off)
        self.NCV_btn_on.clicked.connect(self._NCV_btn_on)
self.RV_btn_off.clicked.connect(self._RV_btn_off)
self.RV_btn_on.clicked.connect(self._RV_btn_on)
self.VV_btn_off.clicked.connect(self._VV_btn_off)
self.VV_btn_on.clicked.connect(self._VV_btn_on)
        self.abort_btn.clicked.connect(self._abort_btn)
self.run_btn.clicked.connect(self._run_btn)
def _connect_btn(self):
self.state_connected = True
print(self.state_connected)
def _disconnect_btn(self):
self.state_connected = False
print(self.state_connected)
def _igniter_btn_toggle(self):
print(self.state_igniter)
def _MEV_btn_off(self):
print(self.state_MEV)
def _MEV_btn_on(self):
print(self.state_MEV)
def _N2OV_btn_off(self):
print(self.state_N2OV)
def _N2OV_btn_on(self):
print(self.state_N2OV)
def _N2O_btn_off(self):
print(self.state_N2O)
def _N2O_btn_on(self):
print(self.state_N2O)
def _N2_btn_off(self):
print(self.state_N2)
def _N2_btn_on(self):
print(self.state_N2)
def _NCV_btn_off(self):
print(self.state_NCV)
def _NCV_btn_on(self):
print(self.state_NCV)
def _RV_btn_off(self):
print(self.state_RV)
def _RV_btn_on(self):
print(self.state_RV)
def _VV_btn_off(self):
print(self.state_VV)
def _VV_btn_on(self):
print(self.state_VV)
def _abort_btn(self):
print(self.state_abort)
def _run_btn(self):
print(self.state_run)
def send_to_server(self):
print("")
def main():
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/parsing/yaml/dumper.py
|
Python
|
gpl-3.0
| 2,336
| 0.001284
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText
from ansible.vars.hostvars import HostVars, HostVarsVars
class AnsibleDumper(yaml.SafeDumper):
'''
A simple stub class that allows us to add representers
for our overridden object types.
'''
pass
def represent_hostvars(self, data):
return self.represent_dict(dict(data))
# Note: only want to represent the encrypted data
def represent_vault_encrypted_unicode(self, data):
return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')
if PY3:
represent_unicode = yaml.representer.SafeRepresenter.represent_str
else:
represent_unicode = yaml.representer.SafeRepresenter.represent_unicode
AnsibleDumper.add_representer(
AnsibleUnicode,
represent_unicode,
)
AnsibleDumper.add_representer(
AnsibleUnsafeText,
represent_unicode,
)
AnsibleDumper.add_representer(
HostVars,
represent_hostvars,
)
AnsibleDumper.add_representer(
HostVarsVars,
represent_hostvars,
)
AnsibleDumper.add_representer(
AnsibleSequence,
yaml.representer.SafeRepresenter.represent_list,
)
AnsibleDumper.add_representer(
AnsibleMapping,
yaml.representer.SafeRepresenter.represent_dict,
)
AnsibleDumper.add_representer(
AnsibleVaultEncryptedUnicode,
represent_vault_encrypted_unicode,
)
|
hachreak/invenio-access
|
invenio_access/alembic/67ba0de65fbb_create_access_branch.py
|
Python
|
gpl-2.0
| 1,279
| 0
|
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Create access branch."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '67ba0de65fbb'
down_revision = None
branch_labels = (u'invenio_access', )
depends_on = 'dbdbc1b19cf2'
def upgrade():
"""Upgrade database."""
def downgrade():
"""Downgrade database."""
|
iamahuman/angr
|
angr/procedures/posix/socket.py
|
Python
|
bsd-2-clause
| 728
| 0.005495
|
import angr
######################################
# socket
######################################
class socket(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, domain, typ, protocol):
        conc_domain = self.state.solver.eval(domain)
conc_typ = self.state.solver.eval(typ)
conc_protocol = self.state.solver.eval(protocol)
if self.state.posix.uid != 0 and conc_typ == 3: # SOCK_RAW
return self.state.libc.ret_errno('EPERM')
nonce = self.state.globals.get('socket_counter', 0) + 1
self.state.globals['socket_counter'] = nonce
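        # Folding the per-state counter into the ident below keeps repeated socket()
        # calls from aliasing the same simulated file object.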
fd = self.state.posix.open_socket(('socket', conc_domain, conc_typ, conc_protocol, nonce))
return fd
|
philrosenfield/TPAGB-calib
|
selection_tests.py
|
Python
|
bsd-3-clause
| 8,573
| 0.008748
|
import os
import numpy as np
import sfh_tests
import ResolvedStellarPops as rsp
from TPAGBparams import snap_src
import galaxy_tests
def ms_color_cut():
comp90 = sfh_tests.read_completeness_table(absmag=True)
tri_dir = os.environ['TRILEGAL_ROOT']
# make these simulations by editing a constant sf trilegal file
# using tab_sfr/ as a source of templates.
sgals = [rsp.Galaxies.simgalaxy(tri_dir + 'const_sfr_out_z06.dat',
filter1='F606W', filter2='F814W'),
rsp.Galaxies.simgalaxy(tri_dir + 'const_sfr_out_z0006.dat',
filter1='F606W', filter2='F814W')]
dline = {}
for band in ['opt', 'ir']:
if band == 'opt':
filter2 = 'F814W'
else:
filter2 = 'F160W'
for sgal in sgals:
sgal.all_stages()
if not sgal.name in dline.keys():
dline[sgal.name] = {}
# only consider MS stars that are brighter than the faintest
            # 90% completeness mag in the sample
iblue, = np.nonzero(sgal.data.get_col(filter2) <
np.max(comp90['%s_filter2' % band]))
ims = list(set(iblue) & set(sgal.ims))
ibheb = list(set(iblue) & set(sgal.ibheb))
ims = list(np.concatenate([ims, ibheb]))
if len(ims) == 0:
# could be an issue that no MS stars are around...
print 'warning', filter2, 'no MS found.'
continue
if band == 'opt':
dline[sgal.name]['F606W'] = np.max(sgal.data.get_col('F606W')[ims])
dline[sgal.name]['F814W'] = np.max(sgal.data.get_col('F814W')[ims])
dline[sgal.name]['F475W'] = np.max(sgal.data.get_col('F475W')[ims])
else:
dline[sgal.name]['F110W'] = np.max(sgal.data.get_col('F110W')[ims])
dline[sgal.name]['F160W'] = np.max(sgal.data.get_col('F160W')[ims])
return dline
def color_cut_per_galaxy(table='default'):
ms_dict = ms_color_cut()
comp90 = sfh_tests.read_completeness_table(table=table, absmag=True)
comp90_uncert = sfh_tests.read_completeness_table(table=table, uncertainties=True)
#if table == 'default':
# table = snap_src + '/tables/completeness_0.90.dat'
#table = table.replace('.dat', '_colorcuts.dat')
photsys = 'wfc3snap'
#fmt = '%(target)s %(opt_colorcut).3f %(ir_colorcut).3f \n'
for sgalname, dline in ms_dict.items():
print sgalname
for i, target in enumerate(comp90['target']):
for band in ['opt', 'ir']:
if band == 'opt':
filter1 = sfh_tests.get_filter1(target.lower())
filter2 = 'F814W'
else:
filter1 = 'F110W'
filter2 = 'F160W'
m2m = {'target': target, 'filter2': filter2, 'filter1': filter1}
if not filter1 in dline.keys():
continue
Mag1 = dline[filter1]
Mag2 = dline[filter2]
                mag1 = rsp.astronomy_utils.Mag2mag(Mag1, filter1, photsys, **m2m)
mag2 = rsp.astronomy_utils.Mag2mag(Mag2, filter2, photsys, **m2m)
color = mag1 - mag2
color_uncert = comp90_uncert[i]['%s_color' % band]
                print target, filter1, filter2, '%.2f' % (color+color_uncert)
def find_contamination_by_phases(output_files=None):
if output_files is None:
output_files = [ snap_src + '/models/varysfh/ddo71/caf09_s_nov13/mc/output_ddo71_caf09_s_nov13.dat',
#snap_src + '/models/varysfh/ddo78/caf09_s_nov13/mc/output_ddo78_caf09_s_nov13.dat',
snap_src + '/models/varysfh/hs117/caf09_s_nov13/mc/output_hs117_caf09_s_nov13.dat',
#snap_src + '/models/varysfh/kdg73/caf09_s_nov13/mc/output_kdg73_caf09_s_nov13.dat',
snap_src + '/models/varysfh/kkh37/caf09_s_nov13/mc/output_kkh37_caf09_s_nov13.dat']#,
#snap_src + '/models/varysfh/ngc2976-deep/caf09_s_nov13/mc/output_ngc2976-deep_caf09_s_nov13.dat',
#snap_src + '/models/varysfh/ngc404/caf09_s_nov13/mc/output_ngc404_caf09_s_nov13.dat']
for output_file in output_files:
target = output_file.split('output_')[1].split('_')[0]
print target
filter1 = sfh_tests.get_filter1(target)
ds = sfh_tests.Diagnostics(VarySFH_kw={'target': target})
ds.mc = False
sgal = rsp.Galaxies.simgalaxy(output_file, filter1=filter1,
filter2='F814W')
sgal.target = target
sopt_rgb, sopt_agb, sir_rgb, sir_agb = \
ds.do_normalization(filter1=filter1, trilegal_output=output_file,
hist_it_up=False, dry_run=True)
ds.contamination_by_phases(sopt_rgb, sopt_agb, sir_rgb, sir_agb)
return
def completeness_table_absmag(table='default'):
'''
convert the completeness table mags to abs mag.
outfile = [table]_absmag.dat
'''
comp90 = sfh_tests.read_completeness_table(table)
if table == 'default':
table = snap_src + '/tables/completeness_0.90.dat'
table = table.replace('.dat', '_absmag.dat')
photsys = 'wfc3snap'
fmt = '%(target)s %(opt_filter1).3f %(opt_filter2).3f %(ir_filter1).3f %(ir_filter2).3f \n'
with open(table, 'w') as out:
for i, target in enumerate(comp90['target']):
dline = {'target': target}
for band in ['opt', 'ir']:
if band == 'opt':
filter1 = sfh_tests.get_filter1(target.lower())
filter2 = 'F814W'
else:
filter1 = 'F110W'
filter2 = 'F160W'
m2m = {'target': target, 'filter2': filter2, 'filter1': filter1}
compf1 = comp90[i]['%s_filter1' % band]
dline['%s_filter1' % band] = rsp.astronomy_utils.mag2Mag(compf1, filter1, photsys, **m2m)
compf2 = comp90[i]['%s_filter2' % band]
dline['%s_filter2' % band] = rsp.astronomy_utils.mag2Mag(compf2, filter2, photsys, **m2m)
out.write(fmt % dline)
print 'wrote %s' % table
def uncertainties_at_completeness(table='default', binwidth=0.1):
'''
write a table with the median uncertainties around the completeness value
from the completeness table.
'''
comp90 = sfh_tests.read_completeness_table(table)
if table == 'default':
table = snap_src + '/tables/completeness_0.90.dat'
table = table.replace('.dat', '_uncertainties.dat')
opt_fits_src = snap_src + '/data/angst_no_trim'
fmt = '%(target)s %(opt_filter1).3f %(opt_filter2).3f %(opt_color).3f %(ir_filter1).3f %(ir_filter2).3f %(ir_color).3f \n'
title = '# ' + fmt.replace('%','').replace(')', '').replace('.3f','').replace('s','').replace('(','')
with open(table, 'w') as out:
out.write('# median uncertainty within +/-%.2f of completeness mag\n' % (binwidth/2))
out.write(title)
for i, target in enumerate(comp90['target']):
ir_gal = galaxy_tests.load_galaxy(target, band='ir')
opt_gal = galaxy_tests.load_galaxy(target, band='opt',
fits_src=opt_fits_src)
dline = {'target': target}
for band, gal in zip(['opt', 'ir'], [opt_gal, ir_gal]):
key = '%s_filter1' % band
uncerts1, = np.nonzero((gal.mag1 < comp90[i][key] + binwidth/2) &
(gal.mag1 > comp90[i][key] - binwidth/2))
med_unct1 = np.median(gal.data.MAG1_ERR[uncerts1])
dline[key] = med_unct1
key = '%s_filter2' % band
uncerts2, = np.nonzero((gal.mag2 < comp90[i][key] + binwidth/2) &
(gal.mag2 > comp90[i][key] - binwidth/2))
med_unct2 = np.median(gal.data.MAG2_
|
bnoi/scikit-tracker
|
sktracker/trajectories/tests/test_trajectories.py
|
Python
|
bsd-3-clause
| 14,672
| 0.003476
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
from nose.tools import assert_dict_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
import numpy as np
import tempfile
import pandas as pd
from sktracker import data
from sktracker.trajectories import Trajectories
def test_attributes():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert_array_equal(trajs.t_stamps, np.array([0, 1, 2, 3, 4]))
assert_array_equal(trajs.labels, np.array([0, 1, 2, 3, 4]))
segments = {0: [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0)],
1: [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1)],
2: [(0, 2), (1, 2), (2, 2), (3, 2), (4, 2)],
3: [(0, 3), (1, 3), (2, 3), (3, 3), (4, 3)],
4: [(0, 4), (1, 4), (2, 4), (3, 4), (4, 4)]}
assert_dict_equal(trajs.segment_idxs, segments)
traj = np.array([[ -9.25386045, 11.34555088, 22.11820326, 3. , 0. ],
[ 11.05321776, 3.23738477, 2.62790435, 2. , 1. ],
[ 16.6824928 , 14.602054 , -12.1218683 , 4. , 2. ],
[ 17.22410516, 14.8068125 , -11.87642753, 4. , 3. ],
[ 2.80222495, -13.13783042, 8.56406878, 0. , 4. ]])
t_stamp, traj_to_test = list(trajs.iter_segments)[0]
assert_array_almost_equal(traj, traj_to_test)
assert list(trajs.get_segments().keys()) == [0, 1, 2, 3, 4]
def test_structure():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert_raises(ValueError, trajs.check_trajs_df_structure, ['t_idx'])
assert_raises(ValueError, trajs.check_trajs_df_structure, ['t_stamp', 'label'], ['dx'])
trajs.check_trajs_df_structure(['t_stamp', 'label'], ['x', 'y', 'z'])
def test_copy():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert isinstance(trajs.copy(), Trajectories)
def test_empty():
empty = Trajectories.empty_trajs(columns=['x', 'y'])
assert empty.shape == (0, 2)
assert empty.empty is True
def test_reverse():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert trajs.reverse().shape == (25, 5)
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
trajs.reverse(inplace=True)
assert trajs.shape == (25, 5)
def test_write_hdf():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
tmp_store = tempfile.NamedTemporaryFile(suffix='h5')
with pd.get_store(tmp_store.name) as store:
store['trajs'] = trajs
def test_interpolate():
trajs = Trajectories(data.with_gaps_df())
trajs.set_index('true_label', inplace=True, append=True)
trajs.reset_index(level='label', drop=True, inplace=True)
trajs.index.set_names(['t_stamp', 'label'], inplace=True)
interpolated = trajs.time_interpolate(sampling=3, time_step=0.1, s=1)
# t_stamps_in = interpolated.index.get_level_values('t_stamp')
# indexer = t_stamps_in % 2 == 0
# interpolated.loc[indexer].shape, trajs.shape
# indexer = interpolated.t_stamps % 3 == 0
# assert interpolated.loc[indexer].shape[0] == trajs.shape[0]
dts = interpolated.get_segments()[0].t.diff().dropna()
    # All time points should be equally spaced
assert_almost_equal(dts.min(), dts.max())
def get_get_diff():
trajs = Trajectories(data.brownian_trajs_df())
diffs = trajs.get_diff()
x_diffs = diffs.to_dict()['x']
real_x_diffs = {(1, 2): 3.8452299074207819,
(3, 2): 4.7476193900872765,
(0, 0): np.nan,
(3, 0): 0.54161236467700746,
(0, 4): np.nan,
(1, 4): -5.6929349491048624,
(1, 3): -30.136494087633611,
(2, 3): 23.240228721514185,
(2, 1): -23.9264368052234,
(2, 4): 0.63465512968445115,
(4, 2): -4.5501817884252063,
(1, 0): 20.307078207040306,
(0, 3): np.nan,
(4, 0): -14.421880216023439,
(0, 1): np.nan,
(3, 3): -6.5845079821965991,
(4, 1): -19.329775838349192,
(3, 1): 18.084232469105203,
(4, 4): 24.644945052453025,
(0, 2): np.nan,
(2, 0): 5.6292750381105723,
(4, 3): 13.209596167161628,
(2, 2): -3.7469188310869228,
(3, 4): -17.381636024737336,
(1, 1): 13.827909766138866}
assert_almost_equal(x_diffs, real_x_diffs)
def test_get_speeds():
trajs = Trajectories(data.brownian_trajs_df())
    speeds = trajs.get_speeds().tolist()
real_speeds = [np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
857.99153458573994,
1596.9530747771976,
873.15267834726137,
1282.3088174598233,
408.98588960526808,
378.40023709328955,
1809.9895146014187,
917.93227668556324,
592.31881736181106,
0.48325048326444919,
0.39551116881922965,
798.29858694043128,
1085.3214310682606,
405.49164945495221,
550.37555144616226,
1406.707586739079,
1031.9444945962532,
1077.6619763794718,
1445.7789239945778,
739.66839622816326]
assert_almost_equal(speeds, real_speeds)
def test_scale():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
scaled = trajs.scale(factors=[2., 2., 2.],
coords=['x', 'y', 'z'], inplace=False)
assert_array_almost_equal(scaled[['x', 'y', 'z']] / 2., trajs[['x', 'y', 'z']])
trajs = trajs.scale(factors=[2., 2., 2.],
coords=['x', 'y', 'z'], inplace=True)
assert_array_almost_equal(scaled[['x', 'y', 'z']], trajs[['x', 'y', 'z']])
assert_raises(ValueError, trajs.scale, factors=[2., 2., 2.], coords=['x', 'y'], inplace=False)
def test_project():
trajs = Trajectories(data.directed_motion_trajs_df())
trajs.rename(columns={'true_label': 'new_label'}, inplace=True)
trajs.relabel()
trajs.project([0, 1],
coords=['x', 'y'],
keep_first_time=False,
reference=None,
inplace=True,
progress=False)
excepted = np.array([[ 0.27027431, 0. ],
[-0.27027431, 0. ],
[-0.25306519, 0.69683713],
[ 0.04633664, 0.31722648]])
assert_array_almost_equal(excepted, trajs.loc[:,['x_proj', 'y_proj']].values[:4])
trajs = trajs.project([0, 1],
coords=['x', 'y'],
keep_first_time=False,
reference=None,
inplace=False,
progress=False)
assert_array_almost_equal(excepted, trajs.loc[:,['x_proj', 'y_proj']].values[:4])
assert_raises(ValueError, trajs.project, [0, 1], coords=['x', 'y', 'z', 't'])
def test_get_colors():
"""
"""
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
colors = trajs.get_colors()
assert colors == {0: '#FF0000', 1: '#ADFF00', 2: '#00FFA9', 3: '#0408FF', 4: '#FF00AC'}
colors = trajs.get_colors(alpha=0.5)
assert colors == {0: '#FF000080',
1: '#ADFF0080',
2: '#00FFA980',
3: '#0408FF80',
4: '#FF00A
|
cowlicks/dask
|
dask/array/reductions.py
|
Python
|
bsd-3-clause
| 22,730
| 0.001232
|
from __future__ import absolute_import, division, print_function
from functools import partial, wraps
from itertools import product, repeat
from math import factorial, log, ceil
import operator
import numpy as np
from toolz import compose, partition_all, merge, get, accumulate, pluck
from . import chunk
from .core import _concatenate2, Array, atop, sqrt, lol_tuples
from .numpy_compat import divide
from ..compatibility import getargspec, builtins
from ..base import tokenize
from ..context import _globals
from ..utils import ignoring, funcname
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None,
split_every=None, combine=None, name=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(i if i >= 0 else x.ndim + i for i in axis)
if dtype is not None and 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if dtype is not None and 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
# Map chunk across all blocks
inds = tuple(range(x.ndim))
tmp = atop(partial(chunk, axis=axis, keepdims=True), inds, x, inds)
tmp._chunks = tuple((1,)*len(c) if i in axis else c for (i, c)
in enumerate(tmp.chunks))
return _tree_reduce(tmp, aggregate, axis, keepdims, dtype, split_every,
combine, name=name)
def _tree_reduce(x, aggregate, axis, keepdims, dtype, split_every=None,
combine=None, name=None):
"""Perform the tree reduction step of a reduction.
Lower level, users should use ``reduction`` or ``arg_reduction`` directly.
"""
# Normalize split_every
split_every = split_every or _globals.get('split_every', 4)
if isinstance(split_every, dict):
split_every = dict((k, split_every.get(k, 2)) for k in axis)
elif isinstance(split_every, int):
n = builtins.max(int(split_every ** (1/(len(axis) or 1))), 2)
split_every = dict.fromkeys(axis, n)
else:
split_every = dict((k, v) for (k, v) in enumerate(x.numblocks) if k in axis)
# Reduce across intermediates
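    # depth = number of tree levels needed so that at most split_every blocks
    # are combined per level along each reduced axis.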
depth = 1
for i, n in enumerate(x.numblocks):
if i in split_every and split_every[i] != 1:
depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
func = compose(partial(combine or aggregate, axis=axis, keepdims=True),
partial(_concatenate2, axes=axis))
for i in range(depth - 1):
x = partial_reduce(func, x, split_every, True, None,
                           name=(name or funcname(combine or aggregate)) + '-partial')
    func = compose(partial(aggregate, axis=axis, keepdims=keepdims),
partial(_concatenate2, axes=axis))
return partial_reduce(func, x, split_every, keepdims=keepdims,
dtype=dtype,
name=(name or funcname(aggregate)) + '-aggregate')
def partial_reduce(func, x, split_every, keepdims=False, dtype=None, name=None):
"""Partial reduction across multiple axes.
Parameters
----------
func : function
x : Array
split_every : dict
Maximum reduction block sizes in each dimension.
Examples
--------
Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
dimension, and 3 blocks in the 2nd dimension:
>>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP
"""
name = (name or funcname(func)) + '-' + tokenize(func, x, split_every,
keepdims, dtype)
parts = [list(partition_all(split_every.get(i, 1), range(n))) for (i, n)
in enumerate(x.numblocks)]
keys = product(*map(range, map(len, parts)))
out_chunks = [tuple(1 for p in partition_all(split_every[i], c)) if i
in split_every else c for (i, c) in enumerate(x.chunks)]
if not keepdims:
out_axis = [i for i in range(x.ndim) if i not in split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
out_chunks = list(getter(out_chunks))
dsk = {}
for k, p in zip(keys, product(*parts)):
decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
dummy = dict(i for i in enumerate(p) if i[0] not in decided)
g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
dsk[(name,) + k] = (func, g)
return Array(merge(dsk, x.dask), name, out_chunks, dtype=dtype)
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).sum().dtype
else:
dt = None
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = np.empty((1,), dtype=a._dtype).prod().dtype
else:
dt = None
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a._dtype, split_every=split_every)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a._dtype, split_every=split_every)
@wraps(chunk.any)
def any(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every)
@wraps(chunk.all)
def all(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every)
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = chunk.nansum(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt = chunk.nanprod(np.empty((1,), dtype=a._dtype)).dtype
else:
dt = None
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every)
@wraps(chunk.nancumsum)
def nancumsum(x, axis, dtype=None):
return cumreduction(chunk.nancumsum, operator.add, 0, x, axis, dtype)
@wraps(chunk.nancumprod)
def nancumprod(x, axis, dtype=None):
return cumreduction(chunk.nancumprod, operator.mul, 1, x, axis, dtype)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.nanmin, chunk.nanmin, axis=axis,
keepdims=keepdims, dtype=a._dtype, split_every=split_every)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False, split_every=None):
return reduction(a, chunk.nanmax, chunk.nanmax, axis=axis,
keepdims=keepdims, dtype=a._dtype, split_every=split_every)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwar
|
oasis-open/cti-pattern-validator
|
stix2patterns/v21/grammars/STIXPatternVisitor.py
|
Python
|
bsd-3-clause
| 6,827
| 0.01787
|
# Generated from STIXPattern.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .STIXPatternParser import STIXPatternParser
else:
from STIXPatternParser import STIXPatternParser
# This class defines a complete generic visitor for a parse tree produced by STIXPatternParser.
class STIXPatternVisitor(ParseTreeVisitor):
# Visit a parse tree produced by STIXPatternParser#pattern.
def visitPattern(self, ctx:STIXPatternParser.PatternContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressions.
def visitObservationExpressions(self, ctx:STIXPatternParser.ObservationExpressionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionOr.
def visitObservationExpressionOr(self, ctx:STIXPatternParser.ObservationExpressionOrContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionAnd.
def visitObservationExpressionAnd(self, ctx:STIXPatternParser.ObservationExpressionAndContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionRepeated.
def visitObservationExpressionRepeated(self, ctx:STIXPatternParser.ObservationExpressionRepeatedContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionSimple.
def visitObservationExpressionSimple(self, ctx:STIXPatternParser.ObservationExpressionSimpleContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionCompound.
def visitObservationExpressionCompound(self, ctx:STIXPatternParser.ObservationExpressionCompoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionWithin.
def visitObservationExpressionWithin(self, ctx:STIXPatternParser.ObservationExpressionWithinContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionStartStop.
def visitObservationExpressionStartStop(self, ctx:STIXPatternParser.ObservationExpressionStartStopContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#comparisonExpression.
def visitComparisonExpression(self, ctx:STIXPatternParser.ComparisonExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#comparisonExpressionAnd.
def visitComparisonExpressionAnd(self, ctx:STIXPatternParser.ComparisonExpressionAndContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestEqual.
def visitPropTestEqual(self, ctx:STIXPatternParser.PropTestEqualContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestOrder.
def visitPropTestOrder(self, ctx:STIXPatternParser.PropTestOrderContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestSet.
def visitPropTestSet(self, ctx:STIXPatternParser.PropTestSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestLike.
def visitPropTestLike(self, ctx:STIXPatternParser.PropTestLikeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestRegex.
def visitPropTestRegex(self, ctx:STIXPatternParser.PropTestRegexContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestIsSubset.
def visitPropTestIsSubset(self, ctx:STIXPatternParser.PropTestIsSubsetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestIsSuperset.
def visitPropTestIsSuperset(self, ctx:STIXPatternParser.PropTestIsSupersetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestParen.
def visitPropTestParen(self, ctx:STIXPatternParser.PropTestParenContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestExists.
def visitPropTestExists(self, ctx:STIXPatternParser.PropTestExistsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#startStopQualifier.
def visitStartStopQualifier(self, ctx:STIXPatternParser.StartStopQualifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#withinQualifier.
def visitWithinQualifier(self, ctx:STIXPatternParser.WithinQualifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#repeatedQualifier.
def visitRepeatedQualifier(self, ctx:STIXPatternParser.RepeatedQualifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#objectPath.
    def visitObjectPath(self, ctx:STIXPatternParser.ObjectPathContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#objectType.
def visitObjectType(self, ctx:STIXPatternParser.ObjectTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#firstPathComponent.
    def visitFirstPathComponent(self, ctx:STIXPatternParser.FirstPathComponentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#indexPathStep.
def visitIndexPathStep(self, ctx:STIXPatternParser.IndexPathStepContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#pathStep.
def visitPathStep(self, ctx:STIXPatternParser.PathStepContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#keyPathStep.
def visitKeyPathStep(self, ctx:STIXPatternParser.KeyPathStepContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#setLiteral.
def visitSetLiteral(self, ctx:STIXPatternParser.SetLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#primitiveLiteral.
def visitPrimitiveLiteral(self, ctx:STIXPatternParser.PrimitiveLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#orderableLiteral.
def visitOrderableLiteral(self, ctx:STIXPatternParser.OrderableLiteralContext):
return self.visitChildren(ctx)
del STIXPatternParser
|
forgeousgeorge/new_dir
|
code.py
|
Python
|
mit
| 179
| 0.01676
|
def Woody():
    # complete
print "Reach for the sky but don't burn your wings!"
# this will make it much easier in future problems to see that something is actually happening
|
evilhero/mylar
|
mylar/parseit.py
|
Python
|
gpl-3.0
| 36,412
| 0.010354
|
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from bs4 import BeautifulSoup, UnicodeDammit
import urllib2
import re
import helpers
import logger
import datetime
import sys
from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
import mylar
def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
NOWyr = datetime.date.today().year
if datetime.date.today().month == 12:
NOWyr = NOWyr + 1
logger.fdebug("We're in December, incremented search Year to increase search results: " + str(NOWyr))
comicnm = ComicName.encode('utf-8').strip()
comicyr = ComicYear
comicis = Total
comicid = ComicID
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
#print ( "comichave: " + str(comicis) )
#print ( "comicid: " + str(comicid) )
comicnm_1 = re.sub('\+', '%2B', comicnm)
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
#print (str(cnt) + " results")
resultName = []
resultID = []
resultYear = []
resultIssues = []
resultURL = None
n_odd = -1
n_even = -1
n = 0
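    # comics.org alternates result rows between the "listing_even" and "listing_odd"
    # CSS classes, so walk both lists in step with separate counters.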
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
resultName.append(helpers.cleanName(rtp.findNext(text=True)))
#print ( "Comic Name: " + str(resultName[n]) )
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
#print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
#print ( "Year: " + str(resultYear[n]) )
#print ( "Issues: " + str(resultIssues[n]) )
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
CleanResultName = re.sub(' ', '', CleanResultName).lower()
#print ("CleanComicName: " + str(CleanComicName))
#print ("CleanResultName: " + str(CleanResultName))
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
#this has been seen in a few instances already, so trying to adjust.
#when the series year is 2011, in gcd it might be 2012 due to publication
#dates overlapping between Dec/11 and Jan/12. Let's accept a match with a
#1 year grace space, and then pull in the first issue to see the actual pub
# date and if coincides with the other date..match it.
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
#print ("n:" + str(n) + "...matched by year to Mylar!")
#print ( "Year: " + str(resultYear[n]) )
#Occasionally there are discrepancies in comic count between
#GCD and CV. 99% it's CV not updating to the newest issue as fast
#as GCD does. Therefore, let's increase the CV count by 1 to get it
#to match, any more variation could cause incorrect matching.
#ie. witchblade on GCD says 159 issues, CV states 161.
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total) +1 or (int(resultIssues[n]) +1) == int(Total):
#print ("initial issue match..continuing.")
if int(resultIssues[n]) == int(Total) +1:
issvariation = "cv"
elif int(resultIssues[n]) +1 == int(Total):
issvariation = "gcd"
else:
issvariation = "no"
#print ("n:" + str(n) + "...matched by issues to Mylar!")
#print ("complete match!...proceeding")
TotalIssues = resultIssues[n]
resultURL = str(resultID[n])
rptxt = resultp('td')[6]
resultPublished = rptxt.findNext(text=True)
#print ("Series Published: " + str(resultPublished))
break
n+=1
# it's possible that comicvine would return a comic name incorrectly, or gcd
# has the wrong title and won't match 100%...
# (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
# this section is to account for variations in spelling, punctuation, etc/
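    # e.g. a title of "Fantastic Four" would be retried below as "fantastic 4"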
basnumbs = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12}
if resultURL is None:
#search for number as text, and change to numeric
for numbs in basnumbs:
#print ("numbs:" + str(numbs))
if numbs in ComicName.lower():
numconv = basnumbs[numbs]
#print ("numconv: " + str(numconv))
ComicNm = re.sub(str(numbs), str(numconv), ComicName.lower())
#print ("comicname-reVISED:" + str(ComicNm))
return GCDScraper(ComicNm, ComicYear, Total, ComicID)
break
if ComicName.lower().startswith('the '):
ComicName = ComicName[4:]
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if ':' in ComicName:
ComicName = re.sub(':', '', ComicName)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if '-' in ComicName:
ComicName = re.sub('-', ' ', ComicName)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if 'and' in ComicName.lower():
ComicName = ComicName.replace('and', '&')
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if not quickmatch: return 'No Match'
#vari_loop = 0
if quickmatch == "yes":
if resultURL is None: return 'No Match'
else: return 'Match'
return GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=ComicID, TotalIssues=TotalIssues, issvariation=issvariation, resultPublished=resultPublished)
def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariation, resultPublished):
gcdinfo = {}
gcdchoice = []
gcount = 0
i = 0
# datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
# #search for number as text, and change to n
|
offbyone/Flexget
|
flexget/webserver.py
|
Python
|
mit
| 9,166
| 0.002837
|
from __future__ import unicode_literals, division, absolute_import
import logging
import threading
import hashlib
import random
import socket
from sqlalchemy import Column, Integer, Unicode
from flask import Flask, abort, redirect
from flask.ext.login import UserMixin
from flexget import options, plugin
from flexget.event import event
from flexget.config_schema import register_config_key
from flexget.utils.tools import singleton
from flexget.manager import Base
from flexget.utils.database import with_session
from flexget.logger import console
log = logging.getLogger('web_server')
_home = None
_app_register = {}
_default_app = Flask(__name__)
random = random.SystemRandom()
web_config_schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'bind': {'type': 'string', 'format': 'ipv4', 'default': '0.0.0.0'},
'port': {'type': 'integer', 'default': 3539},
},
'additionalProperties': False
}
]
}
def generate_key():
""" Generate key for use to authentication """
return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits.
Taken from the django.utils.crypto module.
"""
return ''.join(random.choice(allowed_chars) for __ in range(length))
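# get_random_string() is used below by get_secret() with length=50 and an extended
# character set to seed the Flask session secret.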
@with_session
def get_secret(session=None):
    """ Generate a secret key for flask applications and store it in the database. """
web_secret = session.query(WebSecret).first()
if not web_secret:
web_secret = WebSecret(id=1, value=get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'))
session.add(web_secret)
session.commit()
return web_secret.value
class User(Base, UserMixin):
""" User class available for flask apps to handle authentication using flask_login """
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(Unicode(50), unique=True)
token = Column(Unicode, default=generate_key)
password = Column(Unicode)
def __repr__(self):
return '<User %r>' % self.name
def get_id(self):
return self.name
class WebSecret(Base):
""" Store flask secret in the database """
__tablename__ = 'secret'
id = Column(Unicode, primary_key=True)
value = Column(Unicode)
@event('config.register')
def register_config():
register_config_key('web_server', web_config_schema)
def register_app(path, application):
if path in _app_register:
        raise ValueError('path %s already registered' % path)
_app_register[path] = application
def register_home(route):
"""Registers UI home page"""
global _home
_home = route
@_default_app.route('/')
def start_page():
""" Redirect user to registered UI home """
if not _home:
abort(404)
return redirect(_home)
@event('manager.daemon.started', -255) # Low priority so plugins can register apps
@with_session
def setup_server(manager, session=None):
""" Sets up and starts/restarts the web service. """
if not manager.is_daemon:
return
web_server_config = manager.config.get('web_server')
if not web_server_config:
return
web_server = WebServer(
bind=web_server_config['bind'],
port=web_server_config['port'],
)
_default_app.secret_key = get_secret()
# Create default flexget user
if session.query(User).count() == 0:
session.add(User(name="flexget", password="flexget"))
session.commit()
if web_server.is_alive():
web_server.stop()
if _app_register:
web_server.start()
@event('manager.shutdown_requested')
def stop_server(manager):
""" Sets up and starts/restarts the webui. """
if not manager.is_daemon:
return
web_server = WebServer()
if web_server.is_alive():
web_server.stop()
@singleton
class WebServer(threading.Thread):
# We use a regular list for periodic jobs, so you must hold this lock while using it
triggers_lock = threading.Lock()
def __init__(self, bind='0.0.0.0', port=5050):
threading.Thread.__init__(self, name='web_server')
self.bind = str(bind) # String to remove unicode warning from cherrypy startup
self.port = port
self.server = None
def start(self):
# If we have already started and stopped a thread, we need to reinitialize it to create a new one
if not self.is_alive():
self.__init__(bind=self.bind, port=self.port)
threading.Thread.start(self)
def _start_server(self):
from cherrypy import wsgiserver
apps = {'/': _default_app}
for path, registered_app in _app_register.iteritems():
apps[path] = registered_app
d = wsgiserver.WSGIPathInfoDispatcher(apps)
self.server = wsgiserver.CherryPyWSGIServer((self.bind, self.port), d)
try:
host = self.bind if self.bind != "0.0.0.0" else socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host = '127.0.0.1'
log.info('Web interface available at http://%s:%s' % (host, self.port))
self.server.start()
def run(self):
self._start_server()
def stop(self):
log.info('Shutting down web server')
if self.server:
self.server.stop()
@with_session
def do_cli(manager, options, session=None):
try:
if hasattr(options, 'user'):
options.user = options.user.lower()
if options.action == 'list':
users = session.query(User).all()
if users:
max_width = len(max([user.name for user in users], key=len)) + 4
console('_' * (max_width + 56 + 9))
console('| %-*s | %-*s |' % (max_width, 'Username', 56, 'API Token'))
if users:
for user in users:
console('| %-*s | %-*s |' % (max_width, user.name, 56, user.token))
else:
console('No users found')
if options.action == 'add':
exists = session.query(User).filter(User.name == options.user).first()
if exists:
console('User %s already exists' % options.user)
return
user = User(name=options.user, password=options.password)
session.add(user)
session.commit()
console('Added %s to the database with generated API Token: %s' % (user.name, user.token))
if options.action == 'delete':
user = session.query(User).filter(User.name == options.user).first()
if not user:
console('User %s does not exist' % options.user)
return
session.delete(user)
session.commit()
console('Deleted user %s' % options.user)
if options.action == 'passwd':
user = session.query(User).filter(User.name == options.user).first()
if not user:
console('User %s does not exist' % options.user)
return
user.password = options.password
session.commit()
console('Updated password for user %s' % options.user)
if options.action == 'gentoken':
user = session.query(User).filter(User.name == options.user).first()
if not user:
console('User %s does not exist' % options.user)
return
user.token = generate_key()
session.commit()
console('Generated new token for user %s' % user.name)
console('Token %s' % user.token)
finally:
session.close()
@event('options.register')
def register_parser_arguments():
parser = options.register_command('users', do_cli, help='Manage users providing access to the web ser
|
retr0h/ansible
|
lib/ansible/inventory/__init__.py
|
Python
|
gpl-3.0
| 15,950
| 0.005078
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import fnmatch
import os
import re
import subprocess
import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible import errors
from ansible import utils
class Inventory(object):
"""
Host inventory for ansible.
"""
__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
'_pattern_cache', '_vars_plugins', '_playbook_basedir']
def __init__(self, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
self._groups_list = {}
self._pattern_cache = {}
# to be set by calling set_playbook_basedir by ansible-playbook
self._playbook_basedir = None
# the inventory object holds a list of groups
self.groups = []
# a list of host(names) to contain current inquiries to
self._restriction = None
self._also_restriction = None
self._subset = None
if isinstance(host_list, basestring):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
if host_list is None:
self.parser = None
elif isinstance(host_list, list):
self.parser = None
all = Group('all')
self.groups = [ all ]
ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
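            # Matches bracketed IPv6 literals such as "[fe80::1]:5555", capturing the address
            # and an optional port (explanatory comment added; the example address is illustrative).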
for x in host_list:
m = ipv6_re.match(x)
if m:
all.add_host(Host(m.groups()[0], m.groups()[1]))
else:
if ":" in x:
tokens = x.rsplit(":", 1)
# if there is ':' in the address, then this is a ipv6
if ':' in tokens[0]:
all.add_host(Host(x))
else:
all.add_host(Host(tokens[0], tokens[1]))
else:
all.add_host(Host(x))
elif os.path.exists(host_list):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(filename=host_list)
self.groups = self.parser.groups.values()
elif utils.is_executable(host_list):
self.parser = InventoryScript(filename=host_list)
self.groups = self.parser.groups.values()
else:
self.parser = InventoryParser(filename=host_list)
self.groups = self.parser.groups.values()
utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
def _match(self, str, pattern_str):
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
def get_hosts(self, pattern="all"):
"""
find all host names matching a pattern string, taking into account any inventory restrictions or
applied subsets.
"""
# process patterns
if isinstance(pattern, list):
pattern = ';'.join(pattern)
patterns = pattern.replace(";",":").split(":")
hosts = self._get_hosts(patterns)
# exclude hosts not in a subset, if defined
if self._subset:
subset = self._get_hosts(self._subset)
new_hosts = []
for h in hosts:
if h in subset and h not in new_hosts:
new_hosts.append(h)
hosts = new_hosts
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
hosts = [ h for h in hosts if h.name in self._restriction ]
if self._also_restriction is not None:
hosts = [ h for h in hosts if h.name in self._also_restriction ]
return hosts
def _get_hosts(self, patterns):
"""
finds hosts that match a list of patterns. Handles negative
matches as well as intersection matches.
"""
# Host specifiers should be sorted to ensure consistent behavior
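        # Illustrative (assumed) input: ['webservers', '&production', '!atlanta'] keeps hosts
        # that are in 'webservers' and 'production' but not in 'atlanta'.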
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
else:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
patterns = pattern_regular + pattern_intersection + pattern_exclude
hosts = []
for p in patterns:
that = self.__get_hosts(p)
if p.startswith("!"):
hosts = [ h for h in hosts if h not in that ]
elif p.startswith("&"):
hosts = [ h for h in hosts if h in that ]
else:
for h in that:
if h not in hosts:
hosts.append(h)
return hosts
def __get_hosts(self, pattern):
"""
        finds hosts that positively match a particular pattern. Does not
take into account negative matches.
"""
if pattern in self._pattern_cache:
return self._pattern_cache[pattern]
(name, enumeration_details) = self._enumeration_info(pattern)
hpat = self._hosts_in_unenumerated_pattern(name)
result = self._apply_ranges(pattern, hpat)
self._pattern_cache[pattern] = result
return result
def _enumeration_info(self, pattern):
"""
returns (pattern, limits) taking a regular pattern and finding out
which parts of it correspond to start/stop offsets. limits is
a tuple of (start, stop) or None
"""
if not "[" in pattern or pattern.startswith('~'):
return (pattern, None)
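        # Illustrative (assumed) results: 'webservers[0]' -> ('webservers', (0, 0)),
        # 'webservers[0-25]' -> ('webservers', ('0', '25')), '~web.*' -> ('~web.*', None).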
(first, rest) = pattern.split("[")
rest = rest.replace("]","")
try:
# support selectors like webservers[0]
x = int(rest)
return (first, (x,x))
except:
pass
if "-" in rest:
(left, right) = rest.split("-",1)
return (first, (left, right
|
Saturn/champsleagueviz
|
qualify/dl.py
|
Python
|
mit
| 1,765
| 0.006799
|
import re, requests, csv, time, traceback
from bs4 import BeautifulSoup
teams = []
for group in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:
try:
url = "http://www.oddschecker.com/football/champions-league/champions-league-group-%s/to-qualify" % group
print "getting {}".format(url)
soup = BeautifulSoup(requests.get(url, cookies={"odds_type":"decimal"}).text)
table = soup.find(attrs={"class":"eventTable"})
sitesrow = table.find_all("tr", {"class": "eventTableHeader"})
sitelinks = sitesrow[0].find_all(lambda t: t.has_attr("title"))
sites = [t["title"] for t in sitelinks]
teamrows = table.find_all(attrs={"class": "eventTableRow"})
for row in teamrows:
cols = [t.text for t in row.find_all("td")]
            name = cols[1]
if 'any other' in name.lower(): continue
odds = []
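            # Odds cells are the <td> elements whose class is 'o', 'oi' or 'oo'; blank or '-'
            # entries become None so each column still lines up with its bookmaker.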
isanodd = lambda t: (t.name=="td" and t.has_attr("class") and
('o' in t.attrs["class"] or
'oi' in t.attrs["class"] or
'oo' in t.attrs["class"]))
rawodds = [t.text for t in row.find_all(isanodd)]
for o in rawodds:
                if not o or '-' in o: odds.append(None)
else: odds.append(float(o))
assert len(odds) == len(sites), "{} {}".format(odds, sites)
teams.append([name, group] + odds)
except:
print "Unexpected error. skipping"
traceback.print_exc()
t = str(time.time()).split(".")[0]
with file("raw/odds%s.csv" % t, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['name', 'group'] + sites)
for row in teams:
w.writerow(row)
|
damien-dg/horizon
|
openstack_dashboard/contrib/sahara/api/sahara.py
|
Python
|
apache-2.0
| 15,639
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from saharaclient.api.base import APIException
from saharaclient import client as api_client
LOG = logging.getLogger(__name__)
# "type" of Sahara service registered in keystone
SAHARA_SERVICE = 'data-processing'
# Sahara service_type registered in Juno
SAHARA_SERVICE_FALLBACK = 'data_processing'
SAHARA_AUTO_IP_ALLOCATION_ENABLED = getattr(
settings,
'SAHARA_AUTO_IP_ALLOCATION_ENABLED',
False)
VERSIONS = base.APIVersionManager(
SAHARA_SERVICE,
preferred_version=getattr(settings,
'OPENSTACK_API_VERSIONS',
{}).get(SAHARA_SERVICE, 1.1))
VERSIONS.load_supported_version(1.1, {"client": api_client,
"version": 1.1})
def safe_call(func, *args, **kwargs):
"""Call a function ignoring Not Found error
This method is supposed to be used only for safe retrieving Sahara
objects. If the object is no longer available the None should be
returned.
"""
try:
return func(*args, **kwargs)
except APIException as e:
if e.error_code == 404:
return None # Not found. Exiting with None
raise # Other errors are not expected here
@memoized
def client(request):
try:
service_type = SAHARA_SERVICE
sahara_url = base.url_for(request, service_type)
except exceptions.ServiceCatalogException:
# if no endpoint found, fallback to the old service_type
service_type = SAHARA_SERVICE_FALLBACK
sahara_url = base.url_for(request, service_type)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return api_client.Client(VERSIONS.get_active_version()["version"],
sahara_url=sahara_url,
service_type=service_type,
project_id=request.user.project_id,
input_auth_token=request.user.token.id,
insecure=insecure,
cacert=cacert)
def image_list(request, search_opts=None):
return client(request).images.list(search_opts=search_opts)
def image_get(request, image_id):
return client(request).images.get(id=image_id)
def image_unregister(request, image_id):
client(request).images.unregister_image(image_id=image_id)
def image_update(request, image_id, user_name, desc):
client(request).images.update_image(image_id=image_id,
user_name=user_name,
desc=desc)
def image_tags_update(request, image_id, image_tags):
client(request).images.update_tags(image_id=image_id,
new_tags=image_tags)
def plugin_list(request, search_opts=None):
return client(request).plugins.list(search_opts=search_opts)
def plugin_get(request, plugin_name):
return client(request).plugins.get(plugin_name=plugin_name)
def plugin_get_version_details(request, plugin_name, hadoop_version):
return client(request).plugins.get_version_details(
plugin_name=plugin_name,
hadoop_version=hadoop_version)
def plugin_convert_to_template(request, plugin_name, hadoop_version,
template_name, file_content):
return client(request).plugins.convert_to_cluster_template(
plugin_name=plugin_name,
hadoop_version=hadoop_version,
template_name=template_name,
filecontent=file_content)
def nodegroup_template_create(request, name, plugin_name, hadoop_version,
flavor_id, description=None,
volumes_per_node=None, volumes_size=None,
node_processes=None, node_configs=None,
floating_ip_pool=None, security_groups=None,
auto_security_group=False,
availability_zone=False,
volumes_availability_zone=False,
volume_type=None,
is_proxy_gateway=False,
volume_local_to_instance=False,
use_autoconfig=None):
return client(request).node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
description=description,
volumes_per_node=volumes_per_node,
volumes_size=volumes_size,
node_processes=node_processes,
node_configs=node_configs,
floating_ip_pool=floating_ip_pool,
security_groups=security_groups,
auto_security_group=auto_security_group,
availability_zone=availability_zone,
volumes_availability_zone=volumes_availability_zone,
volume_type=volume_type,
is_proxy_gateway=is_proxy_gateway,
volume_local_to_instance=volume_local_to_instance,
use_autoconfig=use_autoconfig)
def nodegroup_template_list(request, search_opts=None):
    return client(request).node_group_templates.list(search_opts=search_opts)
def nodegroup_template_get(request, ngt_id):
return client(request).node_group_templates.get(ng_template_id=ngt_id)
def nodegroup_template_find(request, **kwargs):
return client(request).node_group_templates.find(**kwargs)
def nodegroup_template_delete(request, ngt_id):
    client(request).node_group_templates.delete(ng_template_id=ngt_id)
def nodegroup_template_update(request, ngt_id, name, plugin_name,
hadoop_version, flavor_id,
description=None, volumes_per_node=None,
volumes_size=None, node_processes=None,
node_configs=None, floating_ip_pool=None,
security_groups=None, auto_security_group=False,
availability_zone=False,
volumes_availability_zone=False,
volume_type=None,
is_proxy_gateway=False,
volume_local_to_instance=False,
use_autoconfig=None):
return client(request).node_group_templates.update(
ng_template_id=ngt_id,
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
description=description,
volumes_per_node=volumes_per_node,
volumes_size=volumes_size,
node_processes=node_processes,
node_configs=node_configs,
floating_ip_pool=floating_ip_pool,
security_groups=security_groups,
auto_security_group=auto_security_group,
availability_zone=availability_zone,
volumes_availability_zone=volumes_availability_zone,
volume_type=volume_type,
is_proxy_gateway=is_proxy_gateway,
volume_local_to_instance=volume_local_to_instance,
use_autoconfig=use_autoconfig)
def cluster_template_create(request, name, plugin_name, hadoop_version,
description=None, cluster_configs=None,
node_groups=None, anti_affinity=None,
net_id=None, use_autoconfig=None):
return client(request).cluster_templates.create(
name=name,
|
Andrey-Tkachev/infection
|
models/room.py
|
Python
|
mit
| 6,066
| 0.000989
|
from bson.objectid import ObjectId
import json
class Room():
def __init__(self, players_num, objectid, table, current_color='purple'):
if players_num:
            self.players_num = players_num
else:
self.players_num = 0
for el in ['p', 'b', 'g', 'r']:
if el in table:
self.players_num += 1
self.objectid = objectid
self.current_color = current_color
self.players_dict = {}
self.alredy_ex = []
self.colors = []
self.winner = None
for col in ['p', 'b', 'g', 'r']:
if col in table:
self.colors.append(
{'p': 'purple',
'b': 'blue',
'g': 'green',
'r': 'red'}[col])
if current_color in self.colors:
self.current_color = current_color
else:
self.current_color = self.colors[0]
self.users_nicks = {}
self.color_player_dict = {'purple': None, 'blue': None, 'green': None, 'red': None}
self.player_color_dict = {}
self.status = 'waiting'
def get_player_by_color(self, color):
if color in self.color_player_dict:
return self.players_dict[self.color_player_dict[color]]
return None
def get_color_by_player(self, player_id):
if player_id in self.player_color_dict:
return self.player_color_dict[player_id]
return None
def add_player(self, player_id, name):
self.players_dict[player_id] = False
self.users_nicks[player_id] = name
for color in self.colors:
if not self.color_player_dict[color]:
self.color_player_dict[color] = player_id
self.player_color_dict[player_id] = color
break
def dell_player(self, player_id):
self.players_dict[player_id] = False
return self
def change_row(self, row, i, to):
return row[:i] + to + row[i + 1:]
def update_table(self, move, table):
print('Table updating')
pymove = json.loads(move)
pytable = json.loads(table)
print('Old table:')
for row in pytable:
print(' ', row)
x0, y0 = int(pymove['X0']), int(pymove['Y0'])
x1, y1 = int(pymove['X1']), int(pymove['Y1'])
print('Move from ({}, {}) to ({}, {})'.format(x0, y0, x1, y1))
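        # Move rules as implemented below: a jump of more than one cell vacates the origin,
        # a one-cell move clones the piece, and every occupied neighbour of the destination
        # is converted to the current player's colour.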
if ((abs(x1 - x0) > 1) or (abs(y1 - y0) > 1)):
pytable[x0] = self.change_row(pytable[x0], y0, 'e')
for i in range(-1, 2):
for j in range(-1, 2):
if (x1 + i < len(pytable)) and (x1 + i > -1):
if (y1 + j < len(pytable[x1])) and (y1 + j > -1):
if pytable[x1 + i][y1 + j] != 'e':
pytable[x1 + i] = self.change_row(pytable[x1 + i], y1 + j, self.current_color[0].lower())
pytable[x1] = self.change_row(pytable[x1], y1, self.current_color[0].lower())
res = json.dumps(pytable)
if 'e' not in res:
r_count = (res.count('r'), 'red')
b_count = (res.count('b'), 'blue')
g_count = (res.count('g'), 'green')
p_count = (res.count('p'), 'purple')
sort_list = [r_count, b_count, p_count, g_count]
sort_list.sort()
self.winner = sort_list[-1][1]
print('New table:')
for row in pytable:
print(' ', row)
return res
def can_move(self, table):
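        # A colour can still move while any of its pieces has an empty cell within two steps,
        # i.e. inside the 5x5 neighbourhood scanned below (explanatory comment added).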
pytable = json.loads(table)
for row_id, row in enumerate(pytable):
for char_id in range(len(row)):
char = row[char_id]
if char == self.current_color[0].lower():
for i in range(-2, 3):
for j in range(-2, 3):
if (row_id + i < len(pytable)) and (row_id + i > -1):
if (char_id + j < len(row)) and (char_id + j > -1):
if pytable[row_id + i][char_id + j] == 'e':
return True
return False
def change_color(self, table):
        print('Color changing')
colors = self.colors
self.current_color = colors[
(colors.index(self.current_color) + 1) % self.players_num]
i = 1
while ((not self.players_dict[self.color_player_dict[self.current_color]]) or (not self.can_move(table))) and (i <= 5):
self.current_color = colors[
(colors.index(self.current_color) + 1) % self.players_num]
i += 1
if not self.can_move(table):
return None
return self.current_color
class RoomsManager():
def __init__(self, db):
# dict of rooms by their obj_id
self.db = db
self.rooms_dict = {}
def get_room(self, objectid):
if objectid not in self.rooms_dict:
rid = objectid
room_in_db = self.db.rooms.find_one({'_id': ObjectId(rid)})
if room_in_db:
print('Room', objectid, 'extrapolated from db')
new_room = Room(
int(room_in_db['players_num']), rid, room_in_db['table'])
new_room.current_color = room_in_db['current_color']
for user_id in room_in_db['players']:
player = room_in_db['players'][user_id]
new_room.color_player_dict[player['color']] = user_id
new_room.player_color_dict[user_id] = player['color']
new_room.users_nicks[user_id] = player['nick']
new_room.players_dict[user_id] = None
self.rooms_dict[rid] = new_room
else:
return None
return self.rooms_dict[objectid]
def add_room(self, room):
self.rooms_dict[room.objectid] = room
def rooms(self):
for objectid in self.rooms_dict:
yield self.rooms_dict[objectid]
|
trolleway/domofoto_parser
|
parce_city.py
|
Python
|
cc0-1.0
| 4,024
| 0.044757
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# anchor extraction from html document
from bs4 import BeautifulSoup
import urllib2
import urlparse
import re
import csv
import time
cityId='712'
webpage = urllib2.urlopen('http://domofoto.ru/list.php?cid='+cityId)
soup = BeautifulSoup(webpage)
maxST=0
for element in soup.find_all('a'):
url=element.get('href', '/')
if url.find('st')>0: #TODO rename
par = urlparse.parse_qs(urlparse.urlparse(url).query)
currentST=int(par['st'][0])
if currentST > maxST:
maxST=currentST
#print 'max st='+str(maxST)
# got the offset of the last listing page for the city
# build the list of URLs of the individual house pages for the city
recPerPage=30
pagesCount = maxST // recPerPage
housesPages=[]
for pageST in range(0,pagesCount+1): #
url="http://domofoto.ru/list.php?cid="+cityId+"&st="+str(pageST*recPerPage)
#print url
housesPages.append(url)
#print housesPages
housesIds=[]
housenumber=0
allhousescnt=pagesCount*recPerPage
for housePage in housesPages:
webpage = urllib2.urlopen(housePage)
soup = BeautifulSoup(webpage)
for element in soup.find_all('a'):
url=element.get('href', '/')
if url.find('house')>0: #TODO rename
#print url
houseId=url[7:-1]
#print houseId
housesIds.append(houseId)
#print housesIds
webpage=0
from time import gmtime, strftime
csvFileName='domofoto_'+strftime("%Y-%m-%d-%H-%M-%S", gmtime())
writer = csv.writer(open(csvFileName+'.csv', 'w'))
writer.writerow(['x','y','projectCode','projectName','seriesCode','seriesName','constructionStartDate','constructionEndDate','mode','levels'])
#write vrt file for open csv in ogr2ogr
vrt_file='''
<OGRVRTDataSource>
<OGRVRTLayer name="'''+csvFileName+'''">
<LayerSRS>WGS84</LayerSRS>
<SrcDataSource>'''+csvFileName+'''.csv</SrcDataSource>
<GeometryType>wkbPoint</GeometryType>
<GeometryField encoding="PointFromColumns" x="x" y="y"/>
</OGRVRTLayer>
</OGRVRTDataSource>
'''
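# The VRT wrapper above lets ogr2ogr read the CSV as WGS84 point geometry (x/y columns),
# which the ogr2ogr command printed at the end of the script relies on.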
vrtf = open(csvFileName+".vrt","w")
vrtf.write(vrt_file)
vrtf.close()
for houseId in housesIds:
housenumber=housenumber+1 #for progress display
housePageURL='http://domofoto.ru/house/'+houseId+'/'
print housePageURL + ' ' +str(housenumber) + '/'+str(allhousescnt)
constructionEndDate=''
constructionStartDate=''
seriesCode=''
seriesName=''
projectCode=''
projectName=''
mode=''
levels=''
webpage = urllib2.urlopen(housePageURL)
#soup = BeautifulSoup(webpage)
html = webpage.read()
coordsPart=re.search('initialize\(\[(.+?), ',html)
if coordsPart:
y = coordsPart.group(1)
coordsPart=re.search(',(.+?)\], true',html)
if coordsPart:
x = coordsPart.group(1)
coordsPart=re.search('Проект.*projects/(.+?)/',html)
if coordsPart:
projectCode = coordsPart.group(1)
coordsPart=re.search('Серия.*projects/(.+?)/',html)
if coordsPart:
seriesCode = coordsPart.group(1)
coordsPart=re.search('Проект.*proj.*>(.+?)</a>',html)
if coordsPart:
projectName = coordsPart.group(1)
coordsPart=re.search('Серия.*proj.*>(.+?)</a>',html)
if coordsPart:
seriesName = coordsPart.group(1)
coordsPart=re.search('Окончание.*строительства.*<b>(.+?)</b>',html)
if coordsPart:
constructionEndDate = coordsPart.group(1)
coordsPart=re.search('Начало.*строительства.*<b>(.+?)</b>',html)
if coordsPart:
constructionStartDate = coordsPart.group(1)
coordsPart=re.search('Текущее состояние.* (.+?) </td></tr>',html)
if coordsPart:
mode = coordsPart.group(1)
coordsPart=re.search('Этажность.*d">(.+?)</td></tr>',html)
if coordsPart:
levels = coordsPart.group(1)
row=[x,y,projectCode,projectName,seriesCode,seriesName,constructionStartDate,constructionEndDate,mode,levels]
writer.writerow(row)
#quit()
#print html
ogr2ogrString='''ogr2ogr -overwrite -f "GeoJSON" '''+csvFileName+'''.geojson '''+csvFileName+'''.vrt'''
print ogr2ogrString
|
mcclurmc/juju
|
juju/providers/orchestra/tests/test_bootstrap.py
|
Python
|
agpl-3.0
| 5,474
| 0
|
from xmlrpclib import Fault
from yaml import dump
from twisted.internet.defer import succeed, inlineCallbacks
from juju.errors import ProviderError
from juju.lib.testing import TestCase
from juju.providers.orchestra.machine import OrchestraMachine
from juju.providers.orchestra.tests.common import OrchestraTestMixin
class OrchestraBootstrapTest(TestCase, OrchestraTestMixin):
def mock_verify(self):
self.mock_fs_put("http://somewhe.re/webdav/bootstrap-verify",
"storage is writable")
def mock_save_state(self):
data = dump({"zookeeper-instances": ["winston-uid"]})
self.mock_fs_put("http://somewhe.re/webdav/provider-state", data)
def mock_surprise_shutdown(self):
self.proxy_m.callRemote("get_systems")
self.mocker.result(succeed([{
"uid": "winston-uid",
"ks_meta": {
"MACHINE_ID": "blah",
"USER_DATA_BASE64": "userdata",
"KEEP": "keep"},
"mgmt_classes": ["acquired", "PRESERVE"]}]))
self.proxy_m.callRemote("find_system", {"uid": "winston-uid"})
self.mocker.result(succeed(["winston"]))
self.proxy_m.callRemote("get_system_handle", "winston", "TOKEN")
self.mocker.result(succeed("some-handle"))
self.proxy_m.callRemote(
"modify_system", "some-handle", "ks_meta", {"KEEP": "keep"},
"TOKEN")
self.mocker.result(succeed(True))
        self.proxy_m.callRemote(
            "modify_system", "some-handle",
"mgmt_classes", ["available", "PRESERVE"], "TOKEN")
self.mocker.result(succeed(True))
self.proxy_m.callRemote(
"modify_system", "some-handle", "netboot_enabled", True, "TOKEN")
self.mocker.result(succeed(True))
        self.proxy_m.callRemote("save_system", "some-handle", "TOKEN")
self.mocker.result(succeed(True))
self.proxy_m.callRemote(
"background_power_system",
{"power": "off", "systems": ["winston"]}, "TOKEN")
self.mocker.result(succeed("ignored"))
def test_already_bootstrapped(self):
self.setup_mocks()
self.mock_find_zookeepers(("winston-uid", "winston"))
self.mocker.replay()
def verify_machines(machines):
(machine,) = machines
self.assertTrue(isinstance(machine, OrchestraMachine))
self.assertEquals(machine.instance_id, "winston-uid")
d = self.get_provider().bootstrap()
d.addCallback(verify_machines)
return d
def test_no_machines_available(self):
self.setup_mocks()
self.mock_find_zookeepers()
self.mock_verify()
self.mock_get_systems(acceptable=False)
self.mocker.replay()
d = self.get_provider().bootstrap()
self.assertFailure(d, ProviderError)
def verify_auth_error(self, error):
self.setup_mocks()
self.mock_find_zookeepers()
self.mock_verify()
self.mock_get_systems()
self.mock_acquire_system(error)
self.mocker.replay()
d = self.get_provider().bootstrap()
self.assertFailure(d, type(error))
def test_non_auth_fault(self):
return self.verify_auth_error(Fault("blah", "some random error"))
def test_non_auth_error(self):
return self.verify_auth_error(Exception("fiddlesticks"))
@inlineCallbacks
def verify_change_failures(self, **kwargs):
log = self.capture_logging("juju.orchestra")
self.setup_mocks()
self.mock_find_zookeepers()
self.mock_verify()
self.mock_get_systems()
self.mock_acquire_system()
self.mock_start_system(
self.get_verify_ks_meta(0, "bootstrap_user_data"), **kwargs)
self.mock_surprise_shutdown()
self.mocker.replay()
d = self.get_provider().bootstrap()
yield self.assertFailure(d, ProviderError)
self.assertIn(
"Failed to launch machine winston-uid; attempting to revert.",
log.getvalue())
def test_cannot_modify_machine(self):
"""
Check that failures when launching the machine cause an (attempt to)
roll back to an unacquired state.
"""
return self.verify_change_failures(fail_modify=True)
def test_cannot_save_machine(self):
"""
Check that failures when launching the machine cause an (attempt to)
roll back to an unacquired state.
"""
return self.verify_change_failures(fail_save=True)
def test_launch_available_machine(self):
self.setup_mocks()
self.mock_find_zookeepers()
self.mock_verify()
self.mock_get_systems()
self.mock_acquire_system()
self.mock_start_system(
self.get_verify_ks_meta(0, "bootstrap_user_data"))
self.mock_describe_systems(succeed([{
"uid": "winston-uid",
"name": "winston",
"mgmt_classes": ["acquired"],
"netboot_enabled": True}]))
self.mock_save_state()
self.mocker.replay()
def verify_machines(machines):
(machine,) = machines
self.assertTrue(isinstance(machine, OrchestraMachine))
self.assertEquals(machine.instance_id, "winston-uid")
self.assertEquals(machine.dns_name, "winston")
d = self.get_provider().bootstrap()
d.addCallback(verify_machines)
return d
|
eustislab/horton
|
horton/io/test/test_cp2k.py
|
Python
|
gpl-3.0
| 2,512
| 0.004379
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from horton import *
def test_atom_si_uks():
fn_out = context.get_fn('test/atom_si.cp2k.out')
mol = IOData.from_file(fn_out)
assert (mol.numbers == [14]).all()
assert (mol.pseudo_numbers == [4]).all()
assert (mol.exp_alpha.occupations == [1, 1, 1, 0]).all()
assert (mol.exp_beta.occupations == [1, 0, 0, 0]).all()
assert abs(mol.exp_alpha.energies - [-0.398761, -0.154896, -0.154896, -0.154896]).max() < 1e-4
assert abs(mol.exp_beta.energies - [-0.334567, -0.092237, -0.092237, -0.092237]).max() < 1e-4
assert abs(mol.energy - -3.761587698067) < 1e-10
assert (mol.obasis.shell_types == [0, 0, 1, 1, -2]).all()
olp = mol.obasis.compute_overlap(mol.lf)
ca = mol.exp_alpha.coeffs
cb = mol.exp_beta.coeffs
assert abs(np.diag(olp._array[:2,:2]) - np.array([0.42921199338707744, 0.32067871530183140])).max() < 1e-5
    assert abs(np.dot(ca.T, np.dot(olp._array, ca)) - np.identity(4)).max() < 1e-5
assert abs(np.dot(cb.T, np.dot(olp._array, cb)) - np.identity(4)).max() < 1e-5
def test_atom_o_rks():
fn_out = context.get_fn('test/atom_om2.cp2k.out')
mol = IOData.from_file(fn_out)
assert (mol.numbers == [8]).all()
assert (mol.pseudo_numbers == [6]).all()
assert (mol.exp_alpha.occupations == [1, 1, 1, 1]).all()
assert abs(mol.exp_alpha.energies - [0.102709, 0.606458, 0.606458, 0.606458]).max() < 1e-4
assert abs(mol.energy - -15.464982778766) < 1e-10
assert (mol.obasis.shell_types == [0, 0, 1, 1, -2]).all()
olp = mol.obasis.compute_overlap(mol.lf)
ca = mol.exp_alpha.coeffs
assert abs(np.dot(ca.T, np.dot(olp._array, ca)) - np.identity(4)).max() < 1e-5
|
krakky/market
|
cloudera_cdh/bin/ubuntu/xenial/12-activate-parcel.py
|
Python
|
apache-2.0
| 1,599
| 0.026892
|
#!/usr/bin/python
from cm_api.api_client import ApiResource
import sys
import time
api = ApiResource(sys.argv[1], 7180, "acm", "SCALE42secretly", version=15)
cluster = None
try:
cluster = api.get_cluster(name = "ACM Cluster")
except Exception, e:
    if e.message[-10:-1].lower() == "not found":
print "<ACM CLUSTER> NOT FOUND ! - not proceeding any further..."
exit()
#Find available parcels...
available_parcels = cluster.get_all_parcels()
CDH_TARGET = None
for p in available_parcels:
if p.product.lower() == "cdh" and p.version[:1] == "5":
        CDH_TARGET = { "name" : p.product, "version" : p.version }
break
if CDH_TARGET is not None:
parcel = cluster.get_parcel(CDH_TARGET['name'] , CDH_TARGET['version'])
if parcel.stage == "ACTIVATED":
print "Parcel <{0}-v{1}> is already <ACTIVATED> across the entire cluster !".format(CDH_TARGET['name'] , CDH_TARGET['version'])
elif parcel.stage == "DISTRIBUTED":
try:
print "Activating <{0}-v{1}> parcel across the cluster !".format(CDH_TARGET['name'] , CDH_TARGET['version'])
parcel.activate()
time.sleep(10)
#Restart the ACM cluster
print "Restarting <{0}> cluster through the cloudera manager !".format(cluster.name)
cluster.stop().wait()
cluster.start().wait()
            print "Ready to start rolling with Cloudera Manager and <ACM Cluster> !"
except Exception, e:
print "Unable to activate parcel <{0}-v{1}> and restart cluster !!! reason : {2}".format(CDH_TARGET['name'] , CDH_TARGET['version'], e.message)
else:
print "We were unable to target any CDH-5 parcel available remotely !"
|
bobrock/eden
|
languages/ja.py
|
Python
|
mit
| 353,167
| 0.022626
|
# -*- coding: utf-8 -*-
{
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'この地域を地理的に指定するロケーション。これはロケーションの階層構造のうちの一つか、ロケーショングループの一つか、この地域の境界に面するロケーションです。',
"Acronym of the organization's name, eg. IFRC.": '団体の略称 (IFRCなど)',
"Authenticate system's Twitter account": '認証システムの Twitter アカウント',
"Can't import tweepy": 'tweepyをインポートできません',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "救援要請と寄付項目を関連付けるには、項目左の'寄付'ボタンを押してください。",
"Couldn't import tweepy library": 'tweepy libraryをインポートできません',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": 'サイトの所在地住所を詳細に記述します。情報伝達と物品搬送に使用します。このサイトに関する情報を、以下の「ロケーション」項目にGIS/地図データを挿入できることに注意してください。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'もしこの設定が地域メニューにある地域を指しているのであれば、メニューで使う名前を設定してください。個人用の地図設定の名前では、ユーザの名前で設定されます。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとしてアサインされるように指定することができます。ただし、ユーザーのドメインと団体のドメイン項目に差異がない場合のみ有効です。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'この項目の内容はユーザーの基本所在地となり、ユーザーが地図上に表示されるようになります。',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'この設定が有効の場合、削除されたレコードには削除済みフラグが付与されるだけで、実際のデータは消去されません。一般のユーザが閲覧することはできませんが、データベースを直接参照することでデータを確認できます。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '行方不明者の登録が存在しない場合、「人物情報を追加」ボタンを押して、新規登録を行ってください。',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'リストに病院が表示されない場合、「病院情報を追加」することで新規に登録が可能です。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": 'オフィスが一覧にない場合は、「オフィスを追加」をクリックすることで新規のオフィスを追加できます。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "もしあなたの団体の登録がない場合、'団体を追加'リンクをクリックすることで追加が可能です",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'データを同期する際には、ネットワークを経由してではなく、ファイルから行うことも可能です。ネットワークが存在しない場合に利用されます。ファイルからのデータインポート、およびファイルへのエクスポートはこのページから実行可能です。右部のリンクをクリックしてください。',
"Level is higher than parent's": '親情報よりも高いレベルです',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "注意: SMS は'アクション可能'のためリクエストがフィルターされます。一方、ツイートのリクエストはフィルターされません。よって、これは検索する手段となります",
"Need a 'url' argument!": "'url'引数が必要です。",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "オプション項目。ジオメトリカラムの名称です。PostGISでのデフォルト値は 'the_geom'となります。",
"Parent level should be higher than this record's level. Parent level is": '親レベルは、このレコードのレベルより上位でなければなりません。親レベルは',
"Password fields don't match": 'パスワードが一致しません。',
"Phone number to donate to this organization's relief efforts.": 'この団体の救援活動に対して寄付を行う際の連絡先となる電話番号を記載します。',
"Please come back after sometime if that doesn't help.": 'この方法で問題が解決しない場合は、しばらく時間を置いて再度アクセスしてください。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "'Delete Old'ボタンを押すことで、データを参照しているレコードは全て参照先を再指定され、古い方のレコードは削除されます。",
"Quantity in %s's Inventory": '%s 倉庫にある量',
"Search here for a person's record in order to:": '人物情報の検索を行い、以下の機能を実現します:',
"Select a person in charge for status 'assigned'": "状況が '割り当て済み' である担当者を選択します",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For
|
example, if 'district' is the smallest division in the hierarchy, then all specific loc
|
ations would be required to have a district as a parent.": 'もし全ての特定の場所が住所階層の最下層で親の場所を必要とするなら、これを選択して下さい。例えば、もし「地区」が階層の最小の地域なら、全ての特定の場所は親階層の地区を持っている必要が有るでしょう。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'もし全ての特定の場所が住所階層での親の場所を必要とするなら、これを選択して下さい。これは被災地の「地域」表示の設定に役立てられます。',
"Sorry, things didn't get done on time.": 'すいません、時間通りに行われていません。',
"Sorry, we couldn't find that page.": 'すいません、お探しのページは見つかりませんでした。',
"System's Twitter account updated": 'システムのTwitterアカウントを変更しました',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "この線、あるいは面の<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>具体的な説明</a>",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": 'このプロジェクトの資金提供組織を選択します。複数の項目を選択するには、Ctrlキーを押しながらクリックしてください。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": 'この団体の活動分野を選択します。複数の項目を選択するには、コントロールキーを押しながらクリックしてください。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '画像ファイルのURLです。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"The person's manager within this Office/Project.": 'このオフィス/プロジェクトのマネージャ。',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '遺体の検索を行うには、遺体のID番号を入力してください。検索時のワイルドカード文字として、%を使うことができます。入力せずに「検索」すると、全ての遺体が表示されます。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'ID情報を入力することで、遺体を検索します。ワイルドカードとして % が使用できます。何も指定せずに「検索」すると、全ての遺体が表示されます。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "病院を検索するには、名前、病院のID、団体名、省略名のいずれかをスペース(空白)で区切って入力してください。 % がワイルドカードとして使えます。全病院のリストを表示するにはなにも入力せずに '検索' ボタンを押してください。",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '探し出したい病院をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての病院を表示します。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '病院を検索するには、名称の一部かIDを入力してください。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」を押した場合、全ての病院を表示します。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "ロケーションを検索するには、名前を入力します。%をワイルドカード文字として使用することが出来ます。何も入力しないで '検索' をクリックするとすべてのロケーションが表示されます。",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '苗字、名前などを半角スペースで区切って入力し、人物検索して下さい。「%」を使うとファジー検索できます。何も入力せずに検索すれば、全ての情報を検索表示します。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '人を検索するためには、お名前(苗字、名前または両方)を入力してください。また姓名の間にはスペースをいれてください。ワイルドカードとして % が使えます。すべての人物情報をリストするには、検索ボタンをおしてください。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '探し出したい支援要請をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入
|
j-mracek/dnf
|
tests/test_config.py
|
Python
|
gpl-2.0
| 5,503
| 0.000545
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
import argparse
import dnf.conf
import dnf.conf.read
import dnf.exceptions
from dnf.conf import Option, BaseConfig, Conf, RepoConf
import tests.support
from tests.support import mock
class CacheTest(tests.support.TestCase):
@mock.patch('dnf.util.am_i_root', return_value=True)
@mock.patch('dnf.const.SYSTEM_CACHEDIR', '/var/lib/spinning')
def test_root(self, unused_am_i_root):
conf = dnf.conf.Conf()
self.assertEqual(conf.system_cachedir, '/var/lib/spinning')
self.assertEqual(conf.cachedir, '/var/lib/spinning')
@mock.patch('dnf.yum.misc.getCacheDir',
return_value="/notmp/dnf-walr-yeAH")
@mock.patch('dnf.util.am_i_root', return_value=False)
@mock.patch('dnf.const.SYSTEM_CACHEDIR', '/var/lib/spinning')
def test_noroot(self, fn_root, fn_getcachedir):
self.assertEqual(fn_getcachedir.call_count, 0)
conf = dnf.conf.Conf()
self.assertEqual(conf.cachedir, '/notmp/dnf-walr-yeAH')
self.assertEqual(fn_getcachedir.call_count, 1)
class ConfTest(tests.support.TestCase):
def test_bugtracker(self):
conf = Conf()
self.assertEqual(conf.bugtracker_url,
"https://bugzilla.redhat.com/enter_bug.cgi" +
"?product=Fedora&component=dnf")
def test_conf_from_file(self):
conf = Conf()
# defaults
self.assertFalse(conf.gpgcheck)
self.assertEqual(conf.installonly_limit, 3)
self.assertTrue(conf.clean_requirements_on_remove)
conf.config_file_path = '%s/etc/dnf/dnf.conf' % tests.support.dnf_toplevel()
conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
self.assertTrue(conf.gpgcheck)
self.assertEqual(conf.installonly_limit, 3)
self.assertTrue(conf.clean_requirements_on_remove)
def test_overrides(self):
conf = Conf()
self.assertFalse(conf.assumeyes)
self.assertFalse(conf.assumeno)
self.assertEqual(conf.color, 'auto')
opts = argparse.Namespace(assumeyes=True, color='never')
conf._configure_from_options(opts)
self.assertTrue(conf.assumeyes)
self.assertFalse(conf.assumeno) # no change
self.assertEqual(conf.color, 'never')
def test_order_insensitive(self):
conf = Conf()
conf.config_file_path = '%s/etc/dnf/dnf.conf' % tests.support.dnf_toplevel()
opts = argparse.Namespace(
gpgcheck=False,
main_setopts={'installonly_limit': ['5']}
)
# read config
conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
# update from commandline
conf._configure_from_options(opts)
self.assertFalse(conf.gpgcheck)
self.assertEqual(conf.installonly_limit, 5)
# and the other way round should have the same result
# update from commandline
conf._configure_from_options(opts)
# read config
conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
self.assertFalse(conf.gpgcheck)
self.assertEqual(conf.installonly_limit, 5)
def test_inheritance1(self):
conf = Conf()
repo = RepoConf(conf)
# minrate is inherited from conf
# default should be the same
self.assertEqual(conf.minrate, 1000)
self.assertEqual(repo.minrate, 1000)
# after conf change, repoconf still should inherit its value
conf.minrate = 2000
self.assertEqual(conf.minrate, 2000)
self.assertEqual(repo.minrate, 2000)
def test_inheritance2(self):
conf = Conf()
# if repoconf reads value from config it no more inherits changes from conf
conf.config_file_path = tests.support.resource_path('etc/repos.conf')
with mock.patch('logging.Logger.warning'):
reader = dnf.conf.read.RepoReader(conf, {})
repo = list(reader)[0]
self.assertEqual(conf.minrate, 1000)
self.assertEqual(repo.minrate, 4096)
# after global change
conf.minrate = 2000
self.assertEqual(conf.minrate, 2000)
self.assertEqual(repo.minrate, 4096)
def test_prepend_installroot(self):
conf = Conf()
conf.installroot = '/mnt/root'
conf.prepend_installroot('persistdir')
self.assertEqual(conf.persistdir, '/mnt/root/var/lib/dnf')
def test_ranges(self):
conf = Conf()
with self.assertRaises(dnf.exceptions.ConfigError):
conf.debuglevel = '11'
|
telefonicaid/fiware-facts
|
tests/acceptance/fiwarecloto_client/client.py
|
Python
|
apache-2.0
| 4,297
| 0.002095
|
# -*- coding: utf-8 -*-
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = "@jframos"
from qautils.http.headers_utils import set_representation_headers, HEADER_REPRESENTATION_JSON
from qautils.logger.logger_utils import get_logger
from keystoneclient.v2_0 import Client as KeystoneClient
from fiwarecloto_client.tenantid_resource import TenantIdResourceClient
__logger__ = get_logger(__name__)
# HEADERS
X_AUTH_TOKEN = "X-Auth-Token"
TENANT_ID = "Tenant-Id"
class ClotoClient():
def __init__(self, username, password, tenant_id, auth_url, api_protocol, api_host, api_port, api_resource):
"""
Init a new Client for CLOTO component.
:param username (string): The username (OpenStack)
:param password (string): The password
:param tenant_id (string): TenantID
:param auth_url (string): Keystone/IdM auth URL
:param api_protocol (string): API protocol
:param api_host (string): API host
:param api_port (string): API port
:param api_resource (string): API base resource
:return: None
"""
__logger__.info("Init CLOTO Client")
__logger__.debug("Client parameters: Username: %s, Password: %s, TenantId: %s, API protocol: %s, API host: %s, "
"API port: %s, Base resource: %s", username, password, tenant_id, api_protocol, api_host,
api_port, api_resource)
self.headers = dict()
self.api_protocol = api_protocol
self.api_host = api_host
self.api_port = api_port
self.api_resource = api_resource
set_representation_headers(self.headers, content_type=HEADER_REPRESENTATION_JSON,
accept=HEADER_REPRESENTATION_JSON)
self._init_keystone_client(username, password, tenant_id, auth_url)
self.token = self._get_auth_token()
__logger__.debug("Token: %s", self.token)
self.headers.update({X_AUTH_TOKEN: self.token})
self.headers.update({TENANT_ID: tenant_id})
__logger__.debug("Headers with OpenStack credentials: %s", self.headers)
def _init_keystone_client(self, username, password, tenant_id, auth_url):
"""
Init the keystone client to request token and endpoint data
:param string username: Username for authentication.
:param string password: Password for authentication.
:param string tenant_id: Tenant id.
:param string auth_url: Keystone service endpoint for authorization.
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
:return None
"""
__logger__.debug("Init Keystone Client")
        self.keystone_client = KeystoneClient(username=username, password=password, tenant_id=tenant_id,
auth_url=auth_url)
def _get_auth_token(self):
"""
Get token from Keystone
:return: Token (String)
"""
__logger__.debug("Getting auth Token")
return self.keystone_client.auth_ref['token']['id']
    def get_tenant_id_resource_client(self):
"""
Create an API resource REST client
:return: Rest client for 'TenantId' API resource
"""
__logger__.info("Creating TenantIdResource")
return TenantIdResourceClient(protocol=self.api_protocol, host=self.api_host,
port=self.api_port, resource=self.api_resource, headers=self.headers)
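# Hedged usage sketch (argument values are assumed, based only on the constructor above):
#   client = ClotoClient('user', 'password', 'tenant-id', 'http://keystone:5000/v2.0',
#                        'http', 'cloto-host', '8000', 'v1.0')
#   tenant_resource = client.get_tenant_id_resource_client()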
|
elamperti/bastardbot
|
webserver.py
|
Python
|
mit
| 507
| 0.013807
|
#!/usr/bin/env python
#import logging
from webserver import *
if __name__ == '__main__':
#logging.basicConfig(
# format="[%(asctime)s] %(name)s/%(levelname)-6s - %(message)s",
# level=logging.CRITICAL,
    #    datefmt='%Y-%m-%d %H:%M:%S'
#)
    # Only enable debug level for bbot
#logger = logging.getLogger('bastardbot')
#logger.setLevel(logging.DEBUG)
print("Initializing BastardBot web server...")
B = BastardBot()
print("BastardBot server stopped.")
|
jsbueno/sc.blueprints.soundcloud
|
sc/blueprints/soundcloud/setuphandlers.py
|
Python
|
gpl-2.0
| 386
| 0
|
# -*- coding: utf-8 -*-
import logging
# define here the methods needed to be run at install time
def importVarious(context):
if context.readDataFile('sc.blueprints.soundcloud_various.txt') is None:
return
logger = logging.getLogger('sc.blueprints.soundcloud')
# add here your custom methods that need to be run when
# sc.blueprints.soundcloud is installed
|
ronaldahmed/labor-market-demand-analysis
|
rule based major_extractor/count_custom_vhs.py
|
Python
|
mit
| 4,738
| 0.041368
|
import os, sys
import json
import copy
import numpy as np
import random
from multiprocessing import Pool
import ipdb
################################################################################################
utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts')
source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria'
#source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia'
#treemap_name = 'carreras_rubro_mina'
#adj_name = 'ing_total_adjmatrix'
treemap_name = 'carreras_mantto_mina'
adj_name = 'mantto_mina_adjmatrix'
class LabelDict(dict):
def __init__(self, label_names=[]):
self.names = []
for name in label_names:
self.add(name)
def add(self, name):
label_id = len(self.names)
if name in self:
#warnings.warn('Ignoring duplicated label ' + name)
return self[name]
self[name] = label_id
self.names.append(name)
return label_id
def get_label_name(self, label_id):
return self.names[label_id]
def get_label_id(self, name):
if name not in self:
return -1
return self[name]
def size(self):
return len(self.names)
################################################################################################
hierarchy = json.loads(open('carreras_ing2.json').read())
# docname : {docname : true name}
nameByFile = json.loads(open('ident_names2.json').read())
fileByName = {}
temp={}
for (file,name) in nameByFile.items():
temp[file.strip(' ')] = name.strip(' ')
fileByName[name.strip(' ')] = file.strip(' ')
nameByFile = dict(temp)
################################################################################################
def sorter(T,sizeById, file_dict):
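    # Recursive helper (summary added): fills each leaf's "size" from sizeById, sorts every
    # node's children by their subtree totals in descending order, and returns that total.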
if "children" not in T:
_id = file_dict.get_label_id(fileByName[T["name"]])
try:
T["size"] = int(sizeById[_id])
except:
T["size"] = sizeById[_id]
return float(T["size"])
children = T["children"]
temp = []
_total = 0
for child in children:
subt_sum = sorter(child,sizeById, file_dict)
_total += subt_sum
temp.append(subt_sum)
temp = list(zip(temp,range(len(children))))
temp.sort(reverse=True)
T["children"] = [children[k[1]] for k in temp]
return _total
def getSortedLeaves(T, V,file_dict):
if "children" not in T:
fn = fileByName[ T["name"] ]
V.append(file_dict.get_label_id(fn))
return
for child in T["children"]:
getSortedLeaves(child,V,file_dict)
################################################################################################
################################################################################################
if __name__=='__main__':
vh_dict = LabelDict()
file_dict = LabelDict()
graph = np.zeros([30,30])
vhsByFile = [set() for i in range(30)]
freq_major = np.zeros([30])
for root,dirs,filenames in os.walk(source_vh_dir):
for f in filenames:
if f[-1]!='~':
#file_name = f[3:] # vh_name
#if file_name=='all' or file_name=='ing':
# continue
p = f.find('_mineria')
#p = f.find('_mantto_mineria')
file_name = f[3:p] # vh_name_mineria
#file_name = f[14:] # mantto_min_vh_name
id_file = file_dict.add(file_name)
for line in open(os.path.join(source_vh_dir,f)):
line = line.strip('\n')
if line!='':
id_vh = vh_dict.add(line)
freq_major[id_file]+=1
vhsByFile[id_file].add(id_vh)
count_id_vh = vh_dict.size()
count_id_file = file_dict.size()
print(count_id_vh)
print(count_id_file)
ipdb.set_trace()
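    # Build the co-occurrence graph (summary added): graph[k, i] counts vector hashes shared
    # by majors k and i, while graph[k, k] counts hashes exclusive to major k.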
# node
for k in range(count_id_file):
        # possible edges
outgoing = set()
for i in range(count_id_file):
if k!=i:
temp = vhsByFile[k] & vhsByFile[i]
graph[k,i] = len(temp)
outgoing |= temp
graph[k,k] = freq_major[k] - len(outgoing)
# GENERATE CARRERAS.JSON
tot = sorter(hierarchy,freq_major,file_dict)
    open(treemap_name+'.json','w').write(json.dumps(hierarchy,ensure_ascii=False, indent = 2))
per_hierarchy = dict(hierarchy)
temp = [format(x,'.2f') for x in 100.0*freq_major/count_id_vh]
tot = sorter(per_hierarchy,temp,file_dict)
    open(treemap_name+'_perc.json','w').write(json.dumps(per_hierarchy,ensure_ascii=False, indent = 2))
# GENERATE ADJMATRIX.JSON
sorted_ids = []
getSortedLeaves(hierarchy,sorted_ids,file_dict)
adjmatrix = []
for k in sorted_ids:
if freq_major[k]==0:
continue
u = file_dict.get_label_name(k)
item = dict()
item["name"] = nameByFile[u]
item["size"] = int(freq_major[k])
item["imports"] = []
for i in sorted_ids:
if graph[k,i]>0:
v = file_dict.get_label_name(i)
imp = dict({'name':nameByFile[v],'weight':int(graph[k,i])})
item["imports"].append(imp)
adjmatrix.append(item)
open(adj_name + '.json','w').write(json.dumps(adjmatrix,ensure_ascii=False, indent = 2))
|
gkc1000/pyscf
|
pyscf/gto/test/test_ecp.py
|
Python
|
apache-2.0
| 10,394
| 0.010391
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import lib
cu1_basis = gto.basis.parse('''
H S
1.8000000 1.0000000
H S
2.8000000 0.0210870 -0.0045400 0.0000000
1.3190000 0.3461290 -0.1703520 0.0000000
0.9059000 0.0393780 0.1403820 1.0000000
H P
2.1330000 0.0868660 0.0000000
1.2000000 0.0000000 0.5000000
0.3827000 0.5010080 1.0000000
H D
0.3827000 1.0000000
H F
2.1330000 0.1868660 0.0000000
0.3827000 0.2010080 1.0000000
''')
mol = gto.M(atom='''
Cu1 0. 0. 0.
Cu 0. 1. 0.
He 1. 0. 0.
''',
basis={'Cu':'lanl2dz', 'Cu1': cu1_basis, 'He':'sto3g'},
ecp = {'cu':'lanl2dz'})
mol1 = gto.M(atom='''
Cu1 0. 0. 0.
Cu 0. 1. 0.
He 1. 0. 0.
Ghost-Cu1 0. 0. 0.0001
''',
basis={'Cu':'lanl2dz', 'Cu1': cu1_basis, 'He':'sto3g'},
ecp = {'cu':'lanl2dz'})
mol2 = gto.M(atom='''
Cu1 0. 0. 0.
Cu 0. 1. 0.
He 1. 0. 0.
Ghost-Cu1 0. 0. -0.0001
''',
basis={'Cu':'lanl2dz', 'Cu1': cu1_basis, 'He':'sto3g'},
ecp = {'cu':'lanl2dz'})
def tearDownModule():
global mol, mol1, mol2, cu1_basis
del mol, mol1, mol2, cu1_basis
class KnownValues(unittest.TestCase):
def test_ecp_by_shell(self):
for i in (0,2,3,6,9):
for j in (1,2,3,5,6):
ref = mol.intor_by_shell('ECPscalar_sph', (i,j))
dat = gto.ecp.type1_by_shell(mol, (i, j))
dat+= gto.ecp.type2_by_shell(mol, (i, j))
self.assertAlmostEqual(abs(ref-dat).max(), 0, 12)
ref = mol.intor_by_shell('ECPscalar_cart', (i,j))
dat = gto.ecp.type1_by_shell(mol, (i, j), cart=True)
dat+= gto.ecp.type2_by_shell(mol, (i, j), cart=True)
self.assertAlmostEqual(abs(ref-dat).max(), 0, 12)
def test_nr_rhf(self):
mol = gto.M(atom='Na 0. 0. 0.; H 0. 0. 1.',
basis={'Na':'lanl2dz', 'H':'sto3g'},
ecp = {'Na':'lanl2dz'},
verbose=0)
self.assertAlmostEqual(lib.fp(mol.intor('ECPscalar')), -0.19922134780248762, 9)
mf = scf.RHF(mol)
self.assertAlmostEqual(mf.kernel(), -0.45002315563472206, 10)
def test_bfd(self):
mol = gto.M(atom='H 0. 0. 0.',
basis={'H':'bfd-vdz'},
ecp = {'H':'bfd-pp'},
spin = 1,
verbose=0)
mf = scf.RHF(mol)
self.assertAlmostEqual(mf.kernel(), -0.499045, 6)
mol = gto.M(atom='
|
Na 0. 0. 0.',
basis={'Na':'bfd-vtz'},
ecp = {'Na':'bfd-pp'},
spin = 1,
verbose=0)
mf = scf.RHF(mol)
self.assertAlmostEqual(mf.kernel(), -0.181799, 6)
mol = gto.M(atom='Mg 0. 0. 0.',
basis={'Mg':'bfd-vtz'},
ecp = {'Mg':'bfd-pp'},
|
spin = 0,
verbose=0)
mf = scf.RHF(mol)
self.assertAlmostEqual(mf.kernel(), -0.784579, 6)
# mol = gto.M(atom='Ne 0. 0. 0.',
# basis={'Ne':'bfd-vdz'},
# ecp = {'Ne':'bfd-pp'},
# verbose=0)
# mf = scf.RHF(mol)
# self.assertAlmostEqual(mf.kernel(), -34.709059, 6)
def test_ecp_grad(self):
aoslices = mol.aoslice_nr_by_atom()
ish0, ish1 = aoslices[0][:2]
for i in range(ish0, ish1):
for j in range(mol.nbas):
shls = (i,j)
shls1 = (shls[0] + mol.nbas, shls[1])
ref = (mol1.intor_by_shell('ECPscalar_cart', shls1) -
mol2.intor_by_shell('ECPscalar_cart', shls1)) / 0.0002 * lib.param.BOHR
dat = mol.intor_by_shell('ECPscalar_ipnuc_cart', shls, comp=3)
self.assertAlmostEqual(abs(-dat[2]-ref).max(), 0, 4)
def test_ecp_iprinv(self):
mol = gto.M(atom='''
Cu 0. 0. 0.
H 1. 0. 0.
''',
basis={'Cu':'lanl2dz', 'H':'ccpvdz'},
ecp = {'cu':'lanl2dz'})
mol1 = gto.M(atom='''
Cu 0. 0. 0.
H 1. 0. 0.
Ghost-Cu 0. 0. 0.0001
''',
basis={'Cu':'lanl2dz', 'H':'ccpvdz'},
ecp = {'cu':'lanl2dz'})
mol2 = gto.M(atom='''
Cu 0. 0. 0.
H 1. 0. 0.
Ghost-Cu 0. 0. -0.0001
''',
basis={'Cu':'lanl2dz', 'H':'ccpvdz'},
ecp = {'cu':'lanl2dz'})
aoslices = mol.aoslice_nr_by_atom()
ish0, ish1 = aoslices[0][:2]
for i in range(ish0, ish1):
for j in range(mol.nbas):
shls = (i,j)
shls1 = (shls[0] + mol.nbas, shls[1])
ref = (mol1.intor_by_shell('ECPscalar_cart', shls1) -
mol2.intor_by_shell('ECPscalar_cart', shls1)) / 0.0002 * lib.param.BOHR
with mol.with_rinv_at_nucleus(0):
dat = mol.intor_by_shell('ECPscalar_iprinv_cart', shls, comp=3)
self.assertAlmostEqual(abs(-dat[2]-ref).max(), 0, 4)
def test_ecp_hessian(self):
aoslices = mol.aoslice_nr_by_atom()
ish0, ish1 = aoslices[0][:2]
for i in range(ish0, ish1):
for j in range(mol.nbas):
shls = (i,j)
shls1 = (shls[0] + mol.nbas, shls[1])
ref =-(mol1.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3) -
mol2.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3)) / 0.0002 * lib.param.BOHR
dat = mol.intor_by_shell('ECPscalar_ipipnuc_cart', shls, comp=9)
di, dj = dat.shape[1:]
dat = dat.reshape(3,3,di,dj)
self.assertAlmostEqual(abs(dat[2]-ref).max(), 0, 3)
for i in range(mol.nbas):
for j in range(ish0, ish1):
shls = (i,j)
shls1 = (shls[0], shls[1] + mol.nbas)
ref =-(mol1.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3) -
mol2.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3)) / 0.0002 * lib.param.BOHR
dat = mol.intor_by_shell('ECPscalar_ipnucip_cart', shls, comp=9)
di, dj = dat.shape[1:]
dat = dat.reshape(3,3,di,dj)
self.assertAlmostEqual(abs(dat[:,2]-ref).max(), 0, 3)
def test_pp_int(self):
from pyscf import gto, scf
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
from pyscf.pbc import df
cell = pbcgto.Cell()
cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],
'C' :'gth-szv',}
cell.pseudo = {'C':'gth-pade',
'He': pbcgto.pseudo.parse('''He
2
0.40000000 3 -1.98934751 -0.75604821 0.95604821
2
0.29482550 3 1.23870466 .855 .3
.71 -1.1
|
almarklein/scikit-image
|
skimage/transform/tests/test_geometric.py
|
Python
|
bsd-3-clause
| 7,870
| 0
|
import numpy as np
from numpy.testing import (assert_equal, assert_array_almost_equal,
assert_raises)
from skimage.transform._geometric import _stackcopy
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
SRC = np.array([
[-12.3705, -10.5075],
[-10.7865, 15.4305],
[8.6985, 10.8675],
[11.4975, -9.5715],
[7.8435, 7.4835],
[-5.3325, 6
|
.5025],
[6.7905, -6.3765],
[-6.1695, -0.8235],
])
DST = np.array([
[0, 0],
[0, 5800],
[4900, 5800],
[4900, 0],
[4479, 4580],
[1176, 3660],
[3754, 790],
[1024, 1931],
])
def test_stackcopy():
layers = 4
x = np.empty((3, 3, layers))
y = np.eye(3, 3)
_stackcopy(x, y)
for i in range(layers):
assert_array_almost_equal(x[..., i
|
], y)
def test_estimate_transform():
for tform in ('similarity', 'affine', 'projective', 'polynomial'):
estimate_transform(tform, SRC[:2, :], DST[:2, :])
assert_raises(ValueError, estimate_transform, 'foobar',
SRC[:2, :], DST[:2, :])
def test_matrix_transform():
tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
assert_equal(tform(SRC), matrix_transform(SRC, tform._matrix))
def test_similarity_estimation():
# exact solution
tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])
# over-determined
tform2 = estimate_transform('similarity', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])
# via estimate method
tform3 = SimilarityTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_similarity_init():
# init with implicit parameters
scale = 0.1
rotation = 1
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = SimilarityTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
scale = 0.1
rotation = 0
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
def test_affine_estimation():
# exact solution
tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :])
# over-determined
tform2 = estimate_transform('affine', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = AffineTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_affine_init():
# init with implicit parameters
scale = (0.1, 0.13)
rotation = 1
shear = 0.1
translation = (1, 1)
tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.shear, shear)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = AffineTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.shear, shear)
assert_array_almost_equal(tform2.translation, translation)
def test_piecewise_affine():
tform = PiecewiseAffineTransform()
tform.estimate(SRC, DST)
# make sure each single affine transform is exactly estimated
assert_array_almost_equal(tform(SRC), DST)
assert_array_almost_equal(tform.inverse(DST), SRC)
def test_projective_estimation():
# exact solution
tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :])
# over-determined
tform2 = estimate_transform('projective', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = ProjectiveTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_projective_init():
tform = estimate_transform('projective', SRC, DST)
# init with transformation matrix
tform2 = ProjectiveTransform(tform._matrix)
assert_array_almost_equal(tform2._matrix, tform._matrix)
def test_polynomial_estimation():
# over-determined
tform = estimate_transform('polynomial', SRC, DST, order=10)
assert_array_almost_equal(tform(SRC), DST, 6)
# via estimate method
tform2 = PolynomialTransform()
tform2.estimate(SRC, DST, order=10)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_init():
tform = estimate_transform('polynomial', SRC, DST, order=10)
# init with transformation parameters
tform2 = PolynomialTransform(tform._params)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_default_order():
tform = estimate_transform('polynomial', SRC, DST)
tform2 = estimate_transform('polynomial', SRC, DST, order=2)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_inverse():
assert_raises(Exception, PolynomialTransform().inverse, 0)
def test_union():
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
assert tform.__class__ == ProjectiveTransform
def test_geometric_tform():
tform = GeometricTransform()
assert_raises(NotImplementedError, tform, 0)
assert_raises(NotImplementedError, tform.inverse, 0)
assert_raises(NotImplementedError, tform.__add__, 0)
def test_invalid_input():
assert_raises(ValueError, ProjectiveTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform, np.zeros((2, 3)))
assert_raises(ValueError, SimilarityTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, SimilarityTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3)))
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
antoinecarme/pyaf
|
tests/artificial/transf_BoxCox/trend_MovingAverage/cycle_12/ar_12/test_artificial_32_BoxCox_MovingAverage_12_12_20.py
|
Python
|
bsd-3-clause
| 266
| 0.086466
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.pro
|
cess_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform
|
= "BoxCox", sigma = 0.0, exog_count = 20, ar_order = 12);
|
ufjfeng/leetcode-jf-soln
|
python/220_contains_duplicate_iii.py
|
Python
|
mit
| 1,718
| 0.001746
|
"""
Given an array of integers, find out whether there are two distinct indices i
and j in the array such that the difference between nums[i] and nums[j] is at
most t and the difference between i and j is at most k.
"""
class Solution(object):
def containsNearbyAlmostDuplicate(self, nums, k, t):
"""
:type nums: List[int]
:type k: int
:type t: int
:rtype: bool
"""
if nums is None or nums == []:
return False
if k < 1 or t < 0:
return False
buckets = collections.defaultdict(int)
length = len(nums)
width = t + 1
for i in range(length):
key = nums[i] // width
if key in buckets:
return True
if key - 1 in buckets and abs(nums[i] - buckets[key - 1]) < width:
return True
if key + 1 in buckets and abs(nums[i] - buckets[key + 1]) < width:
return True
buckets[ke
|
y] = nums[i]
if i >= k:
del buckets[nums[i - k] // width]
return False
import collections
a = Solution()
print(a.containsNearbyAlmostDuplicate([-1, -1], 1, 0))
print(a.containsNearbyAlmostDuplicate([1, 3, 1], 1, 1))
print(a.containsNearbyAlmostDuplicate([10, 20, 30, 25, 50], 2, 6))
"""
Note:
The idea is like the bucket sort algorithm. Suppose we have consecutive buckets
covering the range of nums with each bucket a width of (t+1). If there are two
items with
|
difference <= t, one of the two will happen:
(1) the two in the same bucket
(2) the two in neighbor buckets
https://discuss.leetcode.com/topic/27608/java-python-one-pass-solution-o-n-time-o-n-space-using-buckets
"""
|
Sparsh-Sharma/SteaPy
|
steapy/freestream.py
|
Python
|
mit
| 651
| 0.004608
|
import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
class Freestream:
|
"""
Freestream conditions.
"""
def __init__(self, u_inf=1.0, alpha=0.0):
|
"""
Sets the freestream speed and angle (in degrees).
Parameters
----------
u_inf: float, optional
Freestream speed;
default: 1.0.
alpha: float, optional
Angle of attack in degrees;
default 0.0.
"""
self.u_inf = u_inf
self.alpha = alpha*numpy.pi/180.0 # degrees to radians
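# Illustrative usage of the class above (a sketch, not part of the original module):
# the constructor stores the angle of attack in radians.
#
#   freestream = Freestream(u_inf=1.0, alpha=5.0)
#   # freestream.u_inf == 1.0
#   # freestream.alpha == 5.0 * numpy.pi / 180.0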
|
ClinGen/clincoded
|
src/contentbase/upgrader.py
|
Python
|
mit
| 6,948
| 0.000288
|
from pkg_resources import parse_version
from pyramid.interfaces import (
PHASE1_CONFIG,
PHASE2_CONFIG,
)
import venusian
def includeme(config):
config.registry['migrator'] = Migrator()
config.add_directive('add_upgrade', add_upgrade)
config.add_directive('add_upgrade_step', add_upgrade_step)
config.add_directive('set_upgrade_finalizer', set_upgrade_finalizer)
config.add_directive(
'set_default_upgrade_finalizer', set_default_upgrade_finalizer)
config.add_request_method(upgrade, 'upgrade')
class ConfigurationError(Exception):
pass
class UpgradeError(Exception):
pass
class UpgradePathNotFound(UpgradeError):
def __str__(self):
return "%r from %r to %r (at %r)" % self.args
class VersionTooHigh(UpgradeError):
pass
class Migrator(object):
""" Migration manager
"""
def __init__(self):
self.schema_migrators = {}
self.default_finalizer = None
def add_upgrade(self, schema_name, version, finalizer=None):
if schema_name in self.schema_migrators:
raise ConfigurationError('duplicate schema_name', schema_name)
if finalizer is None:
finalizer = self.default_finalizer
schema_migrator = SchemaMigrator(schema_name, versi
|
on, finalizer)
self.schema_migrators[schema_name] = schema_migrator
def upgrade(self, schema_name, value,
current_version='', target_version=None, **kw):
schema_migrator = self.schema_migrators[schema_name]
return schema_migrator.upgrade(
value, current_version, target_version, **kw)
def __getitem__(self, schema_name):
return self.schema_migrators[schema_name]
def __contains__(self, schema_name):
return schema_name
|
in self.schema_migrators
class SchemaMigrator(object):
""" Manages upgrade steps
"""
def __init__(self, name, version, finalizer=None):
self.__name__ = name
self.version = version
self.upgrade_steps = {}
self.finalizer = finalizer
def add_upgrade_step(self, step, source='', dest=None):
if dest is None:
dest = self.version
if parse_version(dest) <= parse_version(source):
raise ValueError("dest is less than source", dest, source)
if parse_version(source) in self.upgrade_steps:
raise ConfigurationError('duplicate step for source', source)
self.upgrade_steps[parse_version(source)] = UpgradeStep(step, source, dest)
def upgrade(self, value, current_version='', target_version=None, **kw):
if target_version is None:
target_version = self.version
if parse_version(current_version) > parse_version(target_version):
raise VersionTooHigh(self.__name__, current_version, target_version)
# Try to find a path from current to target versions
steps = []
version = current_version
# If no entry exists for the current_version, fallback to ''
if parse_version(version) not in self.upgrade_steps:
try:
step = self.upgrade_steps[parse_version('')]
except KeyError:
pass
else:
if parse_version(step.dest) >= parse_version(version):
steps.append(step)
version = step.dest
while parse_version(version) < parse_version(target_version):
try:
step = self.upgrade_steps[parse_version(version)]
except KeyError:
break
steps.append(step)
version = step.dest
if version != target_version:
raise UpgradePathNotFound(
self.__name__, current_version, target_version, version)
# Apply the steps
system = {}
system.update(kw)
for step in steps:
next_value = step(value, system)
if next_value is not None:
value = next_value
if self.finalizer is not None:
next_value = self.finalizer(value, system, version)
if next_value is not None:
value = next_value
return value
class UpgradeStep(object):
def __init__(self, step, source, dest):
self.step = step
self.source = source
self.dest = dest
def __call__(self, value, system):
return self.step(value, system)
# Imperative configuration
def add_upgrade(config, schema_name, version, finalizer=None):
if finalizer is not None:
config.set_upgrade_finalizer(schema_name, finalizer)
def callback():
migrator = config.registry['migrator']
migrator.add_upgrade(schema_name, version)
config.action(
('add_upgrade', schema_name),
callback, order=PHASE2_CONFIG)
def add_upgrade_step(config, schema_name, step, source='', dest=None):
def callback():
migrator = config.registry['migrator']
migrator[schema_name].add_upgrade_step(step, source, dest)
config.action(
('add_upgrade_step', schema_name, parse_version(source)),
callback)
def set_upgrade_finalizer(config, schema_name, finalizer):
def callback():
migrator = config.registry['migrator']
migrator[schema_name].finalizer = finalizer
config.action(
('set_upgrade_finalizer', schema_name),
callback)
def set_default_upgrade_finalizer(config, finalizer):
def callback():
migrator = config.registry['migrator']
migrator.default_finalizer = finalizer
config.action(
'set_default_upgrade_finalizer',
callback, order=PHASE1_CONFIG)
# Declarative configuration
def upgrade_step(schema_name, source='', dest=None):
""" Register an upgrade step
"""
def decorate(step):
def callback(scanner, factory_name, factory):
scanner.config.add_upgrade_step(schema_name, step, source, dest)
venusian.attach(step, callback, category='migrator')
return step
return decorate
def upgrade_finalizer(schema_name):
""" Register a finalizer
"""
def decorate(finalizer):
def callback(scanner, factory_name, factory):
scanner.config.set_upgrade_finalizer(schema_name, finalizer)
venusian.attach(finalizer, callback, category='migrator')
return finalizer
return decorate
def default_upgrade_finalizer(finalizer):
def callback(scanner, factory_name, factory):
scanner.config.set_default_upgrade_finalizer(finalizer)
venusian.attach(finalizer, callback, category='migrator')
return finalizer
# Upgrade
def upgrade(request, schema_name, value,
current_version='', target_version=None, **kw):
migrator = request.registry['migrator']
return migrator.upgrade(
        schema_name, value, current_version=current_version, target_version=target_version,
request=request, **kw)
|
PuchatekwSzortach/travelling_salesman_problem
|
main.py
|
Python
|
mit
| 1,223
| 0.004906
|
import tsp.algorithms
import time
if __name__ == "__main__":
cities_number = 5
max_distance = 100
distances_matrix = tsp.algorithms.get_random_distances_matrix(cities_number, max_distance)
start = time.time()
optimal_path = tsp.algorithms.BruteForceTSPSolver(distances_matrix).solve()
print("Optimal path is " + str(optimal_path))
pr
|
int("Distance is " + str(tsp.algorithms.get_trip_distance(distances_matrix, optimal_path)))
print("Computational time is: {0:.2f} seconds".format(time.time() - start))
start = time.time()
worst_path = tsp.algorithms.BruteForceTSPWorstPathSolver(distances_matrix).solve()
print("\nWorst path is " + str(worst_path))
print("Distance is " + str(tsp.algorithms.get_trip_d
|
istance(distances_matrix, worst_path)))
print("Computational time is: {0:.2f} seconds".format(time.time() - start))
start = time.time()
boltzmann_path = tsp.algorithms.BoltzmannMachineTSPSolver(distances_matrix).solve()
print("\nBoltzmann path is " + str(boltzmann_path))
print("Distance is " + str(tsp.algorithms.get_trip_distance(distances_matrix, boltzmann_path)))
print("Computational time is: {0:.2f} seconds".format(time.time() - start))
|
maehler/seqpoet
|
seqpoet/tests/test_sequence.py
|
Python
|
mit
| 1,544
| 0.000648
|
import os
import re
from nose.tools import raises
import seqpoet
class TestSequence:
def setup(self):
self.seq1 = 'ACATacacagaATAgagaCacata'
self.illegal = 'agagcatgcacthisisnotcorrect'
def test_sequence_length(self):
s = seqpoet.Sequence(self.seq1)
assert len(s) == len(self.seq1)
def test_casing(self):
s = seqpoet.Sequence(self.seq1)
assert re.match('^[acgt]+$', str(s))
def test_reverse_complement(self):
s = seqpoet.Sequence(self.seq1)
s2 = seqpoet.Sequence('acct')
assert s.revcomp() == 'tatgtgtctctattctgtgtatgt', \
'"{0}" is not "tatgtgtctctattctgtgtatgt"'.format(s.revcomp().seq)
assert s2.revcomp() == 'aggt', \
'"{0}" is not "aggt"'.format(s2.revcomp().seq)
def test_str(self):
s = seqpoet.Sequence(self.seq1)
assert str(s) == self.seq1.lower()
def test_repr(self):
s = seqpoet.Sequence(self.seq1)
assert repr(s)
|
== '<Sequence: acata...>'
assert repr(s.revcomp()) == '<Sequence: tatgt...>'
def test_indexing(self):
s = seqpoet.Sequence(self.seq1)
assert s[4] == 'a'
assert s[:5] == 'acata'
assert s[-6:] == 'cacata'
assert s[4:8] == 'acac'
def test_equality(self):
s = seqpoet.Sequence(self.seq1)
assert s == self.seq1.lower()
assert s[:3] == seqpoet.Sequence(self.seq1[:3])
@raises(Valu
|
eError)
def test_illegal_characters(self):
s = seqpoet.Sequence(self.illegal)
|
alex-pardo/6degrees
|
twython-master/twython-master/tests/test_auth.py
|
Python
|
gpl-2.0
| 3,025
| 0.001983
|
from twython import Twython, TwythonError, TwythonAuthError
from .config import app_key, app_secret, screen_name
import unittest
class TwythonAuthTestCase(unittest.TestCase):
def setUp(self):
self.api = Twython(app_key, app_secret)
self.bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET')
self.bad_api_invalid_tokens = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
'BAD_OT', 'BAD_OTS')
self.oauth2_api = Twython(app_key, app_secret, oauth_version=2)
self.oauth2_bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
oauth_version=2)
def test_get_authentication_tokens(self):
"""Test getting authentication tokens works"""
self.api.get_authent
|
ication_tokens(callback_url='http://google.com/',
force_login=True,
screen_name=screen_name)
def test_get_authentication_tokens_bad_tokens(self):
"""Test getting authentication tokens with bad tokens
raises TwythonAuthError"""
self.assertRaises(TwythonAuthError, self.bad_api.get_aut
|
hentication_tokens,
callback_url='http://google.com/')
def test_get_authorized_tokens_bad_tokens(self):
"""Test getting final tokens fails with wrong tokens"""
self.assertRaises(TwythonError, self.bad_api.get_authorized_tokens,
'BAD_OAUTH_VERIFIER')
def test_get_authorized_tokens_invalid_or_expired_tokens(self):
"""Test getting final token fails when invalid or expired tokens have been passed"""
self.assertRaises(TwythonError, self.bad_api_invalid_tokens.get_authorized_tokens,
'BAD_OAUTH_VERIFIER')
def test_get_authentication_tokens_raises_error_when_oauth2(self):
"""Test when API is set for OAuth 2, get_authentication_tokens raises
a TwythonError"""
self.assertRaises(TwythonError, self.oauth2_api.get_authentication_tokens)
def test_get_authorization_tokens_raises_error_when_oauth2(self):
"""Test when API is set for OAuth 2, get_authorized_tokens raises
a TwythonError"""
self.assertRaises(TwythonError, self.oauth2_api.get_authorized_tokens,
'BAD_OAUTH_VERIFIER')
def test_obtain_access_token(self):
"""Test obtaining an Application Only OAuth 2 access token succeeds"""
self.oauth2_api.obtain_access_token()
def test_obtain_access_token_bad_tokens(self):
"""Test obtaining an Application Only OAuth 2 access token using bad app tokens fails"""
self.assertRaises(TwythonAuthError,
self.oauth2_bad_api.obtain_access_token)
def test_obtain_access_token_raises_error_when_oauth1(self):
"""Test when API is set for OAuth 1, obtain_access_token raises a
TwythonError"""
self.assertRaises(TwythonError, self.api.obtain_access_token)
|
nvbn/Unofficial-Google-Music-API
|
gmusicapi/protocol/metadata.py
|
Python
|
bsd-3-clause
| 8,707
| 0.004709
|
# -*- coding: utf-8 -*-
"""
All known information on metadata is exposed in ``gmusicapi.protocol.metadata.md_expectations``.
This holds a mapping of *name* to *Expectation*, where *Expectation* has
the following fields:
*name*
key name in the song dictionary (equal to the *name* keying ``md_expectations``).
*type*:
a string holding a `validictory <https://github.com/sunlightlabs/validictory>`__ type.
Possible values:
:'string':
str and unicode objects
:'integer':
ints, longs
:'number':
ints, longs and floats
:'boolean':
bools
:'object':
dicts
:'array':
lists and tuples
:'null':
``None``
:'any':
any type is possible
*mutable*:
``True`` if client can change the value.
*optional*:
``True`` if the key is not guaranteed to be present.
*volatile*:
``True`` if the key's value can change between observations without client mutation.
*depends_on*:
the name of the key we transform to take our value from, or ``None``.
These fields can never be changed: they are automatically set to
a modified form of some other field's value.
See *dependent_transformation* for more information.
*dependent_transformation*:
``None``, or a function ``lambda dependent_value: our_value``.
For example, the ``artistNorm`` field is automatically set to the lowercase
of the ``artist`` field.
So, ``artistNorm.depends_on == 'artist'``, and the *dependent_transformation* for
``artistNorm`` can be written as ``lambda artist: artist.lower()``.
*allowed_values*:
sequence of allowed values.
*explanation*:
an explanatory string, typically empty for obvious fields.
The above information is used to generate the documentation below.
If you find an example to clarify these expectations, please `submit an issue
<https://github.com/simon-weber/Unofficial-Google-Music-API/issues>`__.
"""
from collections import defaultdict, namedtuple
_Expectation = namedtuple(
'_Expectation',
[
'name', 'type', 'mutable', 'optional', 'volatile',
'depends_on', 'dependent_transformation',
'allowed_values', 'explanation'
]
)
class Expectation(_Expectation):
"""Instantiated to represent information about a single metadata key."""
#This class just wraps the namedtuple to provide easy construction and some methods.
def __new__(cls, name, type, mutable, optional, volatile=False,
depends_on=None, dependent_transformation=None,
allowed_values=None, explanation=''):
return cls.__bases__[0].__new__(
cls,
name, type, mutable, optional, volatile,
depends_on, dependent_transformation,
allowed_values, explanation
)
def get_schema(self):
"""Return a validictory schema for this key."""
schema = {}
schema["type"] = self.type
if self.type == "string":
schema["blank"] = True # allow blank strings
if self.optional:
schema["required"] = False
return schema
#: All the expectations.
_all_expts = [
Expectation(name, 'string', mutable=True, optional=False) for name in
(
'composer', 'album', 'albumArtist', 'genre', 'name', 'artist', 'comment',
)
] + [
Expectation(name, 'integer', mutable=True, optional=True) for name in
(
'disc', 'year', 'track', 'totalTracks', 'totalDiscs', 'explicitType',
)
] + [
Expectation(name, type_str, mutable=False, optional=False, explanation=explain)
for (name, type_str, explain) in
(
('durationMillis', 'integer',
'length of a song in milliseconds.'),
('id', 'string',
'a per-user unique id for this song; sometimes referred to as *server id* or *song id*.'),
('creationDate', 'integer', ''),
('type', 'integer',
'An e
|
num: 1: free/purchased, 2: uploaded/not matched, 6: uploaded/matched'),
('beatsPerMinute', 'integer',
"the server does not calculate this - it's just what was in track metadata"),
('subjectToCuration', 'boolean', 'meaning unknown.'),
('curatedByUser', 'boolean'
|
, 'meaning unknown'),
('curationSuggested', 'boolean', 'meaning unknown'),
)
] + [
Expectation(name, type_str, mutable=False, optional=True, explanation=explain)
for (name, type_str, explain) in
(
('storeId', 'string', 'an id of a matching track in the Play Store.'),
('reuploading', 'boolean', 'scan-and-match reupload in progress.'),
('albumMatchedId', 'string', 'id of matching album in the Play Store?'),
('pending', 'boolean', 'unsure; server processing (eg for store match) pending?'),
('url', 'string', 'meaning unknown.'),
('bitrate', 'integer', "bitrate in kilobytes/second (eg 320)."),
('playlistEntryId', 'string', 'identifies position in the context of a playlist.'),
('albumArtUrl', 'string', "if present, the url of an image for this song's album art."),
('artistMatchedId', 'string', 'id of a matching artist in the Play Store?'),
('albumPlaybackTimestamp', 'integer', 'UTC/microsecond timestamp: the last time this album was played?'),
('origin', 'array', '???'),
('artistImageBaseUrl', 'string', 'like albumArtUrl, but for the artist. May be blank.'),
('recentTimestamp', 'integer', 'UTC/microsecond timestamp: meaning unknown.'),
('deleted', 'boolean', ''),
('matchedId', 'string', 'meaning unknown; related to scan and match?'),
)
] + [
Expectation(name + 'Norm', 'string', mutable=False, optional=False,
depends_on=name,
dependent_transformation=lambda x: x.lower(),
explanation="automatically set to lowercase of *%s*." % name)
for name in
(
'artist', 'albumArtist', 'album'
)
] + [
# 0, 1, 5: no, down, up thumbs
Expectation('rating', 'integer', mutable=True,
optional=False, allowed_values=tuple(range(6)),
explanation='0 == no thumb, 1 == down thumb, 5 == up thumb.'),
Expectation('lastPlayed', 'integer', mutable=False, optional=True, volatile=True,
explanation='UTC/microsecond timestamp'),
Expectation('playCount', 'integer', mutable=True, optional=False),
Expectation('title', 'string', mutable=False, optional=False,
depends_on='name', dependent_transformation=lambda x: x,
explanation='misleading! automatically set to *name*.'),
Expectation('titleNorm', 'string', mutable=False, optional=False,
depends_on='name', dependent_transformation=lambda x: x.lower(),
explanation='misleading! automatically set to lowercase of *name*.'),
]
#Create the dict for client code. If they look up something we don't know about,
# give them a flexible immutable key.
_immutable_key = lambda: Expectation('unknown', 'any', mutable=False, optional=True)
md_expectations = defaultdict(_immutable_key)
for expt in _all_expts:
md_expectations[expt.name] = expt
#This code is a super-hack. KnownMetadataFields exists _purely_ for documentation.
#We want dynamic documentation based on _all_expts, but __doc__ isn't a writable field
#for non-{function, class, module} objects. So, we create a dummy class and dynamically
#create its docstring to be arbitrary reST that documents our expectations.
def detail_line(e):
"""Given an expectation, return a readable one-line explanation of it."""
fields = [fname for fname in ('mutable', 'optional', 'volatile')
if getattr(e, fname, None)]
if e.depends_on:
fields.append("depends_on=%s" % e.depends_on)
line = ', '.join(fields)
if line:
line = "*(%s)*" % line
return line
#Note the hackiness of this class.
dynamic_docs = """
**This class exists only for documentation; do not try to import it.**
Instead, client code should use ``gmusicapi.protocol.metadata.md_expectations``.
See `the code <https://github.com/simon-weber/Unofficial-Google-Music-API/blob
/develop/gmus
|