| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| Vysybyl/motuus | players/3d_model_sample.py | Python | gpl-3.0 | 1,369 | 0.006574 |
from motuus.play.base_player import BasePlayer
class Player(BasePlayer):
"""This is the main class of motuus.
Use it to process Movement objects as they come in and to bind them to multimedia events.
An instance of this class is kept alive throughout every http session between the mobile device browser and the
computer.
If you need to store variables between inputs, you'll have to initialize them appropriately in the __init__ method.
Some useful variables are already present in the BasePlayer and can be called directly.
"""
def __init__(self, ):
# Calling super class init (ignore the following line):
super(Player, self).__init__(graph3D=True)
# Initialize here variables that might be used at every new event.
def play(self, mov):
"""This method is called anytime a new Movement input comes in from the device.
Use it to process every new mov and bind it to multimedia event, etc.
PLEASE NOTE that you should avoid long processing tasks within this method. If the device transmission
frequency is set to 5Hz (5 new inputs per second) the server will only have 0.2 seconds to receive and
process every input.
Do not overload it!
"""
# Calling super class play (ignore the following line):
super(Player, self).play(mov)
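A minimal hypothetical sketch of how this template might be filled in, using only the BasePlayer interface shown above (the counter and print call are made up for illustration):

```python
from motuus.play.base_player import BasePlayer


class CountingPlayer(BasePlayer):
    """Hypothetical example: keeps state between inputs, as the docstring above advises."""

    def __init__(self):
        # State that must survive between successive inputs is initialised here.
        super(CountingPlayer, self).__init__(graph3D=True)
        self.mov_count = 0

    def play(self, mov):
        super(CountingPlayer, self).play(mov)
        # Keep per-input work light: at a 5 Hz transmission frequency there are
        # only 0.2 seconds to receive and process each Movement.
        self.mov_count += 1
        print("received movement #%d" % self.mov_count)
```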
| eamonnmag/hepdata3 | hepdata/modules/submission/views.py | Python | gpl-2.0 | 5,414 | 0.001293 |
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
from flask import Blueprint, render_template, request, jsonify
from flask_login import login_required, current_user
from invenio_db import db
from hepdata.ext.elasticsearch.admin_view.api import AdminIndexer
from hepdata.modules.email.api import send_cookie_email
from hepdata.modules.inspire_api.views import get_inspire_record_information
from hepdata.modules.permissions.models import SubmissionParticipant
from hepdata.modules.records.utils.submission import \
get_or_create_hepsubmission
from hepdata.modules.records.utils.workflow import create_record
blueprint = Blueprint(
'submission',
__name__,
url_prefix='/submit',
template_folder='templates',
static_folder='static'
)
@blueprint.route('', methods=['GET'])
@login_required
def submit_ui():
return render_template('hepdata_submission/submit.html')
@blueprint.route('', methods=['POST'])
@login_required
def submit_post():
inspire_id = request.form['inspire_id']
title = request.form['title']
reviewer_str = request.form['reviewer']
uploader_str = request.form['uploader']
message = request.form['message']
reviewer = parse_person_string(reviewer_str)[0]
uploader = parse_person_string(uploader_str)[0]
hepdata_submission = process_submission_payload(inspire_id=inspire_id,
title=title,
reviewer=reviewer,
uploader=uploader, message=message)
if hepdata_submission:
return jsonify({'success': True, 'message': 'Submission successful.'})
else:
return jsonify({'success': False, 'message': 'Submission unsuccessful.'})
def process_submission_payload(*args, **kwargs):
"""
Processes the submission payload.
:param inspire_id:
:param title:
:param reviewer:
:param uploader:
:param send_upload_email:
:return:
"""
if kwargs.get('inspire_id'):
content, status = get_inspire_record_information(kwargs.get('inspire_id'))
content["inspire_id"] = kwargs.get('inspire_id')
elif kwargs.get('title'):
content = {'title': kwargs.get('title')}
else:
raise ValueError("A title or inspire_id must be provided.")
record_information = create_record(content)
submitter_id = kwargs.get('submitter_id')
if submitter_id is None:
submitter_id = kwargs.get('user_id') if 'user_id' in kwargs else int(current_user.get_id())
hepsubmission = get_or_create_hepsubmission(record_information["recid"], submitter_id)
if kwargs.get('inspire_id'):
hepsubmission.inspire_id = kwargs.get('inspire_id')
db.session.add(hepsubmission)
reviewer_details = kwargs.get('reviewer')
reviewer = create_participant_record(
reviewer_details.get('name'),
reviewer_details.get('email'), 'reviewer', 'primary',
record_information['recid'])
hepsubmission.participants.append(reviewer)
uploader_details = kwargs.get('uploader')
uploader = create_participant_record(uploader_details.get('name'), uploader_details.get('email'),
'uploader', 'primary',
record_information['recid'])
hepsubmission.participants.append(uploader)
db.session.commit()
if kwargs.get('send_upload_email', True):
# Now Send Email only to the uploader first. The reviewer will be asked to
# review only when an upload has been performed.
message = kwargs.get('message', None)
send_cookie_email(uploader, record_information, message)
admin_idx = AdminIndexer()
admin_idx.index_submission(hepsubmission)
return hepsubmission
def create_participant_record(name, email, role, status, recid):
participant_record = SubmissionParticipant(full_name=name,
email=email,
status=status,
role=role,
publication_recid=recid)
return participant_record
def parse_person_string(person_string, separator="::"):
"""
Parses a string in the format name::email into separate parts.
:param person_string: e.g. John::j.p.a@cern.ch
:param separator: by default '::'
:return: name, email
"""
if separator in person_string:
string_parts = person_string.split(separator)
return {'name': string_parts[0], 'email': string_parts[1]},
return {'name': person_string, 'email': person_string}
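A hedged sketch of how the POST handler above might be exercised from a client; the host and field values are made up, and the view is @login_required, so a real call would also need an authenticated session:

```python
import requests

# Hypothetical client call; the form field names mirror submit_post() above,
# and reviewer/uploader use the Name::email format expected by parse_person_string().
resp = requests.post(
    "http://localhost:5000/submit",
    data={
        "inspire_id": "1234567",  # made-up INSPIRE id
        "title": "Example measurement",
        "reviewer": "Jane Doe::jane@example.org",
        "uploader": "John Doe::john@example.org",
        "message": "Please upload the tables.",
    },
)
print(resp.json())  # {'success': True, 'message': 'Submission successful.'} on success
```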
| wagnerand/addons-server | src/olympia/ratings/migrations/0004_auto_20210823_1255.py | Python | bsd-3-clause | 573 | 0.001745 |
# Generated by Django 3.2.6 on 2021-08-23 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ratings', '0003_auto_20210813_0941'),
]
operations = [
migrations.AlterField(
model_name='rating',
name='ip_address',
field=models.CharField(default='0.0.0.0', max_length=45),
),
migrations.AddIndex(
model_name='rating',
index=models.Index(fields=['ip_address'], name='reviews_ip_address_057fddfa'),
),
]
| areebbeigh/anime-scraper | src/websites/gogoanime.py | Python | apache-2.0 | 4,738 | 0.002533 |
import os
import re
from src.config import TimeoutConfig
from src.scrape_utils.selectors import GoGoAnimeSelectors, LOAD_STATUS_SELECTOR
from src.scrape_utils.servers import StreamServers
from src.stream_servers.openupload import OpenUploadScraper
from src.stream_servers.mp4upload import Mp4UploadScraper
from src.stream_servers.yourupload import YourUploadScraper
from src.utils import printing
from src.utils.timeout import call_till_true
from src.utils import sort_nicely, printd
from .base_scraper import BaseScraper
class Scraper(BaseScraper):
def __init__(self, **kwargs):
super(Scraper, self).__init__(**kwargs)
self.driver.get(self.anime_url)
self.episodes_dict = {}
self.episodes_dict = self.fetch_episode_list()
self.server_scraper = self._get_server_scraper()
def _get_server_scraper(self):
scrapers = {
StreamServers.OPENUPLOAD: OpenUploadScraper(
self.driver, self.proxy, GoGoAnimeSelectors),
StreamServers.MP4UPLOAD: Mp4UploadScraper(
self.driver, self.proxy, GoGoAnimeSelectors),
StreamServers.YOURUPLOAD: YourUploadScraper(
self.driver, self.proxy, GoGoAnimeSelectors)
}
return scrapers[self.server]
def _execute_js_scripts(self):
js_libs = os.path.join(os.path.dirname(os.path.abspath(__file__)), "js")
load_episode_list_js = os.path.join(js_libs, "loadEpisodeList.js")
with open(load_episode_list_js, "r") as f:
load_episode_list = f.read()
self.driver.execute_script(load_episode_list)
def fetch_episode_list(self):
# -> { 'Episode 1': 'https://www.kickassanime.ru/anime/gintama/episode-1', ... }
if self.episodes_dict:
return self.episodes_dict
printd("fetching episode list")
printing.fetching_list(self.anime_url)
driver = self.driver
# Execute JS to load the entire episode list
self._execute_js_scripts()
ep_list_container = driver.find_element_by_css_selector(GoGoAnimeSelectors.EPISODE_LIST)
def fetch_ep_list(container):
return container.find_elements_by_css_selector(GoGoAnimeSelectors.EPISODE)
# Sometimes the episode list takes a while to load and fetch_ep_list gets 0 episodes
# call_till_true will keep trying for n seconds till we get >0 episodes
ep_list, calls, success = call_till_true(fetch_ep_list, TimeoutConfig.FETCHING_EPISODE_LIST, ep_list_container)
if not success:
# TODO: Change error raised
raise ValueError("Failed to fetch episode list")
printd("calls", calls)
# print(ep_list_container.text)
# print(len(ep_list))
ep_dict = {}
for ep in ep_list:
if ep.text:
episode_name = re.search(r"EP ([\d\-\.]+)", ep.text).group().replace("EP", "Episode")
ep_dict[episode_name] = ep.get_attribute("href")
# print(ep_dict)
return ep_dict
def fetch_episode(self, episode_name):
# -> { stream_page: http://.../watch/episode-01, stream_url: http://.../file.mp4 }
if episode_name in self.episodes_dict:
stream_page = self.episodes_dict[episode_name]
printd("Fetching", episode_name)
printing.fetching_episode(episode_name, stream_page)
# stream_url = self.server_scraper.fetch_stream_url(stream_page)
try:
stream_url = self.server_scraper.fetch_stream_url(stream_page)
except Exception as err:
printd(err)
stream_url = ""
result = {"stream_page": stream_page, "stream_url": stream_url}
printd(result)
printing.fetched_episode(episode_name, stream_url, True if stream_url else False)
return result
raise ValueError("%s does not exist" % episode_name)
def fetch_episode_number(self, episode_number):
for episode_name in self.episodes_dict:
if episode_number == int(episode_name.replace("Episode ", "")):
return self.fetch_episode(episode_name)
raise ValueError("Episode %d does not exist" % episode_number)
def fetch_all_episodes(self, episodes_dict):
# -> { 'Episode 1': { 'stream_page': http://.../watch/episode-01, 'stream_url': http://.../file.mp4 } }
episode_names = list(self.episodes_dict.keys())
sort_nicely(episode_names)
for ep_name in episode_names:
try:
episodes_dict[ep_name] = self.fetch_episode(ep_name)
except ValueError:
episodes_dict[ep_name] = ""
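call_till_true is imported from src.utils.timeout and its implementation is not part of this snippet; judging only from how it is called above (it returns a result, a call count and a success flag, retrying until the result is non-empty or the timeout expires), a sketch of such a helper might look like this:

```python
import time


def call_till_true(func, timeout_seconds, *args, **kwargs):
    """Hypothetical sketch: call func(*args) repeatedly until it returns a truthy
    value or timeout_seconds elapse. Returns (result, number_of_calls, success)."""
    deadline = time.time() + timeout_seconds
    calls = 0
    result = func(*args, **kwargs)
    calls += 1
    while not result and time.time() < deadline:
        time.sleep(0.5)  # arbitrary polling interval, an assumption of this sketch
        result = func(*args, **kwargs)
        calls += 1
    return result, calls, bool(result)
```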
| sinner/testing-djrf | tutorial/snippets/models/TimeStampable.py | Python | mit | 426 | 0 |
from django.db import models
class TimeStampable(models.Model):
"""TimeStampable"""
STATUS_CHOICES = (
('A', 'Active'),
('I', 'Inactive')
)
created_at = models.DateTimeField(auto_now_add=True, auto_now=False)
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
class Meta:
abstract = True
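Because Meta.abstract = True, TimeStampable never becomes a table of its own; a hypothetical concrete model (Snippet is made up) simply inherits the three fields:

```python
# Hypothetical usage, not part of the original file.
class Snippet(TimeStampable):
    title = models.CharField(max_length=100)

# Snippet rows get created_at set on insert, updated_at refreshed on every save,
# and a status limited to the 'A'/'I' choices defined above.
```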
| Kyria/LazyBlacksmith | lazyblacksmith/models/sde/solarsystem.py | Python | bsd-3-clause | 345 | 0 |
# -*- encoding: utf-8 -*-
from . import db
class SolarSystem(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
name = db.Column(db.String(100), nullable=False)
region_id = db.Column(db.Integer, db.ForeignKey('region.id'))
constellation_id = db.Column(db.Integer, db.ForeignKey('constellation.id'))
| ProjectCalla/SomeCrawler | somecrawler/queue/QueueManager.py | Python | gpl-3.0 | 802 | 0.002494 |
__author__ = 'j'
from somecrawler.queue import PriorityQueue
from somecrawler.user import User, UserController
class QueueManager:
pQueue = PriorityQueue.PQueue()
userCon = UserController.UserController()
def __init__(self):
pass
def add_to_queue(self, pQueue, job, priority):
pQueue.put(job, priority)
def create_user_priority_queue(self, pQueue):
userList = self.userCon.getAllUsers()
self.add_dict_to_queue(userList, pQueue)
def add_dict_to_queue(self, pQueue, dict):
for i in range(len(dict)):
job = dict[str(i)]
pQueue.put(job, job.priority)
return pQueue
def emptyQueueDEBUG(self, pQueue):
i = 0
while not pQueue.empty():
print i, pQueue.get()
i += 1
| xchenum/quantum | quantum/plugins/linuxbridge/tests/unit/test_database.py | Python | apache-2.0 | 11,211 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
"""
test_database.py is an independent test suite
that tests the database api method calls
"""
import logging
import unittest2 as unittest
import quantum.db.api as db
from quantum.openstack.common import cfg
import quantum.plugins.linuxbridge.common.exceptions as c_exc
import quantum.plugins.linuxbridge.db.l2network_db as l2network_db
LOG = logging.getLogger(__name__)
class L2networkDB(object):
"""Class conisting of methods to call L2network db methods"""
def get_all_vlan_bindings(self):
"""Get all vlan binding into a list of dict"""
vlans = []
try:
for vlan_bind in l2network_db.get_all_vlan_bindings():
LOG.debug("Getting vlan bindings for vlan: %s" %
vlan_bind.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(vlan_bind.vlan_id)
vlan_dict["net-id"] = str(vlan_bind.network_id)
vlans.append(vlan_dict)
except Exception, exc:
LOG.error("Failed to get all vlan bindings: %s" % str(exc))
return vlans
def get_vlan_binding(self, network_id):
"""Get a vlan binding"""
vlan = []
try:
for vlan_bind in l2network_db.get_vlan_binding(network_id):
LOG.debug("Getting vlan binding for vlan: %s" %
vlan_bind.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(vlan_bind.vlan_id)
vlan_dict["net-id"] = str(vlan_bind.network_id)
vlan.append(vlan_dict)
except Exception, exc:
LOG.error("Failed to get vlan binding: %s" % str(exc))
return vlan
def create_vlan_binding(self, vlan_id, network_id):
"""Create a vlan binding"""
vlan_dict = {}
try:
res = l2network_db.add_vlan_binding(vlan_id, network_id)
LOG.debug("Created vlan binding for vlan: %s" % res.vlan_id)
vlan_dict["vlan-id"] = str(res.vlan_id)
vlan_dict["net-id"] = str(res.network_id)
return vlan_dict
except Exception, exc:
LOG.error("Failed to create vlan binding: %s" % str(exc))
def delete_vlan_binding(self, network_id):
"""Delete a vlan binding"""
try:
res = l2network_db.remove_vlan_binding(network_id)
LOG.debug("Deleted vlan binding for vlan: %s" % res.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(res.vlan_id)
return vlan_dict
except Exception, exc:
raise Exception("Failed to delete vlan binding: %s" % str(exc))
def update_vlan_binding(self, network_id, vlan_id):
"""Update a vlan binding"""
try:
res = l2network_db.update_vlan_binding(network_id, vlan_id)
LOG.debug("Updating vlan binding for vlan: %s" % res.vlan_id)
vlan_dict = {}
vlan_dict["vlan-id"] = str(res.vlan_id)
vlan_dict["net-id"] = str(res.network_id)
return vlan_dict
except Exception, exc:
raise Exception("Failed to update vlan binding: %s" % str(exc))
class QuantumDB(object):
"""Class conisting of methods to call Quantum db methods"""
def get_all_networks(self, tenant_id):
"""Get all networks"""
nets = []
try:
for net in db.network_list(tenant_id):
LOG.debug("Getting network: %s" % net.uuid)
net_dict = {}
net_dict["tenant-id"] = net.tenant_id
net_dict["net-id"] = str(net.uuid)
net_dict["net-name"] = net.name
nets.append(net_dict)
except Exception, exc:
LOG.error("Failed to get all networks: %s" % str(exc))
return nets
def create_network(self, tenant_id, net_name):
"""Create a network"""
net_dict = {}
try:
res = db.network_create(tenant_id,
net_name,
op_status="UP")
LOG.debug("Created network: %s" % res.uuid)
net_dict["tenant-id"] = res.tenant_id
net_dict["net-id"] = str(res.uuid)
net_dict["net-name"] = res.name
return net_dict
except Exception, exc:
LOG.error("Failed to create network: %s" % str(exc))
def delete_network(self, net_id):
"""Delete a network"""
try:
net = db.network_destroy(net_id)
LOG.debug("Deleted network: %s" % net.uuid)
net_dict = {}
net_dict["net-id"] = str(net.uuid)
return net_dict
except Exception, exc:
raise Exception("Failed to delete port: %s" % str(exc))
class L2networkDBTest(unittest.TestCase):
"""Class conisting of L2network DB unit tests"""
def setUp(self):
"""Setup for tests"""
l2network_db.initialize()
l2network_db.create_vlanids()
self.dbtest = L2networkDB()
self.quantum = QuantumDB()
LOG.debug("Setup")
def tearDown(self):
"""Tear Down"""
db.clear_db()
def test_create_vlanbinding(self):
net1 = self.quantum.create_network("t1", "netid1")
vlan1 = self.dbtest.create_vlan_binding(10, net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
self.teardown_vlanbinding()
self.teardown_network()
def test_getall_vlanbindings(self):
net1 = self.quantum.create_network("t1", "netid1")
net2 = self.quantum.create_network("t1", "netid2")
vlan1 = self.dbtest.create_vlan_binding(10, net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
vlan2 = self.dbtest.create_vlan_binding(20, net2["net-id"])
self.assertTrue(vlan2["vlan-id"] == "20")
vlans = self.dbtest.get_all_vlan_bindings()
self.assertTrue(len(vlans) == 2)
self.teardown_vlanbinding()
self.teardown_network()
def test_delete_vlanbinding(self):
net1 = self.quantum.create_network("t1", "netid1")
vlan1 = self.dbtest.create_vlan_binding(10, net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
self.dbtest.delete_vlan_binding(net1["net-id"])
vlans = self.dbtest.get_all_vlan_bindings()
count = 0
for vlan in vlans:
if vlan["vlan-id"] is "10":
count += 1
self.assertTrue(count == 0)
self.teardown_vlanbinding()
self.teardown_network()
def test_update_vlanbinding(self):
net1 = self.quantum.create_network("t1", "netid1")
vlan1 = self.dbtest.create_vlan_binding(10, net1["net-id"])
self.assertTrue(vlan1["vlan-id"] == "10")
vlan1 = self.dbtest.update_vlan_binding(net1["net-id"], 11)
self.assertTrue(vlan1["vlan-id"] == "11")
self.teardown_vlanbinding()
self.teardown_network()
def test_vlanids(self):
l2network_db.create_vlanids()
vlanids = l2network_db.get_all_vlanids()
self.assertGreater(len(vlanids), 0)
vlanid = l2network_db.reserve_vlanid()
used = l2network_db.is_vlanid_used(vlanid)
self.assertTrue(used)
used = l2network_db.release_vlanid(vlanid)
self.assertFalse(used)
self.teardown_vlanbinding()
self.teardown_network()
def test_specific_vlanid_outside(self):
l2network_db.create_v
| ultima51x/shelltag | test/functions.py | Python | gpl-2.0 | 1,243 | 0.005632 |
# Copyright 2010 David Hwang
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
"""
These are some helper functions for tests.
"""
import shutil
import os
import os.path
front = "../test/data/original/front.jpg"
back = "../test/data/original/back.jpg"
pathlist = [
"../test/data/2010 - The Noise",
"../test/data/test.mp3",
"../test/data/front.jpg",
"../test/data/back.jpg"
]
def copymp3(filename):
"""
Copies a test mp3 to a testfile called test.mp3. Valid inputs include:
"empty.mp3"
"id3v1.mp3"
"id3v23.mp3"
"id3v24art.mp3"
"id3v24noart.mp3"
"id3v123.mp3"
"id3v124.mp3"
"""
shutil.copyfile(
"../test/data/original/" + filename,
"../test/data/test.mp3")
def copyfolder():
shutil.copytree("../test/data/original/2010 - The Noise",
"../test/data/2010 - The Noise")
def clear():
"""
Deletes all files which exist under pathlist
"""
for path in pathlist:
if os.path.exists(path):
if os.path.isdir(path): shutil.rmtree(path)
else: os.remove(path)
| wackerly/faucet | faucet/valve_util.py | Python | apache-2.0 | 6,228 | 0.000642 |
"""Utility functions for FAUCET."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from logging.handlers import WatchedFileHandler
import os
import signal
import sys
from functools import wraps
def kill_on_exception(logname):
"""decorator to ensure functions will kill ryu when an unhandled exception
occurs"""
def _koe(func):
@wraps(func)
def __koe(*args, **kwargs):
try:
func(*args, **kwargs)
except:
logging.getLogger(logname).exception(
'Unhandled exception, killing RYU')
logging.shutdown()
os.kill(os.getpid(), signal.SIGTERM)
return __koe
return _koe
def utf8_decode(msg_str):
"""Gracefully decode a possibly UTF-8 string."""
return msg_str.decode('utf-8', errors='replace')
def get_sys_prefix():
"""Returns an additional prefix for log and configuration files when used in
a virtual environment"""
# Find the appropriate prefix for config and log file default locations
# in case Faucet is run in a virtual environment. virtualenv marks the
# original path in sys.real_prefix. If this value exists, and is
# different from sys.prefix, then we are most likely running in a
# virtualenv. Also check for Py3.3+ pyvenv.
sysprefix = ''
if (getattr(sys, 'real_prefix', sys.prefix) != sys.prefix or
getattr(sys, 'base_prefix', sys.prefix) != sys.prefix):
sysprefix = sys.prefix
return sysprefix
_PREFIX = get_sys_prefix()
# To specify a boolean-only setting, set the default value to a bool type.
DEFAULTS = {
'FAUCET_CONFIG': ''.join((
_PREFIX,
'/etc/faucet/faucet.yaml',
':',
_PREFIX,
'/etc/ryu/faucet/faucet.yaml')),
'FAUCET_CONFIG_STAT_RELOAD': False,
'FAUCET_LOG_LEVEL': 'INFO',
'FAUCET_LOG': _PREFIX + '/var/log/faucet/faucet.log',
'FAUCET_EVENT_SOCK': '', # Special-case, see get_setting().
'FAUCET_EXCEPTION_LOG': _PREFIX + '/var/log/faucet/faucet_exception.log',
'FAUCET_PROMETHEUS_PORT': '9302',
'FAUCET_PROMETHEUS_ADDR': '0.0.0.0',
'FAUCET_PIPELINE_DIR': _PREFIX + '/etc/faucet' + ':' + _PREFIX + '/etc/ryu/faucet',
'GAUGE_CONFIG': ''.join((
_PREFIX,
'/etc/faucet/gauge.yaml',
':',
_PREFIX,
'/etc/ryu/faucet/gauge.yaml')),
'GAUGE_CONFIG_STAT_RELOAD': False,
'GAUGE_LOG_LEVEL': 'INFO',
'GAUGE_PROMETHEUS_ADDR': '0.0.0.0',
'GAUGE_EXCEPTION_LOG': _PREFIX + '/var/log/faucet/gauge_exception.log',
'GAUGE_LOG': _PREFIX + '/var/log/faucet/gauge.log'
}
def _cast_bool(value):
"""Return True if value is a non-zero int."""
try:
return int(value) != 0
except ValueError:
return False
def get_setting(name, path_eval=False):
"""Returns value of specified configuration setting."""
default_value = DEFAULTS[name]
result = os.getenv(name, default_value)
# split on ':' and find the first suitable path
if (path_eval and
isinstance(result, str) and
isinstance(default_value, str) and not
isinstance(default_value, bool)):
locations = result.split(":")
result = None
for loc in locations:
if os.path.isfile(loc):
result = loc
break
if result is None:
result = locations[0]
# Check for setting that expects a boolean result.
if isinstance(default_value, bool):
return _cast_bool(result)
# Special default for FAUCET_EVENT_SOCK.
if name == 'FAUCET_EVENT_SOCK':
if result == '0':
return ''
if _cast_bool(result):
return _PREFIX + '/var/run/faucet/faucet.sock'
return result
def get_logger(logname, logfile, loglevel, propagate):
"""Create and return a logger object."""
stream_handlers = {
'STDOUT': sys.stdout,
'STDERR': sys.stderr,
}
try:
if logfile in stream_handlers:
logger_handler = logging.StreamHandler(stream_handlers[logfile])
else:
logger_handler = WatchedFileHandler(logfile)
except PermissionError as err: # pytype: disable=name-error
print(err)
sys.exit(-1)
logger = logging.getLogger(logname)
log_fmt = '%(asctime)s %(name)-6s %(levelname)-8s %(message)s'
logger_handler.setFormatter(
logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
logger.addHandler(logger_handler)
logger.propagate = propagate
logger.setLevel(loglevel)
return logger
def close_logger(logger):
"""Close all handlers on logger object."""
if logger is None:
return
for handler in list(logger.handlers):
handler.close()
logger.removeHandler(handler)
def dpid_log(dpid):
"""Log a DP ID as hex/decimal."""
return 'DPID %u (0x%x)' % (dpid, dpid)
def btos(b_str):
"""Return byte array/string as string."""
return b_str.encode('utf-8').decode('utf-8', 'strict')
def stat_config_files(config_hashes):
"""Return dict of a subset of stat attributes on config files."""
config_files_stats = {}
for config_file in list(config_hashes.keys()):
try:
config_file_stat = os.stat(config_file)
except OSError:
continue
config_files_stats[config_file] = (
config_file_stat.st_size,
config_file_stat.st_mtime,
config_file_stat.st_ctime)
return config_files_stats
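A brief hypothetical illustration of how the settings machinery above resolves values; the environment variable names come from DEFAULTS, and the paths shown are only the defaults:

```python
# Falls back to the DEFAULTS entry ('INFO') when FAUCET_LOG_LEVEL is not exported.
log_level = get_setting('FAUCET_LOG_LEVEL')

# Boolean-typed defaults are coerced through _cast_bool(), so '1'/'0' style values work.
stat_reload = get_setting('FAUCET_CONFIG_STAT_RELOAD')

# path_eval=True splits the value on ':' and returns the first existing file,
# falling back to the first entry when none exist.
config_file = get_setting('FAUCET_CONFIG', path_eval=True)
```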
| ingadhoc/sale | crm_survey/models/crm_job.py | Python | agpl-3.0 | 291 | 0 |
from odoo import fields, models
class Job(models.Model):
_inherit = "crm.team"
survey_id = fields.Many2one(
'survey.survey', "Interview Form",
help="Choose an interview form")
def action_print_survey(self):
return self.survey_id.action_print_survey()
| LaFriOC/LabJack | Python_LJM/Examples/eAddresses.py | Python | gpl-3.0 | 1,299 | 0.009238 |
"""
Demonstrates how to use the labjack.ljm.eAddresses (LJM_eAddresses) function.
"""
from labjack import ljm
# Open first found LabJack
handle = ljm.open(ljm.constants.dtANY, ljm.constants.ctANY, "ANY")
#handle = ljm.openS("ANY", "ANY", "ANY")
info = ljm.getHandleInfo(handle)
print("Opened a LabJack with Device type: %i, Connection type: %i,\n" \
"Serial number: %i, IP address: %s, Port: %i,\nMax bytes per MB: %i" % \
(info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4], info[5]))
# Setup and call eAddresses to write/read values to/from the LabJack.
numFrames = 3
aAddresses = [1000, 55110, 55110] # [DAC0, TEST_UINT16, TEST_UINT16]
aDataTypes = [ljm.constants.FLOAT32, ljm.constants.UINT16, ljm.constants.UINT16]
aWrites = [ljm.constants.WRITE, ljm.constants.WRITE, ljm.constants.READ]
aNumValues = [1, 1, 1]
aValues = [2.5, 12345, 0] # [write 2.5 V, write 12345, read]
results = ljm.eAddresses(handle, numFrames, aAddresses, aDataTypes, aWrites, aNumValues, aValues)
print("\neAddresses results: ")
start = 0
for i in range(numFrames):
end = start + aNumValues[i]
print(" Address - %i, data type - %i, write - %i, values: %s" % \
(aAddresses[i], aDataTypes[i], aWrites[i], str(results[start:end])))
start = end
# Close handle
ljm.close(handle)
| thaim/ansible | test/integration/targets/s3_bucket_notification/files/mini_lambda.py | Python | mit | 145 | 0 |
import json
def lambda_handler(event, context):
return {
'statusCode': 200,
'body': json.dumps('Hello from Lambda!')
}
| opennewzealand/linz2osm | linz2osm/data_dict/migrations/0007_add_model_Dataset.py | Python | gpl-3.0 | 3,063 | 0.005224 |
# -*- coding: utf-8 -*-
# LINZ-2-OSM
# Copyright (C) 2010-2012 Koordinates Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Dataset'
db.create_table('data_dict_dataset', (
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, primary_key=True)),
('database_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')()),
('srid', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('data_dict', ['Dataset'])
def backwards(self, orm):
# Deleting model 'Dataset'
db.delete_table('data_dict_dataset')
models = {
'data_dict.dataset': {
'Meta': {'object_name': 'Dataset'},
'database_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'primary_key': 'True'}),
'srid': ('django.db.models.fields.IntegerField', [], {})
},
'data_dict.layer': {
'Meta': {'object_name': 'Layer'},
'entity': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'processors': ('linz2osm.utils.db_fields.JSONField', [], {'null': 'True', 'blank': 'True'})
},
'data_dict.tag': {
'Meta': {'unique_together': "(('layer', 'tag'),)", 'object_name': 'Tag'},
'code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'null': 'True', 'to': "orm['data_dict.Layer']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['data_dict']
| molobrakos/home-assistant | homeassistant/components/mobile_app/sensor.py | Python | apache-2.0 | 2,104 | 0 |
"""Sensor platform for mobile_app."""
from functools import partial
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE_SENSOR as ENTITY_TYPE,
ATTR_SENSOR_UNIQUE_ID, ATTR_SENSOR_UOM, DATA_DEVICES,
DOMAIN)
from .entity import MobileAppEntity, sensor_id
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up mobile app sensor from a config entry."""
entities = list()
webhook_id = config_entry.data[CONF_WEBHOOK_ID]
for config in hass.data[DOMAIN][ENTITY_TYPE].values():
if config[CONF_WEBHOOK_ID] != webhook_id:
continue
device = hass.data[DOMAIN][DATA_DEVICES][webhook_id]
entities.append(MobileAppSensor(config, device, config_entry))
async_add_entities(entities)
@callback
def handle_sensor_registration(webhook_id, data):
if data[CONF_WEBHOOK_ID] != webhook_id:
return
unique_id = sensor_id(data[CONF_WEBHOOK_ID],
data[ATTR_SENSOR_UNIQUE_ID])
entity = hass.data[DOMAIN][ENTITY_TYPE][unique_id]
if 'added' in entity:
return
entity['added'] = True
device = hass.data[DOMAIN][DATA_DEVICES][data[CONF_WEBHOOK_ID]]
async_add_entities([MobileAppSensor(data, device, config_entry)])
async_dispatcher_connect(hass,
'{}_{}_register'.format(DOMAIN, ENTITY_TYPE),
partial(handle_sensor_registration, webhook_id))
class MobileAppSensor(MobileAppEntity):
"""Representation of an mobile app sensor."""
@property
def state(self):
"""Return the state of the sensor."""
return self._config[ATTR_SENSOR_STATE]
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._config.get(ATTR_SENSOR_UOM)
| mscuthbert/abjad | abjad/tools/datastructuretools/test/test_datastructuretools_TreeContainer_append.py | Python | gpl-3.0 | 796 | 0.001256 |
# -*- encoding: utf-8 -*-
from abjad import *
def test_datastructuretools_TreeContainer_append_01():
leaf_a = datastructuretools.TreeNode()
leaf_b = datastructuretools.TreeNode()
leaf_c = datastructuretools.TreeNode()
leaf_d = datastructuretools.TreeNode()
container = datastructuretools.TreeContainer()
assert container.children == ()
container.append(leaf_a)
assert container.children == (leaf_a,)
container.append(leaf_b)
assert container.children == (leaf_a, leaf_b)
container.append(leaf_c)
assert container.children == (leaf_a, leaf_b, leaf_c)
container.append(leaf_d)
assert container.children == (leaf_a, leaf_b, leaf_c, leaf_d)
container.append(leaf_a)
assert container.children == (leaf_b, leaf_c, leaf_d, leaf_a)
| iamahuman/angr | tests/test_tracer.py | Python | bsd-2-clause | 7,429 | 0.003231 |
import os
import sys
import logging
import nose
import angr
from common import bin_location, do_trace, slow_test
def tracer_cgc(filename, test_name, stdin, copy_states=False):
p = angr.Project(filename)
p.simos.syscall_library.update(angr.SIM_LIBRARIES['cgcabi_tracer'])
trace, magic, crash_mode, crash_addr = do_trace(p, test_name, stdin)
s = p.factory.entry_state(mode='tracing', stdin=angr.SimFileStream, flag_page=magic)
s.preconstrainer.preconstrain_file(stdin, s.posix.stdin, True)
simgr = p.factory.simulation_manager(s, hierarchy=False, save_unconstrained=crash_mode)
t = angr.exploration_techniques.Tracer(trace, crash_addr=crash_addr, keep_predecessors=1, copy_states=copy_states)
simgr.use_technique(t)
simgr.use_technique(angr.exploration_techniques.Oppologist())
return simgr, t
def tracer_linux(filename, test_name, stdin):
p = angr.Project(filename)
trace, _, crash_mode, crash_addr = do_trace(p, test_name, stdin, ld_linux=p.loader.linux_loader_object.binary, library_path=set(os.path.dirname(obj.binary) for obj in p.loader.all_elf_objects), record_stdout=True)
s = p.factory.full_init_state(mode='tracing', stdin=angr.SimFileStream)
s.preconstrainer.preconstrain_file(stdin, s.posix.stdin, True)
simgr = p.factory.simulation_manager(s, hierarchy=False, save_unconstrained=crash_mode)
t = angr.exploration_techniques.Tracer(trace, crash_addr=crash_addr)
simgr.use_technique(t)
simgr.use_technique(angr.exploration_techniques.Oppologist())
return simgr, t
def test_recursion():
blob = bytes.fromhex("00aadd114000000000000000200000001d0000000005000000aadd2a1100001d0000000001e8030000aadd21118611b3b3b3b3b3e3b1b1b1adb1b1b1b1b1b1118611981d8611")
fname = os.path.join(os.path.dirname(__file__), "..", "..", "binaries", "tests", "cgc", "NRFIN_00075")
simgr, _ = tracer_cgc(fname, 'tracer_recursion', blob)
simgr.run()
nose.tools.assert_true(simgr.crashed)
nose.tools.assert_true(simgr.crashed[0].solver.symbolic(simgr.crashed[0].regs.ip))
@slow_test
def broken_cache_stall():
# test a valid palindrome
b = os.path.join(bin_location, "tests", "cgc", "CROMU_00071")
blob = bytes.fromhex("0c0c492a53acacacacacacacacacacacacac000100800a0b690e0aef6503697d660a0059e20afc0a0a332f7d66660a0059e20afc0a0a332f7fffffff16fb1616162516161616161616166a7dffffff7b0e0a0a6603697d660a0059e21c")
simgr, tracer = tracer_cgc(b, 'tracer_cache_stall', blob)
simgr.run()
crash_path = tracer.predecessors[-1]
crash_state = simgr.crashed[0]
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
# load it again
simgr, tracer = tracer_cgc(b, 'tracer_cache_stall', blob)
simgr.run()
crash_path = tracer.predecessors[-1]
crash_state = simgr.one_crashed
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def test_manual_recursion():
if not sys.platform.startswith('linux'):
raise nose.SkipTest()
b = os.path.join(bin_location, "tests", "cgc", "CROMU_00071")
blob = open(os.path.join(bin_location, 'tests_data', 'crash2731'), 'rb').read()
simgr, tracer = tracer_cgc(b, 'tracer_manual_recursion', blob)
simgr.run()
crash_path = tracer.predecessors[-1]
crash_state = simgr.one_crashed
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def test_cgc_se1_palindrome_raw():
b = os.path.join(bin_location, "tests", "cgc", "sc1_0b32aa01_01")
# test a valid palindrome
simgr, _ = tracer_cgc(b, 'tracer_cgc_se1_palindrome_raw_nocrash', b'racecar\n')
simgr.run()
# make sure the heap base is correct and hasn't been altered from the default
nose.tools.assert_true('traced' in simgr.stashes)
nose.tools.assert_equal(simgr.traced[0].cgc.allocation_base, 0xb8000000)
# make sure there is no crash state
nose.tools.assert_false(simgr.crashed)
# make sure angr modeled the correct output
stdout_dump = simgr.traced[0].posix.dumps(1)
nose.tools.assert_true(stdout_dump.startswith(b"\nWelcome to Palindrome Finder\n\n"
b"\tPlease enter a possible palindrome: "
b"\t\tYes, that's a palindrome!\n\n"
b"\tPlease enter a possible palindrome: "))
# make sure there were no 'Nope's from non-palindromes
nose.tools.assert_false(b"Nope" in stdout_dump)
# now test crashing input
simgr, _ = tracer_cgc(b, 'tracer_cgc_se1_palindrome_raw_yescrash', b'A'*129)
simgr.run()
nose.tools.assert_true(simgr.crashed)
def test_symbolic_sized_receives():
b = os.path.join(bin_location, "tests", "cgc", "CROMU_00070")
simgr, _ = tracer_cgc(b, 'tracer_symbolic_sized_receives', b'hello')
simgr.run()
nose.tools.assert_false(simgr.crashed)
nose.tools.assert_true('traced' in simgr.stashes)
simgr, _ = tracer_cgc(b, 'tracer_symbolic_sized_receives_nulls', b'\0'*20)
simgr.run()
nose.tools.assert_false(simgr.crashed)
nose.tools.assert_true('traced' in simgr.stashes)
def test_allocation_base_continuity():
correct_out = b'prepare for a challenge\nb7fff000\nb7ffe000\nb7ffd000\nb7ffc000\nb7ffb000\nb7ffa000\nb7ff9000\nb7ff8000\nb7ff7000\nb7ff6000\nb7ff5000\nb7ff4000\nb7ff3000\nb7ff2000\nb7ff1000\nb7ff0000\nb7fef000\nb7fee000\nb7fed000\nb7fec000\ndeallocating b7ffa000\na: b7ffb000\nb: b7fff000\nc: b7ff5000\nd: b7feb000\ne: b7fe8000\ne: b7fa8000\na: b7ffe000\nb: b7ffd000\nc: b7ff7000\nd: b7ff6000\ne: b7ff3000\ne: b7f68000\nallocate: 3\na: b7fef000\n'
b = os.path.join(bin_location, "tests", "i386", "cgc_allocations")
simgr, _ = tracer_cgc(b, 'tracer_allocation_base_continuity', b'')
simgr.run()
nose.tools.assert_equal(simgr.traced[0].posix.dumps(1), correct_out)
def test_crash_addr_detection():
b = os.path.join(bin_location, "tests", "i386", "call_symbolic")
simgr, _ = tracer_cgc(b, 'tracer_crash_addr_detection', b'A'*700)
simgr.run()
nose.tools.assert_true(simgr.crashed)
nose.tools.assert_true(simgr.crashed[0].solver.symbolic(simgr.crashed[0].regs.ip))
def test_fauxware():
if not sys.platform.startswith('linux'):
raise nose.SkipTest()
b = os.path.join(bin_location, "tests", "x86_64", "fauxware")
simgr, _ = tracer_linux(b, 'tracer_fauxware', b'A'*18)
simgr.run()
nose.tools.assert_true('traced' in simgr.stashes)
def run_all():
def print_test_name(name):
print('#' * (len(name) + 8))
print('###', name, '###')
print('#' * (len(name) + 8))
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
print_test_name(f)
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.simos").setLevel("DEBUG")
logging.getLogger("angr.state_plugins.preconstrainer").setLevel("DEBUG")
logging.getLogger("angr.exploration_techniques.tracer").setLevel("DEBUG")
logging.getLogger("angr.exploration_techniques.crash_monitor").setLevel("DEBUG")
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
| 0ps/wfuzz | src/wfuzz/externals/moduleman/modulefilter.py | Python | gpl-2.0 | 4,524 | 0.002653 |
# mimicking nmap script filter
# nmap --script "http-*"
# Loads all scripts whose name starts with http-, such as http-auth and http-open-proxy. The argument to --script had to be in quotes to protect the wildcard from the shell.
# not valid for categories!
#
# More complicated script selection can be done using the and, or, and not operators to build Boolean expressions. The operators have the same precedence[12] as in Lua: not is the
# highest, followed by and and then or. You can alter precedence by using parentheses. Because expressions contain space characters it is necessary to quote them.
#
# nmap --script "not intrusive"
# Loads every script except for those in the intrusive category.
#
# nmap --script "default or safe"
# This is functionally equivalent to nmap --script "default,safe". It loads all scripts that are in the default category or the safe category or both.
#
# nmap --script "default and safe"
# Loads those scripts that are in both the default and safe categories.
#
# nmap --script "(default or safe or intrusive) and not http-*"
# Loads scripts in the default, safe, or intrusive categories, except for those whose names start with http-.
PYPARSING = True
try:
from pyparsing import Word, Group, oneOf, Optional, Suppress, ZeroOrMore, Literal, alphas, alphanums
except ImportError:
PYPARSING = False
class IFilter:
def is_visible(self, plugin, filter_string):
raise NotImplementedError
class Filter(IFilter):
def __init__(self):
if PYPARSING:
category = Word(alphas + "_-*", alphanums + "_-*")
operator = oneOf("and or ,")
neg_operator = "not"
elementRef = category
definition = elementRef + ZeroOrMore(operator + elementRef)
nestedformula = Group(Suppress(Optional(Literal("("))) + definition + Suppress(Optional(Literal(")"))))
neg_nestedformula = Optional(neg_operator) + nestedformula
self.finalformula = neg_nestedformula + ZeroOrMore(operator + neg_nestedformula)
elementRef.setParseAction(self.__compute_element)
neg_nestedformula.setParseAction(self.__compute_neg_formula)
nestedformula.setParseAction(self.__compute_formula)
self.finalformula.setParseAction(self.__myreduce)
def __compute_neg_formula(self, tokens):
if len(tokens) > 1 and tokens[0] == 'not':
return not tokens[1]
else:
return tokens[0]
def __compute_element(self, tokens):
item = tokens[0]
wildc_index = item.find("*")
if wildc_index > 0:
return self.plugin.name.startswith(item[:wildc_index])
else:
if isinstance(self.plugin.category, list):
return (item in self.plugin.category or self.plugin.name == item)
else:
return (self.plugin.category == item or self.plugin.name == item)
def __myreduce(self, elements):
first = elements[0]
for i in range(1, len(elements), 2):
if elements[i] == "and":
first = (first and elements[i + 1])
elif elements[i] == "or" or elements[i] == ",":
first = (first or elements[i + 1])
return first
def __compute_formula(self, tokens):
return self.__myreduce(tokens[0])
def simple_filter(self, plugin, filter_string):
ret = []
for item in filter_string.split(","):
wildc_index = item.find("*")
if wildc_index > 0:
ret.append((item in plugin.category or plugin.name.startswith(item[:wildc_index])))
else:
ret.append((item in plugin.category or plugin.name == item))
return any(ret)
def simple_filter_banned_keywords(self, filter_string):
if filter_string.find("(") >= 0:
return True
elif filter_string.find(")") >= 0:
return True
elif any(x in ["or", "not", "and"] for x in filter_string.split(" ")):
return True
else:
return False
def is_visible(self, plugin, filter_string):
self.plugin = plugin
if PYPARSING:
return self.finalformula.parseString(filter_string)[0]
else:
if self.simple_filter_banned_keywords(filter_string):
raise Exception("Pyparsing missing, complex filters not allowed.")
else:
return self.simple_filter(plugin, filter_string)
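A hypothetical illustration of the two filtering paths above; FakePlugin is made up, standing in for a real plugin object with the name and category attributes the code relies on:

```python
class FakePlugin:
    name = "http-auth"
    category = ["default", "safe"]


flt = Filter()
# Without pyparsing only simple comma-separated filters are allowed:
print(flt.simple_filter(FakePlugin(), "safe,intrusive"))  # True: 'safe' is in the category list
print(flt.simple_filter(FakePlugin(), "http-*"))          # True: the name starts with 'http-'
# Boolean expressions such as "default and not http-*" must go through
# is_visible(), which needs pyparsing to build self.finalformula.
```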
| pandas-dev/pandas | pandas/tests/frame/indexing/test_coercion.py | Python | bsd-3-clause | 5,463 | 0.001464 |
"""
Tests for values coercion in setitem-like operations on DataFrame.
For the most part, these should be multi-column DataFrames, otherwise
we would share the tests with Series.
"""
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSetitemCoercion:
@pytest.mark.xfail(reason="Unnecessary cast.")
@pytest.mark.parametrize("consolidate", [True, False])
def test_loc_setitem_multiindex_columns(self, consolidate):
# GH#18415 Setting values in a single column preserves dtype,
# while setting them in multiple columns did unwanted cast.
# Note that A here has 2 blocks, below we do the same thing
# with a consolidated frame.
A = DataFrame(np.zeros((6, 5), dtype=np.float32))
A = pd.concat([A, A], axis=1, keys=[1, 2])
if consolidate:
A = A._consolidate()
A.loc[2:3, (1, slice(2, 3))] = np.ones((2, 2), dtype=np.float32)
assert (A.dtypes == np.float32).all()
A.loc[0:5, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32)
assert (A.dtypes == np.float32).all()
A.loc[:, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32)
assert (A.dtypes == np.float32).all()
# TODO: i think this isn't about MultiIndex and could be done with iloc?
def test_37477():
# fixed by GH#45121
orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
expected = DataFrame({"A": [1, 2, 3], "B": [3, 1.2, 5]})
df = orig.copy()
df.at[1, "B"] = 1.2
tm.assert_frame_equal(df, expected)
df = orig.copy()
df.loc[1, "B"] = 1.2
tm.assert_frame_equal(df, expected)
df = orig.copy()
df.iat[1, 1] = 1.2
tm.assert_frame_equal(df, expected)
df = orig.copy()
df.iloc[1, 1] = 1.2
tm.assert_frame_equal(df, expected)
def test_6942(indexer_al):
# check that the .at __setitem__ after setting "Live" actually sets the data
start = Timestamp("2014-04-01")
t1 = Timestamp("2014-04-23 12:42:38.883082")
t2 = Timestamp("2014-04-24 01:33:30.040039")
dti = date_range(start, periods=1)
orig = DataFrame(index=dti, columns=["timenow", "Live"])
df = orig.copy()
indexer_al(df)[start, "timenow"] = t1
df["Live"] = True
df.at[start, "timenow"] = t2
assert df.iloc[0, 0] == t2
def test_26395(indexer_al):
# .at case fixed by GH#45121 (best guess)
df = DataFrame(index=["A", "B", "C"])
df["D"] = 0
indexer_al(df)["C", "D"] = 2
expected = DataFrame({"D": [0, 0, 2]}, index=["A", "B", "C"], dtype=np.int64)
tm.assert_frame_equal(df, expected)
indexer_al(df)["C", "D"] = 44.5
expected = DataFrame({"D": [0, 0, 44.5]}, index=["A", "B", "C"], dtype=np.float64)
tm.assert_frame_equal(df, expected)
indexer_al(df)["C", "D"] = "hello"
expected = DataFrame({"D": [0, 0, "hello"]}, index=["A", "B", "C"], dtype=object)
tm.assert_frame_equal(df, expected)
@pytest.mark.xfail(reason="unwanted upcast")
def test_15231():
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
df.loc[2] = Series({"a": 5, "b": 6})
assert (df.dtypes == np.int64).all()
df.loc[3] = Series({"a": 7})
# df["a"] doesn't have any NaNs, should not have been cast
exp_dtypes = Series([np.int64, np.float64], dtype=object, index=["a", "b"])
tm.assert_series_equal(df.dtypes, exp_dtypes)
@pytest.mark.xfail(reason="Unnecessarily upcasts to float64")
def test_iloc_setitem_unnecesssary_float_upcasting():
# GH#12255
df = DataFrame(
{
0: np.array([1, 3], dtype=np.float32),
1: np.array([2, 4], dtype=np.float32),
2: ["a", "b"],
}
)
orig = df.copy()
values = df[0].values.reshape(2, 1)
df.iloc[:, 0:1] = values
tm.assert_frame_equal(df, orig)
@pytest.mark.xfail(reason="unwanted casting to dt64")
def test_12499():
# TODO: OP in GH#12499 used np.datetim64("NaT") instead of pd.NaT,
# which has consequences for the expected df["two"] (though i think at
# the time it might not have because of a separate bug). See if it makes
# a difference which one we use here.
ts = Timestamp("2016-03-01 03:13:22.98986", tz="UTC")
data = [{"one": 0, "two": ts}]
orig = DataFrame(data)
df = orig.copy()
df.loc[1] = [np.nan, NaT]
expected = DataFrame(
{"one": [0, np.nan], "two": Series([ts, NaT], dtype="datetime64[ns, UTC]")}
)
tm.assert_frame_equal(df, expected)
data = [{"one": 0, "two": ts}]
df = orig.copy()
df.loc[1, :] = [np.nan, NaT]
tm.assert_frame_equal(df, expected)
@pytest.mark.xfail(reason="Too many columns cast to float64")
def test_20476():
mi = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
df = DataFrame(-1, index=range(3), columns=mi)
filler = DataFrame([[1, 2, 3.0]] * 3, index=range(3), columns=["a", "b", "c"])
df["A"] = filler
expected = DataFrame(
{
0: [1, 1, 1],
1: [2, 2, 2],
2: [3.0, 3.0, 3.0],
3: [-1, -1, -1],
4: [-1, -1, -1],
5: [-1, -1, -1],
}
)
expected.columns = mi
exp_dtypes = Series(
[np.dtype(np.int64)] * 2 + [np.dtype(np.float64)] + [np.dtype(np.int64)] * 3,
index=mi,
)
tm.assert_series_equal(df.dtypes, exp_dtypes)
| kapilt/cloud-custodian | c7n/handler.py | Python | apache-2.0 | 6,491 | 0.000616 |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud-Custodian AWS Lambda Entry Point
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
import json
from c7n.config import Config
from c7n.structure import StructureParser
from c7n.resources import load_resources
from c7n.policy import PolicyCollection
from c7n.utils import format_event, get_account_id_from_sts, local_session
import boto3
logging.root.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
log = logging.getLogger('custodian.lambda')
##########################################
#
# Env var AWS Lambda specific configuration options, these are part of
# our "public" interface and hence are subject to compatiblity constraints.
#
# Control whether custodian lambda policy skips events that represent errors.
# We default to skipping events which denote they have errors.
# Set with `export C7N_SKIP_EVTERR=no` to process error events
C7N_SKIP_EVTERR = True
# Control whether the triggering event is logged.
# Set with `export C7N_DEBUG_EVENT=no` to disable event logging.
C7N_DEBUG_EVENT = True
# Control whether a policy failure will result in a lambda execution failure.
# Lambda on error will report error metrics and depending on event source
# automatically retry.
# Set with `export C7N_CATCH_ERR=yes`
C7N_CATCH_ERR = False
##########################################
#
# Internal global variables
#
# config.json policy data dict
policy_data = None
# execution options for the policy
policy_config = None
def init_env_globals():
"""Set module level values from environment variables.
Encapsulated here to enable better testing.
"""
global C7N_SKIP_EVTERR, C7N_DEBUG_EVENT, C7N_CATCH_ERR
C7N_SKIP_EVTERR = os.environ.get(
'C7N_SKIP_ERR_EVENT', 'yes') == 'yes' and True or False
C7N_DEBUG_EVENT = os.environ.get(
'C7N_DEBUG_EVENT', 'yes') == 'yes' and True or False
C7N_CATCH_ERR = os.environ.get(
'C7N_CATCH_ERR', 'no').strip().lower() == 'yes' and True or False
def init_config(policy_config):
"""Get policy lambda execution configuration.
cli parameters are serialized into the policy lambda config,
we merge those with any policy specific execution options.
--assume role and -s output directory get special handling, as
to disambiguate any cli context.
account id is sourced from the config options or from api call
and cached as a global.
Todo: this should get refactored out to mu.py as part of the
write out of configuration, instead of runtime processed.
"""
exec_options = policy_config.get('execution-options', {})
# Remove some configuration options that don't make sense to translate from
# cli to lambda automatically.
# - assume role on cli doesn't translate, it is the default lambda role and
# used to provision the lambda.
# - profile doesnt translate to lambda its `home` dir setup dependent
# - dryrun doesn't translate (and shouldn't be present)
# - region doesn't translate from cli (the lambda is bound to a region), and
# on the cli represents the region the lambda is provisioned in.
for k in ('assume_role', 'profile', 'region', 'dryrun', 'cache'):
exec_options.pop(k, None)
# a cli local directory doesn't translate to lambda
if not exec_options.get('output_dir', '').startswith('s3'):
exec_options['output_dir'] = '/tmp'
account_id = None
# we can source account id from the cli parameters to avoid the sts call
if exec_options.get('account_id'):
account_id = exec_options['account_id']
# merge with policy specific configuration
exec_options.update(
policy_config['policies'][0].get('mode', {}).get('execution-options', {}))
# if using assume role in lambda ensure that the correct
# execution account is captured in options.
if 'assume_role' in exec_options:
account_id = exec_options['assume_role'].split(':')[4]
elif account_id is None:
session = local_session(boto3.Session)
account_id = get_account_id_from_sts(session)
exec_options['account_id'] = account_id
# Historical compatibility with manually set execution options
# previously this was a boolean, its now a string value with the
# boolean flag triggering a string value of 'aws'
if 'metrics_enabled' in exec_options \
and isinstance(exec_options['metrics_enabled'], bool) \
and exec_options['metrics_enabled']:
exec_options['metrics_enabled'] = 'aws'
return Config.empty(**exec_options)
# One time initialization of global environment settings
init_env_globals()
def dispatch_event(event, context):
error = event.get('detail', {}).get('errorCode')
if error and C7N_SKIP_EVTERR:
log.debug("Skipping failed operation: %s" % error)
return
# one time initialization for cold starts.
global policy_config, policy_data
if policy_config is None:
with open('config.json') as f:
policy_data = json.load(f)
policy_config = init_config(policy_data)
load_resources(StructureParser().get_resource_types(policy_data))
if C7N_DEBUG_EVENT:
event['debug'] = True
log.info("Processing event\n %s", format_event(event))
if not policy_data or not policy_data.get('policies'):
return False
policies = PolicyCollection.from_data(policy_data, policy_config)
for p in policies:
try:
# validation provides for an initialization point for
# some filters/actions.
p.validate()
p.push(event, context)
except Exception:
log.exception("error during policy execution")
if C7N_CATCH_ERR:
continue
raise
return True
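For orientation, a hypothetical sketch of the minimal shape of the config.json that dispatch_event() loads on a cold start; the key names follow the code above, while the policy content itself is made up:

```python
# Hypothetical config.json contents, shown here as a Python dict.
example_policy_data = {
    "execution-options": {            # merged into exec options by init_config()
        "output_dir": "s3://example-bucket/custodian",
        "account_id": "123456789012",
    },
    "policies": [                     # dispatch_event() returns False if this is missing or empty
        {
            "name": "example-policy",
            "resource": "ec2",
            "mode": {"type": "cloudtrail", "execution-options": {}},
        }
    ],
}
```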
| divija96/Emotion-Detection | code/mouthdetection.py | Python | gpl-3.0 | 1,760 | 0.029545 |
"""
input: a loaded image;
output: [[x,y],[width,height]] of the detected mouth area
"""
import cv
def findmouth(img):
# INITIALIZE: loading the classifiers
haarFace = cv.Load('haarcascade_frontalface_default.xml')
haarMouth = cv.Load('haarcascade_mouth.xml')
# running the classifiers
storage = cv.CreateMemStorage()
detectedFace = cv.HaarDetectObjects(img, haarFace, storage)
detectedMouth = cv.HaarDetectObjects(img, haarMouth, storage)
# FACE: find the largest detected face as detected face
maxFaceSize = 0
maxFace = 0
if detectedFace:
for face in detectedFace: # face: [0][0]: x; [0][1]: y; [0][2]: width; [0][3]: height
if face[0][3]* face[0][2] > maxFaceSize:
maxFaceSize = face[0][3]* face[0][2]
maxFace = face
if maxFace == 0: # did not detect face
return 2
def mouth_in_lower_face(mouth,face):
# if the mouth is in the lower 2/5 of the face
# and the lower edge of mouth is above that of the face
# and the horizontal center of the mouth is the center of the face
if (mouth[0][1] > face[0][1] + face[0][3] * 3 / float(5)
and mouth[0][1] + mouth[0][3] < face[0][1] + face[0][3]
and abs((mouth[0][0] + mouth[0][2] / float(2))
- (face[0][0] + face[0][2] / float(2))) < face[0][2] / float(10)):
return True
else:
return False
# FILTER MOUTH
filteredMouth = []
if detectedMouth:
for mouth in detectedMouth:
if mouth_in_lower_face(mouth,maxFace):
filteredMouth.append(mouth)
maxMouthSize = 0
for mouth in filteredMouth:
if mouth[0][3]* mouth[0][2] > maxMouthSize:
maxMouthSize = mouth[0][3]* mouth[0][2]
maxMouth = mouth
try:
return maxMouth
except UnboundLocalError:
return 2
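A hypothetical worked example of the geometric test in mouth_in_lower_face, restated standalone with made-up rectangles; the legacy cv.HaarDetectObjects results used above have the layout ((x, y, w, h), neighbours):

```python
# Made-up detections: a 200x300 px face at (100, 100) and a 40x30 px mouth at (180, 300).
face = ((100, 100, 200, 300), 25)
mouth = ((180, 300, 40, 30), 12)

in_lower_two_fifths = mouth[0][1] > face[0][1] + face[0][3] * 3 / float(5)   # 300 > 280 -> True
above_face_bottom = mouth[0][1] + mouth[0][3] < face[0][1] + face[0][3]      # 330 < 400 -> True
horizontally_centred = abs((mouth[0][0] + mouth[0][2] / float(2))
                           - (face[0][0] + face[0][2] / float(2))) < face[0][2] / float(10)  # 0 < 20 -> True
print(in_lower_two_fifths and above_face_bottom and horizontally_centred)  # True: this mouth passes the filter
```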
| eukaryote31/chaos | github_api/prs.py | Python | mit | 8,393 | 0.000953 |
import arrow
import settings
from requests import HTTPError
from . import misc
from . import voting
from . import comments
from . import exceptions as exc
def merge_pr(api, urn, pr, votes, total, threshold):
""" merge a pull request, if possible, and use a nice detailed merge commit
message """
pr_num = pr["number"]
pr_title = pr['title']
pr_description = pr['body']
path = "/repos/{urn}/pulls/{pr}/merge".format(urn=urn, pr=pr_num)
record = voting.friendly_voting_record(votes)
if record:
record = "Vote record:\n" + record
votes_summary = formatted_votes_summary(votes, total, threshold)
pr_url = "https://github.com/{urn}/pull/{pr}".format(urn=urn, pr=pr_num)
title = "merging PR #{num}: {pr_title}".format(
num=pr_num, pr_title=pr_title)
desc = """
{pr_url}: {pr_title}
Description:
{pr_description}
:ok_woman: PR passed {summary}.
{record}
""".strip().format(
pr_url=pr_url,
pr_title=pr_title,
pr_description=pr_description,
summary=votes_summary,
record=record,
)
data = {
"commit_title": title,
"commit_message": desc,
"merge_method": "merge",
# if some clever person attempts to submit more commits while we're
# aggregating votes, this sha check will fail and no merge will occur
"sha": pr["head"]["sha"],
}
try:
resp = api("PUT", path, json=data)
return resp["sha"]
except HTTPError as e:
resp = e.response
# could not be merged
if resp.status_code == 405:
raise exc.CouldntMerge
# someone trying to be sneaky and change their PR commits during voting
elif resp.status_code == 409:
raise exc.CouldntMerge
else:
raise
def formatted_votes_summary(votes, total, threshold):
vfor = sum(v for v in votes.values() if v > 0)
vagainst = abs(sum(v for v in votes.values() if v < 0))
return "with a vote of {vfor} for and {vagainst} against, with a weighted total of {total:.1f} and a threshold of {threshold:.1f}" \
.strip().format(vfor=vfor, vagainst=vagainst, total=total, threshold=threshold)
def formatted_votes_short_summary(votes, total, threshold):
vfor = sum(v for v in votes.values() if v > 0)
vagainst = abs(sum(v for v in votes.values() if v < 0))
return "vote: {vfor}-{vagainst}, weighted total: {total:.1f}, threshold: {threshold:.1f}" \
.strip().format(vfor=vfor, vagainst=vagainst, total=total, threshold=threshold)
def label_pr(api, urn, pr_num, labels):
""" set a pr's labels (removes old labels) """
if not isinstance(labels, (tuple, list)):
labels = [labels]
path = "/repos/{urn}/issues/{pr}/labels".format(urn=urn, pr=pr_num)
data = labels
resp = api("PUT", path, json=data)
def close_pr(api, urn, pr):
""" https://developer.github.com/v3/pulls/#update-a-pull-request """
path = "/repos/{urn}/pulls/{pr}".format(urn=urn, pr=pr["number"])
data = {
"state": "closed",
}
return api("patch", path, json=data)
def get_pr_last_updated(pr_data):
""" a helper for finding the utc datetime of the last pr branch
modifications """
repo = pr_data["head"]["repo"]
if repo:
dt = repo["pushed_at"]
else:
dt = pr_data["created_at"]
return arrow.get(dt)
def get_pr_comments(api, urn, pr_num):
""" yield all comments on a pr, weirdly excluding the initial pr comment
itself (the one the owner makes) """
params = {
"per_page": settings.DEFAULT_PAGINATION
}
path = "/repos/{urn}/issues/{pr}/comments".format(urn=urn, pr=pr_num)
comments = api("get", path, params=params)
for comment in comments:
yield comment
def get_ready_prs(api, urn, window):
""" yield mergeable, non-WIP prs that have had no modifications for longer
than the voting window. these are prs that are ready to be considered for
merging """
open_prs = get_open_prs(api, urn)
for pr in open_prs:
pr_num = pr["number"]
now = arrow.utcnow()
updated = get_pr_last_updated(pr)
delta = (now - updated).total_seconds()
is_wip = "WIP" in pr["title"]
if not is_wip and delta > window:
# we check if its mergeable if its outside the voting window,
# because there seems to be a race where a freshly-created PR exists
# in the paginated list of PRs, but 404s when trying to fetch it
# directly
mergeable = get_is_mergeable(api, urn, pr_num)
if mergeable is True:
label_pr(api, urn, pr_num, [])
yield pr
elif mergeable is False:
label_pr(api, urn, pr_num, ["conflicts"])
if delta >= 60 * 60 * settings.PR_STALE_HOURS:
comments.leave_stale_comment(
api, urn, pr["number"], round(delta / 60 / 60))
close_pr(api, urn, pr)
# mergeable can also be None, in which case we just skip it for now
def voting_window_remaining_seconds(pr, window):
now = arrow.utcnow()
updated = get_pr_last_updated(pr)
delta = (now - updated).total_seconds()
return window - delta
def is_pr_in_voting_window(pr, window):
return voting_window_remaining_seconds(pr, window) <= 0
def get_pr_reviews(api, urn, pr_num):
""" get all pr reviews on a pr
https://help.github.com/articles/about-pull-request-reviews/ """
params = {
"per_page": settings
|
.DEFAULT_PAGINATION
}
path = "/repos/{urn}/pulls/{pr}/reviews".format(urn=urn, pr=pr_num)
data = api("get", path, params=params)
return data
def get_is_mergeable(api, urn, pr_num):
return get_pr(api, urn, pr_num)["mergeable"]
def get_pr(api, urn, pr_num):
""" helper for fetching a pr. necessary because the "mergeable" field does
not exist on prs that come back from paginated endpoints, so we must fetch
the pr directly """
path = "/repos/{urn}/pulls/{pr}".format(urn=urn, pr=pr_num)
pr = api("get", path)
return pr
def get_open_prs(api, urn):
params = {
"state": "open",
"sort": "updated",
"direction": "asc",
"per_page": settings.DEFAULT_PAGINATION,
}
path = "/repos/{urn}/pulls".format(urn=urn)
data = api("get", path, params=params)
return data
def get_reactions_for_pr(api, urn, pr):
path = "/repos/{urn}/issues/{pr}/reactions".format(urn=urn, pr=pr)
params = {"per_page": settings.DEFAULT_PAGINATION}
reactions = api("get", path, params=params)
for reaction in reactions:
yield reaction
def post_accepted_status(api, urn, pr, voting_window, votes, total, threshold):
sha = pr["head"]["sha"]
remaining_seconds = voting_window_remaining_seconds(pr, voting_window)
remaining_human = misc.seconds_to_human(remaining_seconds)
votes_summary = formatted_votes_short_summary(votes, total, threshold)
post_status(api, urn, sha, "success",
"remaining: {time}, {summary}".format(time=remaining_human, summary=votes_summary))
def post_rejected_status(api, urn, pr, voting_window, votes, total, threshold):
sha = pr["head"]["sha"]
remaining_seconds = voting_window_remaining_seconds(pr, voting_window)
remaining_human = misc.seconds_to_human(remaining_seconds)
votes_summary = formatted_votes_short_summary(votes, total, threshold)
post_status(api, urn, sha, "failure",
"remaining: {time}, {summary}".format(time=remaining_human, summary=votes_summary))
def post_pending_status(api, urn, pr, voting_window, votes, total, threshold):
sha = pr["head"]["sha"]
remaining_seconds = voting_window_remaining_seconds(pr, voting_window)
remaining_human = misc.seconds_to_human(remaining_seconds)
votes_summary = formatted_votes_short_summary(votes, total, threshold)
post_status(api, urn, sha, "pending",
"remaining: {time}, {summary}".format(time=remaining_human, summary=votes_summary))
def post_status(api, urn, sha, state, description):
""" apply an issue label to a pr
|
SimpleTax/merchant
|
billing/templatetags/billing_tags.py
|
Python
|
bsd-3-clause
| 740
| 0
|
"""
Template tags for Offsite payment gateways
"""
from django import template
from billing.templatetags.paypal_tags import paypal
from billing.templatetags.world_pay_tags import world_pay
from billing.templatetags.google_checkout_tags import google_checkout
from billing.templatetags.amazon_fps_tags import amazon_fps
from billing.templatetags.braintree_payments_tags import braintree_payments
from billing.templatetags.stripe_tags import stripe_payment
from billing.templatetags.samurai_tags import samurai_payment
register = template.Library()
register.tag(google_checkout)
register.tag(paypal)
register.tag(world_pay)
register.tag(amazon_fps)
register.tag(braintree_payments)
register.tag(stripe_payment)
register.tag(samurai_payment)
|
spirali/qit
|
src/qit/base/file.py
|
Python
|
gpl-3.0
| 133
| 0.015038
|
from qit.base.type import Type
class File(Type):
pass_by_value = True
def build(self, builder):
return "FILE*"
|
pekingduck/emacs-sqlite3-api
|
tools/gen-consts.py
|
Python
|
gpl-3.0
| 908
| 0.01652
|
#!/usr/bin/env python3
import sys
import os
import re
useful_codes = []
with open(sys.argv[1]) as f:
for l in f.readlines():
useful_codes.append(l.rstrip())
# Read from sqlite3.h (from stdin)
# only codes that exist in useful_codes are included in consts.c
for line in sys.stdin.readlines():
# fields = [ "#define", "SQLITE_XXXX" "YYYY" ];
fields = re.split("\s+", line.rstrip(), 3)
#print("{0}".format(fields[1]))
if not fields[1] in useful_codes:
#print("{0} excluded".format(
|
fields[1]))
|
continue
sym = re.sub("_", "-", fields[1].lower())
if len(fields) > 2 and fields[2] != "":
print("#ifdef {0}".format(fields[1]))
if fields[2].startswith('"'):
print('defconst(env, "{0}", env->make_string(env, {1}, strlen({1})));'.format(sym, fields[1]))
else:
print('defconst(env, "{0}", env->make_integer(env, {1}));'.format(sym, fields[1]))
print("#endif")
|
eseidel/native_client_patches
|
src/trusted/service_runtime/export_header.py
|
Python
|
bsd-3-clause
| 2,925
| 0.014701
|
#!/usr/bin/python
#
# Copyright 2008, 2009, The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Tools for exporting Native Client ABI header files.
This module is used to export Native Client ABI header files -- which
are in the native_client/src/trusted/service_runtime/include directory
-- for use with the SDK and newlib to compile NaCl applications.
"""
import os
import re
import sys
UNMODIFIED_DIR=['nacl','abi']
def ProcessStream(instr, outstr):
"""Read internal version of header file from instr, write exported
version to outstr. The two transformations are to remove nacl_abi_
prefixes (in its various incarnations), and to change include
directives from the Google coding style
"native_client/include/foo/bar.h" to <foo/bar.h>, and from
"native_client/src/trusted/service_runtime/include/baz/quux.h" to
<baz/quux.h>."""
pat = r'\b(?:nacl_abi_|NaClAbi|NACL_ABI_)([A-Za-z0-9_]*)'
cpat = re.compile(pat)
inc = (r'^#\s*include\s+"native_client(?:/src/trusted/service_runtime)?'+
r'/include/([^"]*)"')
cinc = re.compile(inc)
nostrip_beg = r'^#define NACL_NO_STRIP'
cnostrip_beg = re.compile(nostrip_beg)
nostrip_end = r'^#undef NACL_NO_STRIP'
cnostrip_end = re.compile(nostrip_end)
nostrip = False
for line in instr:
if cinc.search(line):
print >>outstr, cinc.sub(r'#include <\1>', line)
else:
if nostrip:
if cnostrip_end.search(line):
nostrip = False;
print >>outstr, line,
else:
if cnostrip_beg.search(line):
nostrip = True;
print >>outstr, cpat.sub(r'\1', line),
# endif
# endfor
# enddef
def CopyStream(instr, outstr):
for line in instr:
outstr.write(line)
# endfor
# enddef
def ProcessDir(srcdir, dstdir, unmodified_dstdir):
if not os.path.isdir(srcdir):
return
# endif
if not os.path.isdir(dstdir):
os.makedirs(dstdir)
# endif
if not os.path.isdir(unmodified_dstdir):
os.makedirs(unmodified_dstdir)
# endif
for fn in os.listdir(srcdir):
srcpath = os.path.join(srcdir, fn)
dstpath = os.path.join(dstdir, fn)
undstpath = os.path.join(unmodified_dstdir, fn)
if os.path.isfile(srcpath) and fn.endswith('.h'):
ProcessStream(open(srcpath),
open(dstpath, 'w'))
CopyStream(open(srcpath),
open(undstpath, 'w'))
elif os.path.isdir(srcpath):
ProcessDir(srcpath, dstpath, undstpath)
# endif
# endfor
# enddef
def main(argv):
if len(argv) != 3:
print >>sys.stderr, ('Usage: ./export_header source/include/path'
' dest/include/path')
return 1
# endif
ProcessDir(argv[1], argv[2],
reduce(os.path.join, UNMODIFIED_DIR, argv[2]));
return 0
# enddef
if __name__ == '__main__':
sys.exit(main(sys.argv))
# endif
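# --- Illustration only (not part of the original tool) ---
# Rough sketch of the transformation ProcessStream performs, using in-memory
# streams; the header path below is invented for the example.
#   from StringIO import StringIO
#   src = StringIO('#include "native_client/src/trusted/service_runtime/include/sys/example.h"\n')
#   dst = StringIO()
#   ProcessStream(src, dst)
#   # dst.getvalue() now starts with: #include <sys/example.h>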
|
renguochao/PySymTool
|
py_group.py
|
Python
|
mit
| 8,872
| 0.001826
|
# coding=UTF-8
import mysql.connector
import xlrd
import xlsxwriter
import os
from mysql.connector import errorcode
from datetime import datetime
# File name of the symbolicated Excel workbook
EXCEL_NAME = '20170223_4.0.1_feedback_result_py'
DB_NAME = 'zl_crash'
config = {
'user': 'root',
'password': '123456',
'host': '127.0.0.1',
'database': 'zl_crash',
}
class Report(object):
'''
Report class used to encapsulate the row data in EXCEL
'''
def __init__(self, report_id, exception_type, device_id, exception_symbols, os_version):
self.report_id = report_id;
self.exception_type = exception_type;
self.device_id = device_id;
self.exception_symbols = exception_symbols;
self.os_version = os_version;
def main():
begin_time = datetime.now()
# Table name
table_name = 'report_' + begin_time.strftime("%Y_%m_%d_%H_%M_%S")
# Create the table
create_table_in_db(table_name)
# Insert the data
insert_symbolication_result_into_db(table_name)
# Group the data and export it
generate_grouped_exception(table_name)
end_time = datetime.now()
print('Elapsed time: ' + str(end_time - begin_time))
def create_table_in_db(table_name):
'''
Create a table named `table_name` in the database.
:param table_name: table_name
'''
SQLS = {}
SQLS['drop_report'] = (
"DROP TABLE IF EXISTS `" + table_name + "`")
SQLS['report'] = (
"CREATE TABLE `" + table_name + "` ( "
"`report_id` int(11) NOT NULL AUTO_INCREMENT, "
"`exception_type` varchar(255) DEFAULT NULL, "
"`device_id` varchar(255) DEFAULT NULL, "
"`exception_symbols` longtext, "
"`os_version` varchar(255) DEFAULT NULL, "
"PRIMARY KEY (`report_id`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8")
try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor();
for name, sql in SQLS.items():
try:
print("Executing sql {}.".format(name))
cursor.execute(sql)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print('Table already exists.')
else:
print(err.msg)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err.msg)
finally:
cursor.close()
conn.close()
def insert_symbolication_result_into_db(table_name):
'''
Insert the symbolicated results into the database
:param table_name: table name in the database
'''
try:
conn = mysql.connector.connect(**config)
# print('connected to db')
cursor = conn.cursor()
insert_report = (
"INSERT INTO " + table_name + " "
"(exception_type, device_id, exception_symbols, os_version) "
"VALUES (%s, %s, %s, %s)")
work_book = xlrd.open_workbook(EXCEL_NAME + '.xlsx')
sheet = work_book.sheets()[0]
nrows = sheet.nrows
ncols = sheet.ncols
row_index = 1
for row_index in range(1, nrows):
data_row = sheet.row_values(row_index)
# assert col < ncols
device_id = data_row[0]
os_version = data_row[1]
exception_type = data_row[2]
exception_symbols = data_row[3]
if exception_symbols == '':
continue
data_report = (exception_type, device_id, exception_symbols, os_version)
# insert report data
cursor.execute(insert_report, data_report)
conn.commit()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err.msg)
finally:
cursor.close()
conn.close()
def generate_grouped_exception(table_name):
'''
Group the exceptions stored in the database table and export the grouped data.
:param table_name: table name in the zl_crash database
'''
EXCEPTION_TYPE_COUNT = {}
EXCEPTION_MAPPING = {}
try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
group_exception_type = (
"SELECT exception_type, COUNT(*) as nums "
"FROM " + table_name + " GROUP BY exception_typ
|
e")
query_specific_exception = (
"SELECT * FROM " + table_name + " "
"WHERE exception_type = %s")
cursor.execute(group_exception_type)
for (exception_type, nums) in cursor:
EXCEPTION_TYPE_COUNT[exception_type] = nums
# print("exception_type:" + exception_type + ", nums:" + str(nums))
for exception_type in EXCEPTION_TYPE_COUNT.keys():
cursor.execute(query_specific_exception, (exception_type,))
exception_list = []
for (report_id, exception_type, device_id, exception_symbols, os_version) in cursor:
report = Report(report_id, exception_type, device_id, exception_symbols, os_version)
exception_list.append(report)
EXCEPTION_MAPPING[exception_type] = exception_list
write_grouped_exception_to_file(EXCEPTION_TYPE_COUNT, EXCEPTION_MAPPING)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err.msg)
finally:
cursor.close()
conn.close()
def write_grouped_exception_to_file(count, mapping):
'''
Export grouped exception to file
:param count: dict, key: exception_type, value: count
:param mapping: dict, key: exception_type, value: exception_list
'''
output_file_name = EXCEL_NAME + '_grouped.xlsx'
os.system('rm -rf ' + output_file_name)
workbook = xlsxwriter.Workbook(output_file_name)
worksheet = workbook.add_worksheet()
# Set column widths
worksheet.set_column('A:A', 25)
worksheet.set_column('B:B', 10)
worksheet.set_column('C:C', 25)
worksheet.set_column('D:D', 40)
worksheet.set_column('E:E', 500)
# Bold format
bold = workbook.add_format({'font_size': 14,
'align': 'center',
'bold': True})
# Header row
worksheet.write('A1', 'exception_type', bold)
worksheet.write('B1', 'count', bold)
worksheet.write('C1', 'os_version', bold)
worksheet.write('D1', 'device_id', bold)
worksheet.write('E1', 'symbols', bold)
# Row/column indices for writing into the Excel sheet
row_index = 1
col_index = 0
colors = ('#A8BAAA', '#FFF6CF', '#DCCDAE', '#B49D7E',
'#816854', '#334D5C', '#45B29D', '#EFC94C')
count_index = 0
pattern = 0.5
for (type, num) in count.items():
bg_color = colors[count_index % len(colors)]
col_format = workbook.add_format({'pattern': pattern,
'bg_color': bg_color})
num_col_format = workbook.add_format({'pattern': pattern,
'bg_color': bg_color,
'bold': True,
'align': 'center'})
count_index += 1
list = mapping[type]
for i in range(num):
report_item = list[i]
if i == 0:
worksheet.write(row_index, col_index, report_item.exception_type, col_format)
col_index += 1
worksheet.write(row_index, col_index, num, num_col_format)
col_index += 1
else:
worksheet.write(row_index, col_index, '', col_format)
col_index += 1
worksheet.write(row_index, c
|
zorna/zorna
|
zorna/notes/migrations/0003_changed_mime_type_length.py
|
Python
|
bsd-3-clause
| 9,634
| 0.007785
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'ZornaNoteFile.mimetype'
db.alter_column('zorna_note_attachments', 'mimetype', self.gf('django.db.models.fields.CharField')(max_length=255))
def backwards(self, orm):
# Changing field 'ZornaNoteFile.mimetype'
db.alter_column('zorna_note_attachments', 'mimetype', self.gf('django.db.models.fields.CharField')(max_length=64))
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notes.zornanote': {
'Meta': {'ordering': "['-time_updated']", 'object_name': 'ZornaNote', 'db_table': "'zorna_notes'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notes.ZornaNoteCategory']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_modifier_notes_zornanote_related'", 'null': 'True', 'to': "orm['auth.User']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_owner_notes_zornanote_related'", 'null': 'True', 'to': "orm['auth.User']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'site_owner_notes_zornanote_related'", 'null': 'True', 'to': "orm['sites.Site']"}),
'tags': ('tagging.fields.TagField', [], {}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'time_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'notes.zornanotecategory': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'ZornaNoteCategory', 'db_table': "'zorna_note_categories'"},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_modifier_notes_zornanotecategory_related'", 'null': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_owner_notes_zornanotecategory_related'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['notes.ZornaNoteCategory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'site_owner_notes_zornanotecategory_related'", 'null': 'True', 'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True'
|
chovanecm/sacredboard
|
sacredboard/app/data/pymongo/mongodb.py
|
Python
|
mit
| 3,502
| 0.000286
|
# coding=utf-8
"""Accesses data in Sacred's MongoDB."""
import pymongo
from sacredboard.app.data.datastorage import Cursor, DataStorage
from sacredboard.app.data.pymongo import GenericDAO, MongoMetricsDAO, MongoFilesDAO
from sacredboard.app.data.pymongo.rundao import MongoRunDAO
class MongoDbCursor(Cursor):
"""Implements Cursor for mongodb."""
def __init__(self, mongodb_cursor):
"""Initialize a MongoDB cursor."""
self.mongodb_cursor = mongodb_cursor
def count(self):
"""Return the number of items in this cursor."""
return self.mongodb_cursor.count()
def __iter__(self):
"""Iterate over runs."""
return self.mongodb_cursor
class PyMongoDataAccess(DataStorage):
"""Access records in MongoDB."""
def __init__(self, uri, database_name, collection_name):
"""
Set up MongoDB access layer, don't connect yet.
Better use the static methods build_data_access
or build_data_access_with_uri
"""
super().__init__()
self._uri = uri
self._db_name = database_name
self._client = None
self._db = None
self._collection_name = collection_name
self._generic_dao = None
def connect(self):
"""Initialize the database connection."""
self._client = self._create_client()
self._db = getattr(self._client, self._db_name)
self._generic_dao = GenericDAO(self._client, self._db_name)
def _create_client(self):
"""Return a new Mongo Client."""
return pymongo.MongoClient(host=self._uri)
@staticmethod
def build_data_access(host, port, database_name, collection_name):
"""
Create data access gateway.
:param host: The database server to connect to.
:type host: str
:param port: Database port.
:type port: int
:param database_name: Database name.
:type database_name: str
:param collection_name: Name of the collection with Sacred runs.
:type collection_name: str
"""
return PyMongoDataAccess("mongodb://%s:%d" % (host, port),
database_name, collection_name)
@staticmethod
def build_data_access_with_uri(uri, database_name, collection_name):
"""
Create data access gateway given a MongoDB URI.
:param uri: Connection string as defined in
https://docs.mongodb.com/manual/reference/connection-string/
:type uri: str
:param database_name: Database name
:type database_name: str
:param collection_name: Name of the collection
where Sacred stores its runs
:type collection_name: str
"""
return PyMongoDataAccess(uri, database_name, collection_name)
def get_metrics_dao(self):
"""
Return a data access object for metrics.
The method can be called only after a connection to DB is established.
Issue: https://github.com/chovanecm/sacredboard/issues/62
:return MetricsDAO
"""
return MongoMetricsDAO(self._generic_dao)
def get_run_dao(self):
"""
Return a data access object for Runs.
:return: RunDAO
"""
return MongoRunDAO(self._generic_dao, self._collection_name)
def get_files_dao(self):
"""
Return a data access object for Files.
:return: RunDAO
"""
return MongoFilesDAO(self._generic_dao)
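# --- Usage sketch (illustrative, not part of the module) ---
# Minimal wiring of the data access layer; the host, port, database and
# collection names below are placeholders, not values mandated by Sacredboard.
if __name__ == "__main__":
    storage = PyMongoDataAccess.build_data_access("localhost", 27017, "sacred", "runs")
    storage.connect()
    print(storage.get_run_dao())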
|
sonofatailor/django-oscar
|
src/oscar/apps/address/abstract_models.py
|
Python
|
bsd-3-clause
| 21,058
| 0
|
import re
import zlib
from django.conf import settings
from django.core import exceptions
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import filter
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from phonenumber_field.modelfields import PhoneNumberField
from oscar.core.compat import AUTH_USER_MODEL
from oscar.core.decorators import deprecated
from oscar.models.fields import UppercaseCharField
@python_2_unicode_compatible
class AbstractAddress(models.Model):
"""
Superclass address object
This is subclassed and extended to provide models for
user, shipping and billing addresses.
"""
MR, MISS, MRS, MS, DR = ('Mr', 'Miss', 'Mrs', 'Ms', 'Dr')
TITLE_CHOICES = (
(MR, _("Mr")),
(MISS, _("Miss")),
(MRS, _("Mrs")),
(MS, _("Ms")),
(DR, _("Dr")),
)
POSTCODE_REQUIRED = 'postcode' in settings.OSCAR_REQUIRED_ADDRESS_FIELDS
# Regex for each country. Not listed countries don't use postcodes
# Based on http://en.wikipedia.org/wiki/List_of_postal_codes
POSTCODES_REGEX = {
'AC': r'^[A-Z]{4}[0-9][A-Z]$',
'AD': r'^AD[0-9]{3}$',
'AF': r'^[0-9]{4}$',
'AI': r'^AI-2640$',
'AL': r'^[0-9]{4}$',
'AM': r'^[0-9]{4}$',
'AR': r'^([0-9]{4}|[A-Z][0-9]{4}[A-Z]{3})$',
'AS': r'^[0-9]{5}(-[0-9]{4}|-[0-9]{6})?$',
'AT': r'^[0-9]{4}$',
'AU': r'^[0-9]{4}$',
'AX': r'^[0-9]{5}$',
'AZ': r'^AZ[0-9]{4}$',
'BA': r'^[0-9]{5}$',
'BB': r'^BB[0-9]{5}$',
'BD': r'^[0-9]{4}$',
'BE': r'^[0-9]{4}$',
'BG': r'^[0-9]{4}$',
'BH': r'^[0-9]{3,4}$',
'BL': r'^[0-9]{5}$',
'BM': r'^[A-Z]{2}([0-9]{2}|[A-Z]{2})',
'BN': r'^[A-Z]{2}[0-9]{4}$',
'BO': r'^[0-9]{4}$',
'BR': r'^[0-9]{5}(-[0-9]{3})?$',
'BT': r'^[0-9]{3}$',
'BY': r'^[0-9]{6}$',
'CA': r'^[A-Z][0-9][A-Z][0-9][A-Z][0-9]$',
'CC': r'^[0-9]{4}$',
'CH': r'^[0-9]{4}$',
'CL': r'^([0-9]{7}|[0-9]{3}-[0-9]{4})$',
'CN': r'^[0-9]{6}$',
'CO': r'^[0-9]{6}$',
'CR': r'^[0-9]{4,5}$',
'CU': r'^[0-9]{5}$',
'CV': r'^[0-9]{4}$',
'CX': r'^[0-9]{4}$',
'CY': r'^[0-9]{4}$',
'CZ': r'^[0-9]{5}$',
'DE': r'^[0-9]{5}$',
'DK': r'^[0-9]{4}$',
'DO': r'^[0-9]{5}$',
'DZ': r'^[0-9]{5}$',
'EC': r'^EC[0-9]{6}$',
'EE': r'^[0-9]{5}$',
'EG': r'^[0-9]{5}$',
'ES': r'^[0-9]{5}$',
'ET': r'^[0-9]{4}$',
'FI': r'^[0-9]{5}$',
'FK': r'^[A-Z]{4}[0-9][A-Z]{2}$',
'FM': r'^[0-9]{5}(-[0-9]{4})?$',
'FO': r'^[0-9]{3}$',
'FR': r'^[0-9]{5}$',
'GA': r'^[0-9]{2}.*[0-9]{2}$',
'GB': r'^[A-Z][A-Z0-9]{1,3}[0-9][A-Z]{2}$',
'GE': r'^[0-9]{4}$',
'GF': r'^[0-9]{5}$',
'GG': r'^([A-Z]{2}[0-9]{2,3}[A-Z]{2})$',
'GI': r'^GX111AA$',
'GL': r'^[0-9]{4}$',
'GP': r'^[0-9]{5}$',
'GR': r'^[0-9]{5}$',
'GS': r'^SIQQ1ZZ$',
'GT': r'^[0-9]{5}$',
'GU': r'^[0-9]{5}$',
'GW': r'^[0-9]{4}$',
'HM': r'^[0-9]{4}$',
'HN': r'^[0-9]{5}$',
'HR': r'^[0-9]{5}$',
'HT': r'^[0-9]{4}$',
'HU': r'^[0-9]{4}$',
'ID': r'^[0-9]{5}$',
'IL': r'^[0-9]{7}$',
'IM': r'^IM[0-9]{2,3}[A-Z]{2}$$',
'IN': r'^[0-9]{6}$',
'IO': r'^[A-Z]{4}[0-9][A-Z]{2}$',
'IQ': r'^[0-9]{5}$',
'IR': r'^[0-9]{5}-[0-9]{5}$',
'IS': r'^[0-9]{3}$',
'IT': r'^[0-9]{5}$',
'JE': r'^JE[0-9]{2}[A-Z]{2}$',
'JM': r'^JM[A-Z]{3}[0-9]{2}$',
'JO': r'^[0-9]{5}$',
'JP': r'^[0-9]{3}-?[0-9]{4}$',
'KE': r'^[0-9]{5}$',
'KG': r'^[0-9]{6}$',
'KH': r'^[0-9]{5}$',
'KR': r'^[0-9]{5}$',
'KY': r'^KY[0-9]-[0-9]{4}$',
'KZ': r'^[0-9]{6}$',
'LA': r'^[0-9]{5}$',
'LB': r'^[0-9]{8}$',
'LI': r'^[0-9]{4}$',
'LK': r'^[0-9]{5}$',
'LR': r'^[0-9]{4}$',
'LS': r'^[0-9]{3}$',
'LT': r'^(LT-)?[0-9]{5}$',
'LU': r'^[0-9]{4}$',
'LV': r'^LV-[0-9]{4}$',
'LY': r'^[0-9]{5}$',
'MA': r'^[0-9]{5}$',
'MC': r'^980[0-9]{2}$',
'MD': r'^MD-?[0-9]{4}$',
'ME': r'^[0-9]{5}$',
'MF': r'^[0-9]{5}$',
'MG': r'^[0-9]{3}$',
'MH': r'^[0-9]{5}$',
'MK': r'^[0-9]{4}$',
'MM': r'^[0-9]{5}$',
'MN': r'^[0-9]{5}$',
'MP': r'^[0-9]{5}$',
'MQ': r'^[0-9]{5}$',
'MT': r'^[A-Z]{3}[0-9]{4}$',
'MV': r'^[0-9]{4,5}$',
'MX': r'^[0-9]{5}$',
'MY': r'^[0-9]{5}$',
'MZ': r'^[0-9]{4}$',
'NA': r'^[0-9]{5}$',
'NC': r'^[0-9]{5}$',
'NE': r'^[0-9]{4}$',
'NF': r'^[0-9]{4}$',
'NG': r'^[0-9]{6}$',
'NI': r'^[0-9]{5}$',
'NL': r'^[0-9]{4}[A-Z]{2}$',
'NO': r'^[0-9]{4}$',
'NP': r'^[0-9]{5}$',
'NZ': r'^[0-9]{4}$',
'OM': r'^[0-9]{3}$',
'PA': r'^[0-9]{6}$',
'PE': r'^[0-9]{5}$',
'PF': r'^[0-9]{5}$',
'PG': r'^[0-9]{3}$',
'PH': r'^[0-9]{4}$',
'PK': r'^[0-9]{5}$',
'PL': r'^[0-9]{2}-?[0-9]{3}$',
'PM': r'^[0-9]{5}$',
'PN': r'^[A-Z]{4}[0-9][A-Z]{2}$',
'PR': r'^[0-9]{5}$',
'PT': r'^[0-9]{4}(-?[0-9]{3})?$',
'PW': r'^[0-9]{5}$',
'PY': r'^[0-9]{4}$',
'RE': r'^[0-9]{5}$',
'RO': r'^[0-9]{6}$',
'RS': r'^[0-9]{5}$',
'RU': r'^[0-9]{6}$',
'SA': r'^[0-9]{5}$',
'SD': r'^[0-9]{5}$',
'SE': r'^[0-9]{5}$',
'SG': r'^([0-9]{2}|[0-9]{4}|[0-9]{6})$',
'SH': r'^(STHL1ZZ|TDCU1ZZ)$',
'SI': r'^(SI-)?[0-9]{4}$',
'SK': r'^[0-9]{5}$',
'SM': r'^[0-9]{5}$',
'SN': r'^[0-9]{5}$',
'SV': r'^01101$',
'SZ': r'^[A-Z][0-9]{3}$',
'TC': r'^TKCA1ZZ$',
'TD': r'^[0-9]{5}$',
'TH': r'^[0-9]{5}$',
'TJ': r'^[0-9]{6}$',
'TM': r'^[0-9]{6}$',
'TN': r'^[0-9]{4}$',
'TR': r'^[0-9]{5}$',
'TT': r'^[0-9]{6}$',
'TW': r'^([0-9]{3}|[0-9]{5})$',
'UA': r'^[0-9]{5}$',
'US': r'^[0-9]{5}(-[0-9]{4}|-[0-9]{6})?$',
'UY': r'^[0-9]{5}$',
'UZ': r'^[0-9]{6}$',
'VA': r'^00120$',
'VC': r'^VC[0-9]{4}',
'VE': r'^[0-9]{4}[A-Z]?$',
'VG': r'^VG[0-9]{4}$',
'VI': r'^[0-9]{5}$',
'VN': r'^[0-9]{6}$',
'WF': r'^[0-9]{5}$',
'XK': r'^[0-9]{5}$',
'YT': r'^[0-9]{5}$',
'ZA': r'^[0-9]{4}$',
'ZM': r'^[0-9]{5}$',
}
title = models.CharField(
pgettext_lazy(u"Treatment Pronouns for the customer", u"Title"),
max_length=64, choices=TITLE_CHOICES, blank=True)
first_name = models.CharField(_("First name"), max_length=255, blank=True)
last_name = models.CharField(_("Last name"), max_length=255, blank=True)
# We use quite a few lines of an address as they are often quite long and
# it's easier to just hide the unnecessary ones than add extra ones.
line1 = models.CharField(_("First line of address"), max_length=255)
line2 = models.CharField(
_("Second line of address"), max_length=255, blank=True)
line3 = models.CharField(
_("Third line of address"), max_length=255, blank=True)
line4 = models.CharField(_("City"), max_length=255, blank=True)
state = models.CharField(_("State/County"), max_length=255, blank=True)
postcode = UppercaseCharField(
_("Post/Zip-code"), max_length=64, blank=True)
country = models.ForeignKey(
'address.Country',
on_delete=models.CASCADE,
verbose_name=_("Country"))
#: A field only used for searching addresses - this contains all the
#: relevant fields. This is effectively a poor man's Solr text field.
search_text = models.TextField(
_("Search text - used only for searching addresses"), editable=False)
def __str_
|
zahodi/ansible-mikrotik
|
pythonlibs/mt_api/__init__.py
|
Python
|
apache-2.0
| 12,353
| 0.001376
|
from __future__ import unicode_literals
import binascii
import hashlib
import logging
import socket
import ssl
import sys
from ansible.module_utils.mt_api.retryloop import RetryError
from ansible.module_utils.mt_api.retryloop import retryloop
from ansible.module_utils.mt_api.socket_utils import set_keepalive
PY2 = sys.version_info[0] < 3
logger = logging.getLogger(__name__)
class RosAPIError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
if isinstance(self.value, dict) and self.value.get('message'):
return self.value['message']
elif isinstance(self.value, list):
elements = (
'%s: %s' %
(element.__class__, str(element)) for element in self.value
)
return '[%s]' % (', '.join(element for element in elements))
else:
return str(self.value)
class RosAPIConnectionError(RosAPIError):
pass
class RosAPIFatalError(RosAPIError):
pass
class RosApiLengthUtils(object):
def __init__(self, api):
self.api = api
def write_lenght(self, length):
self.api.write_bytes(self.length_to_bytes(length))
def length_to_bytes(self, length):
if length < 0x80:
return self.to_bytes(length)
elif length < 0x4000:
length |= 0x8000
return self.to_bytes(length, 2)
elif length < 0x200000:
length |= 0xC00000
return self.to_bytes(length, 3)
elif length < 0x10000000:
length |= 0xE0000000
return self.to_bytes(length, 4)
else:
return self.to_bytes(0xF0) + self.to_bytes(length, 4)
def read_length(self):
b = self.api.read_bytes(1)
i = self.from_bytes(b)
if (i & 0x80) == 0x00:
return i
elif (i & 0xC0) == 0x80:
return self._unpack(1, i & ~0xC0)
elif (i & 0xE0) == 0xC0:
return self._unpack(2, i & ~0xE0)
elif (i & 0xF0) == 0xE0:
return self._unpack(3, i & ~0xF0)
elif (i & 0xF8) == 0xF0:
return self.from_bytes(self.api.read_bytes(1))
else:
raise RosAPIFatalError('Unknown value: %x' % i)
def _unpack(self, times, i):
temp1 = self.to_bytes(i)
temp2 = self.api.read_bytes(times)
try:
temp3 = temp2.decode('utf-8')
except:
try:
temp3 = temp2.decode('windows-1252')
except Exception:
print("Cannot decode response properly:", temp2)
print(Exception)
exit(1)
res = temp1 + temp3
return self.from_bytes(res)
if PY2:
def from_bytes(self, data):
data_values = [ord(char) for char in data]
value = 0
for byte_value in data_values:
value <<= 8
value += byte_value
return value
def to_bytes(self, i, size=1):
data = []
for _ in xrange(size):
data.append(chr(i & 0xff))
i >>= 8
return b''.join(reversed(data))
else:
def from_bytes(self, data):
return int.from_bytes(data, 'big')
def to_bytes(self, i, size=1):
return i.to_bytes(size, 'big')
class RosAPI(object):
"""Routeros api"""
def __init__(self, socket):
self.socket = socket
self.length_utils = RosApiLengthUtils(self)
def login(self, username, pwd):
for _, attrs in self.talk([b'/login']):
token = binascii.unhexlify(attrs[b'ret'])
hasher = hashlib.md5()
hasher.update(b'\x00')
hasher.update(pwd)
hasher.update(token)
self.talk([b'/login', b'=name=' + username,
b'=response=00' + hasher.hexdigest().encode('ascii')])
def talk(self, words):
if self.write_sentence(words) == 0:
return
output = []
while True:
input_sentence = self.read_sentence()
if not len(input_sentence):
continue
attrs = {}
reply = input_sentence.pop(0)
for line in input_sentence:
try:
second_eq_pos = line.index(b'=', 1)
except IndexError:
attrs[line[1:]] = b''
else:
attrs[line[1:second_eq_pos]] = line[second_eq_pos + 1:]
output.append((reply, attrs))
if reply == b'!done':
if output[0][0] == b'!trap':
raise RosAPIError(output[0][1])
if output[0][0] == b'!fatal':
self.socket.close()
raise RosAPIFatalError(output[0][1])
return output
def write_sentence(self, words):
words_written = 0
for word in words:
self.write_word(word)
words_written += 1
self.write_word(b'')
return words_written
def read_sentence(self):
sentence = []
while True:
word = self.read_word()
if not len(word):
return sentence
sentence.append(word)
def write_word(self, word):
logger.debug('>>> %s' % word)
self.length_utils.write_lenght(len(word))
self.write_bytes(word)
def read_word(self):
word = self.read_bytes(self.length_utils.read_length())
logger.debug('<<< %s' % word)
return word
def write_bytes(self, data):
sent_overal = 0
while sent_overal < len(data):
try:
sent = self.socket.send(data[sent_overal:])
except socket.error as e:
raise RosAPIConnectionError(str(e))
if sent == 0:
raise RosAPIConnectionError('Connection closed by remote end.')
sent_overal += sent
def read_bytes(self, length):
received_overal = b''
while len(received_overal) < length:
try:
received = self.socket.recv(
length - len(received_overal))
except socket.error as e:
raise RosAPIConnectionError(str(e))
if len(received) == 0:
raise RosAPIConnectionError('Connection closed by remote end.')
received_overal += received
return received_overal
class BaseRouterboardResource(object):
def __init__(self, api, namespace):
self.api = api
self.namespace = namespace
def call(self, command, set_kwargs, query_kwargs=None):
query_kwargs = query_kwargs or {}
query_arguments = self._prepare_arguments(True, **query_kwargs)
set_arguments = self._prepare_arguments(False, **set_kwargs)
query = ([('%s/%s' % (self.namespace, command)).encode('ascii')] +
query_arguments + set_arguments)
response = self.api.api_client.talk(query)
output = []
for response_type, attributes in response:
if response_type == b'!re':
output.append(self._remove_first_char_from_keys(attributes))
return output
@staticmethod
def _prepare_arguments(is_query, **kwargs):
command_arguments = []
for key, value in kwargs.items():
if key in ['id', 'proplist']:
key = '.%s' % key
key = key.replace('_', '-')
selector_char = '?' if is_query else '='
command_arguments.append(
('%s%s=' % (selector_char, key)).encode('ascii') + value)
return command_arguments
@staticmethod
def _remove_first_char_from_keys(dictionary):
elements = []
for key, value in dictionary.items():
key = key.decode('ascii')
if key in ['.id', '.proplist']:
key = key[1:]
elements.append((key, value))
return dict(elements)
def get(self, **kwargs):
return self.call('print', {}, kwargs)
def detailed_get(self, **kwargs):
return self.call('print', {'deta
|
hartwork/wnpp.debian.net
|
wnpp_debian_net/management/commands/importdebbugs.py
|
Python
|
agpl-3.0
| 15,162
| 0.00376
|
# Copyright (C) 2021 Sebastian Pipping <sebastian@pipping.org>
# Licensed under GNU Affero GPL v3 or later
import datetime
import re
import sys
from itertools import islice
from signal import SIGINT
from typing import Any
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.text import Truncator
from django.utils.timezone import now
from ...debbugs import DebbugsRequestError, DebbugsRetry, DebbugsWnppClient, IssueProperty
from ...models import DebianLogIndex, DebianLogMods, DebianPopcon, DebianWnpp, EventKind
from ._common import ReportingMixin
_BATCH_SIZE = 100
_MAXIMUM_STALE_DELTA = datetime.timedelta(hours=2)
class _MalformedSubject(ValueError):
pass
class Command(ReportingMixin, BaseCommand):
help = "Import remote WNPP issues from Debbugs' SOAP service into the local database"
def _close_all_issues_but(self, ids_of_open_wnpp_issues):
self._notice('[1/3] Closing issues locally that have been closed remotely...')
log_entries_to_create: list[DebianLogIndex] = []
for issue in DebianWnpp.objects.exclude(ident__in=ids_of_open_wnpp_issues).iterator():
self._notice(f'Detected that issue #{issue.ident} has been closed remotely')
log_entries_to_create.append(
self._create_log_entry_from(issue, EventKind.CLOSED, now()))
if log_entries_to_create:
with transaction.atomic():
DebianLogIndex.objects.bulk_create(log_entries_to_create)
self._success(f'Logged closing of {len(log_entries_to_create)} issue(s)')
DebianWnpp.objects.exclude(ident__in=ids_of_open_wnpp_issues).delete()
self._success(f'Deleted {len(log_entries_to_create)} issue(s)')
else:
self._notice('No existing issues deleted.')
def _fetch_issues(self, issue_ids: list[int]) -> dict[int, dict[str, str]]:
flat_issue_ids = ', '.join(str(i) for i in issue_ids)
self._notice(f'Fetching {len(issue_ids)} issue(s): {flat_issue_ids}...')
properties_of_issue = DebbugsRetry(self._client.fetch_issues,
notify=self._notice)(issue_ids)
return properties_of_issue
@staticmethod
def _create_missing_pocons_for(package_names: list[str]) -> list[DebianPopcon]:
existing_packages = set(
DebianPopcon.objects.filter(package__in=package_names).values_list('package',
flat=True))
missing_packages = package_names - existing_packages
return [DebianPopcon(package=package) for package in missing_packages]
def _add_any_new_issues_from(self, ids_of_remote_open_issues):
ids_of_issues_already_known_locally = set(
DebianWnpp.objects.values_list('ident', flat=True))
ids_of_new_issues_to_create = sorted(
set(ids_of_remote_open_issues) - ids_of_issues_already_known_locally)
count_issues_left_to_import = len(ids_of_new_issues_to_create)
self._notice(
f'[2/3] Starting to import {count_issues_left_to_import} '
f'(={len(ids_of_remote_open_issues)}-{len(ids_of_issues_already_known_locally)})'
' new remote issue(s) locally...')
it = iter(ids_of_new_issues_to_create)
while True:
issue_ids = list(islice(it, 0, _BATCH_SIZE))
if not issue_ids:
break
self._notice(
f'Importing next {min(_BATCH_SIZE, count_issues_left_to_import)} issue(s) of {count_issues_left_to_import} left to import...'
)
count_issues_left_to_import -= _BATCH_SIZE
log_entries_to_create: list[DebianLogIndex] = []
issues_to_create: list[DebianWnpp] = []
remote_properties_of_issue = self._fetch_issues(issue_ids)
future_local_properties_of_issue, popcons_to_create = self._analyze_remote_properties(
remote_properties_of_issue)
for issue_id, properties in future_local_properties_of_issue.items():
issue = DebianWnpp(**properties)
issues_to_create.append(issue)
log_entries_to_create.append(
self._create_log_entry_from(issue, EventKind.OPENED, issue.open_stamp))
if issues_to_create:
with transaction.atomic():
if popcons_to_create:
DebianPopcon.objects.bulk_create(popcons_to_create)
self._success(f'Created {len(popcons_to_create)} missing popcon entries')
DebianLogIndex.objects.bulk_create(log_entries_to_create)
self._success(
f'Logged upcoming creation of {len(log_entries_to_create)} issue(s)')
DebianWnpp.objects.bulk_create(issues_to_create)
self._success(f'Created {len(issues_to_create)} new issues')
else:
self._notice('No new issues created.')
def _analyze_remote_properties(self, remote_properties_of_issue):
future_local_properties_of_issue: dict[int, dict[str, Any]] = {}
for issue_id, properties in remote_properties_of_issue.items():
self._notice(f'Processing upcoming issue {issue_id}...')
try:
future_local_properties_of_issue[issue_id] = self._to_database_keys(
issue_id, properties)
except _MalformedSubject as e:
self._error(str(e))
continue
# NOTE: PostgreSQL is not forgiving about absent foreign keys,
# so we'll need to create any missing DebianPopcon instances
# before creating the related DebianWnpp instances.
involved_package = {
properties['popcon_id']
for properties in future_local_properties_of_issue.values()
}
popcons_to_create = self._create_missing_pocons_for(involved_package)
return future_local_properties_of_issue, popcons_to_create
def _update_stale_existing_issues(self, ids_of_remote_open_issues):
stale_issues_qs = DebianWnpp.objects.filter(ident__in=ids_of_remote_open_issues,
cron_stamp__lt=now() - _MAXIMUM_STALE_DELTA)
count_issues_left_to_update = stale_issues_qs.count()
self._notice(
f'[3/3] Starting to apply remote changes to {count_issues_left_to_update} stale local issue(s)...'
)
if not count_issues_left_to_update:
self._notice('No stale issues found, none updated.')
return
while True:
log_entries_to_create: list[DebianLogIndex] = []
kind_change_log_entries_to_create: list[DebianLogMods] = []
issues_to_update: list[DebianWnpp] = list(
stale_issues_qs.order_by('cron_stamp', 'ident')[:_BATCH_SIZE])
if not issues_to_update:
break
self._notice(
f'Updating next {min(_BATCH_SIZE, count_issues_left_to_update)} stale issue(s) of {count_issues_left_to_update} left to update...'
)
count_issues_left_to_update -= _BATCH_SIZE
issue_ids = [issue.ident for issue in issues_to_update]
issue_fields_to_bulk_update: set[str] = {
'cron_stamp',
} # will be grown as needed
# Fetch remote data
remote_properties_of_issue = self._fetch_issues(issue_ids)
future_local_properties_of_issue, popcons_to_create = self._analyze_remote_properties(
remote_properties_of_issue)
# Turn remote data into database instances (to persist later)
for i, issue in enumerate(issues_to_update):
try:
database_field_map = future_local_properties_of_issue[issue.ident]
except KeyError: # when self._analyze_remote_properties had to drop the issue
|
Mlieou/oj_solutions
|
leetcode/python/ex_424.py
|
Python
|
mit
| 629
| 0.00159
|
class Solution(object):
def characterReplacement(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
count = [0] * 26
res = char_count = start = end = 0
while end < len(s):
count[ord(s[end]) - ord('A')] += 1
char_count = max(char_count, count[ord(s[end]) - ord('A')])
end += 1
while end - start - char_count > k:
count[ord(s[start]) - ord('A')] -= 1
start += 1
char_count = max(count + [char_count])
res = max(end - start, res)
return res
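# Quick sanity check (added for illustration, not part of the original
# solution): with s = "AABABBA" and k = 1 the best window is "AABA" -> "AAAA",
# so the expected answer is 4; "ABAB" with k = 2 can become "AAAA", giving 4.
if __name__ == "__main__":
    assert Solution().characterReplacement("AABABBA", 1) == 4
    assert Solution().characterReplacement("ABAB", 2) == 4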
|
labordoc/labordoc-next
|
modules/webdeposit/lib/deposition_fields/issn_field.py
|
Python
|
gpl-2.0
| 1,643
| 0.008521
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from wtforms import TextField
from invenio.webdeposit_field import WebDepositField
from invenio.webdeposit_validation_utils import sherpa_romeo_issn_validate
__all__ = ['ISSNField']
class ISSNField(WebDepositField(key='issn'), TextField):
def __init__(self, **kwargs):
super(ISSNField, self).__init__(**kwargs)
self._icon_html = '<i class="icon-barcode"></i>'
def pre_validate(self, form=None):
# Load custom validation
validators = self.config.get_validators()
if validators is not [] and validators is not None:
validation_json = {}
for validator in validators:
json = validator(self)
validation_json = self.merge_validation_json(validation_json, json)
return validation_json
return sherpa_romeo_issn_validate(self)
|
starofrainnight/rabird.pyside
|
tests/__init__.py
|
Python
|
mit
| 63
| 0
|
# -*- coding: utf-8 -*-
"""Unit test package for qt-aider."""
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20170717A.py
|
Python
|
mit
| 795
| 0.013836
|
"""
[2017-07-17] Challenge #324 [Easy] "manual" square root procedure (intermediate)
https://www.reddit.com/r/dailyprogrammer/comments/6nstip/20170717_challenge_324_easy_manual_square_root/
Write a program that outputs the highest number that is lower or equal than the square root of the given number, with
the given number of decimal fraction digits.
Use this technique, (do not use your language's built in square root function):
https://medium.com/i-math/how-to-find-square-roots-by-hand-f3f7cadf94bb
**input format: 2 numbers:** precision-digits Number
**sample input**
0 7720.17
1 7720.17
2 7720.17
**sample output**
87
87.8
87.86
**challenge inputs**
0 12345
8 123456
1 12345678901234567890123456789
"""
def main():
pass
if __name__ == "__main__":
main()
|
tjcsl/ion
|
intranet/apps/bus/consumers.py
|
Python
|
gpl-2.0
| 3,019
| 0.001987
|
from asgiref.sync import async_to_sync
from channels.generic.websocket import JsonWebsocketConsumer
from django.conf import settings
from django.utils import timezone
from .models import Route
class BusConsumer(JsonWebsocketConsumer):
groups = ["bus"]
def connect(self):
self.user = self.scope["user"]
headers = dict(self.scope["headers"])
remote_addr = headers[b"x-real-ip"].decode() if b"x-real-ip" in headers else self.scope["client"][0]
if (not self.user.is_authenticated or self.user.is_restricted) and remote_addr not in settings.INTERNAL_IPS:
self.connected = False
self.close()
return
self.connected = True
data = self._serialize(user=self.user)
self.accept()
self.send_json(data)
def receive_json(self, content): # pylint: disable=arguments-differ
if not self.connected:
return
if content.get("type") == "keepalive":
self.send_json({"type": "keepalive-response"})
return
if self.user is not None and self.user.is_authenticated and self.user.is_bus_admin:
try:
if self.within_time_range(content["time"]):
route = Route.objects.get(id=content["id"])
route.status = content["status"]
if content["time"] == "afternoon" and route.status == "a":
route.space = content["space"]
else:
route.space = ""
route.save()
data = self._serialize()
async_to_sync(self.channel_layer.group_send)("bus", {"type": "bus.update", "data": data})
except Exception as e:
# TODO: Add logging
print(e)
self.send_json({"error": "An error occurred."})
else:
self.send_json({"error": "User does not have permissions."})
def bus_update(self, event):
if not self.connected:
return
self.send_json(event["data"])
def _serialize(self, user=None):
all_routes = Route.objects.all()
data = {}
route_list = []
for route in all_routes:
serialized = {
"id": route.id,
"bus_number": route.bus_number,
"space": route.space,
"route_name": route.route_name,
"status": route.status,
}
route_list.append(serialized)
if user and user in route.user_set.all():
data["userRouteId"] = route.id
data["allRoutes"] = route_list
return data
def within_time_range(self, time):
now_hour = timezone.localtime().hour
within_morning = now_hour < settings.BUS_PAGE_CHANGEOVER_HOUR and time == "morning"
within_afternoon = now_hour >= settings.BUS_PAGE_CHANGEOVER_HOUR and time == "afternoon"
return within_morning or within_afternoon
|
saltstack/pytest-logging
|
setup.py
|
Python
|
apache-2.0
| 2,340
| 0.000427
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, with_statement
import os
import sys
import codecs
from setuptools import setup, find_packages
# Change to source's directory prior to running any command
try:
SETUP_DIRNAME = os.path.dirname(__file__)
except NameError:
# We're most likely being frozen and __file__ triggered this NameError
# Let's work around that
SETUP_DIRNAME = os.path.dirname(sys.argv[0])
if SETUP_DIRNAME != '':
os.chdir(SETUP_DIRNAME)
def read(fname):
'''
Read a file from the directory where setup.py resides
'''
file_path = os.path.join(SETUP_DIRNAME, fname)
with codecs.open(file_path, encoding='utf-8') as rfh:
return rfh.read()
# Version info -- read without importing
_LOCALS = {}
with open(os.path.join(SETUP_DIRNAME, 'pytest_logging', 'version.py')) as rfh:
exec(rfh.read(), None, _LOCALS)  # pylint: disable=exec-used
VERSION = _LOCALS['__version__']
LONG_DESCRIPTION = read('README.rst')
setup(
name='pytest-logging',
version=VERSION,
author='Pedro Algarvio',
author_email='pedro@algarvio.me',
maintainer='Pedro Algarvio',
maintainer_email='pedro@algarvio.me',
license='MIT',
url='https://github.com/saltstack/pytest-logging',
description='Configures logging and allows tweaking the log level with a py.test flag',
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=['pytest>=2.8.1'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
],
entry_points={
'pytest11': [
'logging = pytest_logging.plugin',
],
},
)
|
villaverde/iredadmin
|
libs/languages.py
|
Python
|
gpl-2.0
| 3,515
| 0
|
# encoding: utf-8
# Author: Zhang Huangbin <zhb@iredmail.org>
import os
import glob
import web
langmaps = {
'en_US': u'English (US)',
'sq_AL': u'Albanian',
'ar_SA': u'Arabic',
'hy_AM': u'Armenian',
'az_AZ': u'Azerbaijani',
'bs_BA': u'Bosnian (Serbian Latin)',
'bg_BG': u'Bulgarian',
'ca_ES': u'Català',
'cy_GB': u'Cymraeg',
'hr_HR': u'Croatian (Hrvatski)',
'cs_CZ': u'Čeština',
'da_DK': u'Dansk',
'de_DE': u'Deutsch (Deutsch)',
'de_CH': u'Deutsch (Schweiz)',
'en_GB': u'English (GB)',
'es_ES': u'Español',
'eo': u'Esperanto',
'et_EE': u'Estonian',
'eu_ES': u'Euskara (Basque)',
'fi_FI': u'Finnish (Suomi)',
'nl_BE': u'Flemish',
'fr_FR': u'Français',
'gl_ES': u'Galego (Galician)',
'ka_GE': u'Georgian (Kartuli)',
'el_GR': u'Greek',
'he_IL': u'Hebrew',
'hi_IN': u'Hindi',
'hu_HU': u'Hungarian',
'is_IS': u'Icelandic',
'id_ID': u'Indonesian',
'ga_IE': u'Irish',
'it_IT': u'Italiano',
'ja_JP': u'Japanese (日本語)',
'ko_KR': u'Korean',
'ku': u'Kurdish (Kurmancî)',
'lv_LV': u'Latvian',
'lt_LT': u'Lithuanian',
'mk_MK': u'Macedonian',
'ms_MY': u'Malay',
'nl_NL': u'Netherlands',
'ne_NP': u'Nepali',
'nb_NO': u'Norsk (Bokmål)',
'nn_NO': u'Norsk (Nynorsk)',
'fa': u'Persian (Farsi)',
'pl_PL': u'Polski',
'pt_BR': u'Portuguese (Brazilian)',
'pt_PT': u'Portuguese (Standard)',
'ro_RO': u'Romanian',
'ru_RU': u'Русский',
'sr_CS': u'Serbian (Cyrillic)',
'si_LK': u'Sinhala',
'sk_SK': u'Slovak',
'sl_SI': u'Slovenian',
'sv_SE': u'Swedish (Svenska)',
'th_TH': u'Thai',
'tr_TR': u'Türkçe',
'uk_UA': u'Ukrainian',
'vi_VN': u'Vietnamese',
'zh_CN': u'简体中文',
'zh_TW': u'繁體中文',
}
# All available timezone names and time offsets (in minutes).
allTimezonesOffsets = {
'GMT-12:00': -720,
'GMT-11:00': -660,
'GMT-10:00': -600,
'GMT-09:30': -570,
'GMT-09:00': -540,
'GMT-08:00': -480,
'GMT-07:00': -420,
'GMT-06:00': -360,
'GMT-05:00': -300,
'GMT-04:30': -270,
'GMT-04:00': -240,
'GMT-03:30': -210,
'GMT-03:00': -180,
'GMT-02:00': -120,
'GMT-01:00': -60,
'GMT': 0,
'GMT+01:00': 60,
'GMT+02:00': 120,
'GMT+03:00': 180,
'GMT+03:30': 210,
'GMT+04:00': 240,
'GMT+04:30': 270,
'GMT+05:00': 300,
'GMT+05:30': 330,
'GMT+05:45': 345,
'GMT+06:00': 360,
|
'GMT+06:30': 390,
'GMT+07:00': 420,
'GMT+08:00': 480,
'GMT+08:45': 525,
'GMT+09:00': 540,
'GMT+09:30': 570,
'GMT+10:00': 600,
'GMT+10:30': 630,
'GMT+11:00': 660,
'GMT+11:30': 690,
'GMT+12:00': 720,
'GMT+12:45': 765,
'GMT+13:00': 780,
'GMT+14:00': 840,
}
# Get available languages.
def get_language_maps():
# Get available languages.
rootdir = os.path.abspath(os.path.dirname(__file__)) + '/../'
available_lang
|
s = [
web.safestr(os.path.basename(v))
for v in glob.glob(rootdir + 'i18n/[a-z][a-z]_[A-Z][A-Z]')
if os.path.basename(v) in langmaps]
available_langs += [
web.safestr(os.path.basename(v))
for v in glob.glob(rootdir + 'i18n/[a-z][a-z]')
if os.path.basename(v) in langmaps]
available_langs.sort()
# Get language maps.
languagemaps = {}
for i in available_langs:
if i in langmaps:
languagemaps.update({i: langmaps[i]})
return languagemaps
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/test/jacapo/jacapo.py
|
Python
|
gpl-2.0
| 1,380
| 0.01087
|
# do some tests here before we import
# Right version of Scientific?
from ase.test import NotAvailable
import os
try:
import Scientific
version = Scientific.__version__.split(".")
print 'Found ScientificPython version: ',Scientific.__version__
if map(int,version) < [2,8]:
print 'ScientificPython 2.8 or greater required for numpy support in NetCDF'
raise NotAvailable('ScientificPython version 2.8 or greater is required')
except (ImportError, NotAvailable):
print "No Scientific python found. Check your PYTHONPATH"
raise NotAvailable('ScientificPython version 2.8 or greater is required')
if not (os.system('which dacapo.run') == 0):
print "No Dacapo Fortran executable (dacapo.run) found. Check your path settings."
raise NotAvailable('dacapo.run is not installed on this machine or not in the path')
# Now Scientific 2.8 and dacapo.run should both be available
from ase import Atoms, Ato
|
m
from ase.calculators.jacapo import Jacapo
atoms = Atoms([Atom('H',[0,0,0])],
cell=(2,2,2))
calc = Jacapo('Jacapo-test.nc',
pw=200,
nbands=2,
|
kpts=(1,1,1),
spinpol=False,
dipole=False,
symmetry=False,
ft=0.01)
atoms.set_calculator(calc)
print atoms.get_potential_energy()
os.system('rm -f Jacapo-test.nc Jacapo-test.txt')
|
electronic-library/electronic-library-core
|
library/exceptions.py
|
Python
|
gpl-3.0
| 62
| 0.016129
|
"""
Conta
|
ins exception classes specific to this project.
"""
| |
ranji2612/leetCode
|
combinationSum.py
|
Python
|
gpl-2.0
| 622
| 0.016077
|
# Combination Sum
# https://leetcode.com/problems/combination-sum/
class Solution(object):
def combinationSum(self, candidates, target):
"
|
""
:type candidates: List[int]
:type target: int
:rtype: Li
|
st[List[int]]
"""
if len(candidates)==0 or target<=0:
return [[]] if target==0 else []
candidates.sort()
j = len(candidates)-1
res = []
while j>=0:
for x in self.combinationSum(candidates[:j+1],target-candidates[j]):
res.append(x+[candidates[j]])
j-=1
return res
|
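A small usage sketch for the Solution class above (assuming it is defined as shown): candidates may be reused any number of times, and every returned list sums to the target.

sol = Solution()
# Each candidate can appear repeatedly; result ordering follows the implementation above.
print(sol.combinationSum([2, 3, 6, 7], 7))   # -> [[7], [2, 2, 3]]
print(sol.combinationSum([2], 1))            # -> [] (no combination reaches the target)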
Karosuo/Linux_tools
|
xls_handlers/xls_sum_venv/lib/python3.6/site-packages/pip/_internal/configuration.py
|
Python
|
gpl-3.0
| 13,243
| 0
|
"""Configuration management setup
Some terminology:
- name
As written in config files.
- value
Value associated with a name
- key
Name combined with it's section (section.name)
- variant
A single word describing where the configuration key-value pair came from
"""
import locale
import logging
import os
from pip._vendor import six
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import (
ConfigurationError, ConfigurationFileCouldNotBeLoaded,
)
from pip._internal.locations import (
legacy_config_file, new_config_file, running_under_virtualenv,
site_config_files, venv_config_file,
)
from pip._internal.utils.misc import ensure_dir, enum
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Any, Dict, Iterable, List, NewType, Optional, Tuple
)
RawConfigParser = configparser.RawConfigParser # Shorthand
Kind = NewType("Kind", str)
logger = logging.getLogger(__name__)
# NOTE: Maybe use the optionx attribute to normalize keynames.
def _normalize_name(name):
# type: (str) -> str
"""Make a name consistent regardless of source (environment or file)
"""
name = name.lower().replace('_', '-')
if name.startswith('--'):
name = name[2:] # only prefer long opts
return name
def _disassemble_key(name):
# type: (str) -> List[str]
return name.split(".", 1)
# The kinds of configurations there are.
kinds = enum(
USER="user", # User Specific
GLOBAL="global", # System Wide
VENV="venv", # Virtual Environment Specific
ENV="env", # from PIP_CONFIG_FILE
ENV_VAR="env-var", # from Environment Variables
)
class Configuration(object):
"""Handles management of configuration.
Provides an interface to accessing and managing configuration files.
    This class provides an API that takes "section.key-name" style
    keys and stores the value associated with it as "key-name" under the
    section "section".
    This allows for a clean interface wherein both the section and the
    key-name are preserved in an easy-to-manage form in the configuration
    files, and the stored data remains readable.
"""
def __init__(self, isolated, load_only=None):
# type: (bool, Kind) -> None
super(Configuration, self).__init__()
_valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None]
if load_only not in _valid_load_only:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, _valid_load_only[:-1]))
)
)
self.isolated = isolated # type: bool
self.load_only = load_only # type: Optional[Kind]
# The order here determines the override order.
self._override_order = [
kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR
]
self._ignore_env_names = ["version", "help"]
# Because we keep track of where we got the data from
self._parsers = {
variant: [] for variant in self._override_order
} # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
self._config = {
variant: {} for variant in self._override_order
} # type: Dict[Kind, Dict[str, Any]]
self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]]
def load(self):
# type: () -> None
"""Loads configuration from configuration files and environment
"""
self._load_config_files()
if not self.isolated:
self._load_environment_vars()
def get_file_to_edit(self):
# type: () -> Optional[str]
"""Returns the file with highest priority in configuration
"""
assert
|
self.load_only is not None, \
"Need to be specified a file to be editing"
try:
return self._get_parser_to_modify()[0]
except IndexError:
return None
def items(self):
# type: () -> Iterable[Tuple[str, Any]]
"""Returns key-value pairs like dict.items() representing the loaded
configuration
"""
return self._dictionary.items()
def get_value(self, key)
|
:
# type: (str) -> Any
"""Get a value from the configuration.
"""
try:
return self._dictionary[key]
except KeyError:
raise ConfigurationError("No such key - {}".format(key))
def set_value(self, key, value):
# type: (str, Any) -> None
"""Modify a value in the configuration.
"""
self._ensure_have_load_only()
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only][key] = value
self._mark_as_modified(fname, parser)
def unset_value(self, key):
# type: (str) -> None
"""Unset a value in the configuration.
"""
self._ensure_have_load_only()
if key not in self._config[self.load_only]:
raise ConfigurationError("No such key - {}".format(key))
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Remove the key in the parser
modified_something = False
if parser.has_section(section):
# Returns whether the option was removed or not
modified_something = parser.remove_option(section, name)
if modified_something:
# name removed from parser, section may now be empty
section_iter = iter(parser.items(section))
try:
val = six.next(section_iter)
except StopIteration:
val = None
if val is None:
parser.remove_section(section)
self._mark_as_modified(fname, parser)
else:
raise ConfigurationError(
"Fatal Internal error [id=1]. Please report as a bug."
)
del self._config[self.load_only][key]
def save(self):
# type: () -> None
"""Save the currentin-memory state.
"""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
with open(fname, "w") as f:
parser.write(f) # type: ignore
#
# Private routines
#
def _ensure_have_load_only(self):
# type: () -> None
if self.load_only is None:
raise ConfigurationError("Needed a specific file to be modifying.")
logger.debug("Will be working with %s variant only", self.load_only)
@property
def _dictionary(self):
# type: () -> Dict[str, Any]
"""A dictionary representing the loaded configuration.
"""
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in self._override_order:
retval.update(self._config[variant])
return retval
def _load_config_files(self):
# type: () -> None
"""Loads configuration from configuration files
"""
config_files = dict(self._iter_config_files())
if config_files[kinds.ENV][0:1] == [os.devnull]:
logger.debug(
"Skipping loading configuration files due to "
"environment's PIP_CONFIG_FILE being os.devnull"
)
return
for variant, files in config_files.items():
for fname in files:
# If there's specific variant set
|
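A hedged, self-contained sketch of the key handling described in the Configuration docstring above: "section.key-name" keys are split into a section and a name, and option names are normalized before lookup. The helpers below mirror _normalize_name and _disassemble_key from the record and are not pip's public API.

def normalize_name(name):
    # lowercase, underscores to dashes, and strip a leading '--' (long-option form)
    name = name.lower().replace('_', '-')
    if name.startswith('--'):
        name = name[2:]
    return name

def disassemble_key(name):
    # "section.key-name" -> ['section', 'key-name']
    return name.split(".", 1)

print(normalize_name('--NO_CACHE_DIR'))      # -> no-cache-dir
print(disassemble_key('global.index-url'))   # -> ['global', 'index-url']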
pymfony/pymfony
|
src/pymfony/component/config/resource.py
|
Python
|
mit
| 5,243
| 0.010872
|
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import os.path;
import re;
from pickle import dumps as serialize;
from pickle import loads as unserialize;
from pymfony.component.system import Object;
from pymfony.component.system import SerializableInterface;
from pymfony.component.system.oop import interface;
"""
"""
@interface
class ResourceInterface(Object):
"""ResourceInterface is the interface that must be implemented
by all Resource classes.
@author Fabien Potencier <fabien@symfony.com>
"""
def __str__(self):
"""Returns a string representation of the Resource.
@return string A string representation of the Resource
"""
pass;
def isFresh(self, timestamp):
"""Returns true if the resource has not been updated
since the given timestamp.
@param timestamp: int The last time the resource was loaded
@return: Boolean True if the resource has not been updated, false otherwise
"""
pass;
def getResource(self):
"""Returns the resource tied to this Resource.
@return: mixed The resource
"""
pass;
class FileResource(ResourceInterface, SerializableInterface):
"""FileResource represents a resource stored on the filesystem.
The resource can be a file or a directory.
@author Fabien Potencier <fabien@symfony.com>
"""
def __init__(self, resource):
"""Constructor.
@param string $resource The file path to the resource
"""
if resource:
self.__resource = str(os.path.realpath(str(resource)));
else:
self.__resource = '';
def __str__(self):
"""Returns a string representation of the Resource.
@return string A string representation of the Resource
"""
return self.__resource;
def getResource(self):
"""Returns the resource tied to this Resource.
@return mixed The resource
"""
return self.__resource;
def isFresh(self, timestamp):
"""Returns true if the resource has not been updated since the given timestamp.
@param timestamp: integer The last time the resource was loaded
@return Boolean true if the resource has not been updated, false otherwise
"""
if not os.path.exists(self.__resource):
return False;
return os.path.getmtime(self.__resource) < timestamp;
def serialize(self):
return serialize(self.__resource);
def unserialize(self, serialized):
self.__resource = unserialize(serialized);
class DirectoryResource(ResourceInterface, SerializableInterface):
"""DirectoryResource represents a resources sto
|
red i
|
n a subdirectory tree.
@author Fabien Potencier <fabien@symfony.com>
"""
def __init__(self, resource, pattern = None):
"""Constructor.
@param string resource The file path to the resource
@param string pattern A pattern to restrict monitored files
"""
self.__resource = None;
self.__pattern = None;
self.__resource = resource;
self.__pattern = pattern;
def __str__(self):
"""Returns a string representation of the Resource.
@return string A string representation of the Resource
"""
return str(self.__resource);
def getResource(self):
"""Returns the resource tied to this Resource.
@return mixed The resource
"""
return self.__resource;
def getPattern(self):
return self.__pattern;
def isFresh(self, timestamp):
"""Returns True if the resource has not been updated since the given timestamp.:
@param integer timestamp The last time the resource was loaded
@return Boolean True if the resource has not been updated, False otherwise:
"""
if ( not os.path.isdir(self.__resource)) :
return False;
newestMTime = os.path.getmtime(self.__resource);
for root, dirs, files in os.walk(self.__resource, followlinks=True):
for filename in files + dirs:
filename = '/'.join([root, filename]);
# if regex filtering is enabled only check matching files:
if (self.__pattern and os.path.isfile(filename) and not re.search(self.__pattern, os.path.basename(filename))) :
continue;
# always monitor directories for changes, except the .. entries
# (otherwise deleted files wouldn't get detected)
if os.path.isdir(filename) and '/..' == filename[-3:] :
continue;
newestMTime = max(os.path.getmtime(filename), newestMTime);
return newestMTime < timestamp;
def serialize(self):
return serialize([self.__resource, self.__pattern]);
def unserialize(self, serialized):
self.__resource, self.__pattern = unserialize(serialized);
|
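A minimal illustration of the freshness rule used by FileResource.isFresh above: a resource counts as fresh when its mtime is strictly older than the timestamp it is compared against. The temporary file is only a stand-in.

import os
import tempfile
import time

fd, path = tempfile.mkstemp()
os.close(fd)
cache_built_at = time.time() + 10                 # pretend the cache is newer than the file
print(os.path.getmtime(path) < cache_built_at)    # -> True: resource unchanged since then
print(os.path.getmtime(path) < time.time() - 60)  # -> False: file is newer than that timestamp
os.remove(path)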
ComfyLabs/beefeater
|
users/views/registration.py
|
Python
|
apache-2.0
| 281
| 0
|
from rest_framework import generics
from ..serializers import UserSerializer
cl
|
ass UserRegistration(generics.CreateAPIView):
"""
This is
|
basically an API to create a user.
This currently provides no email functionality.
"""
serializer_class = UserSerializer
|
texta-tk/texta
|
dataset_importer/document_reader/readers/entity/rtf_reader.py
|
Python
|
gpl-3.0
| 750
| 0.02
|
from entity_reader import EntityReader
import textract
from dataset_importer.utils import HandleDatasetImportException
class RTFReader(EntityReader):
@staticmethod
def get_features(**kwargs):
directory = kwargs['directory']
for file_path in RTFReader.get_file_list(directory, 'rtf'):
try:
features = RTFReader.get_meta_features(file_path=file_path)
features['text'] = textract.process(file_path).decode('utf8')
features['_texta_id'] = file_pa
|
th
yield features
except Exception as e:
HandleDatasetImportException(kwargs, e, file_path=file_path)
@staticmethod
def count_total_documents(**kwargs):
directory = kwargs['directory']
return RTFReader.count_documents(root_dire
|
ctory=directory, extension='rtf')
|
Qwaz/solved-hacking-problem
|
TWCTF/2019/php_note/solver.py
|
Python
|
gpl-2.0
| 982
| 0.004073
|
import requests
URL = "http://phpnote.chal.ctf.westerns.tokyo/"
def trigger(c, idx):
import string
sess = requests.Session()
# init session
sess.post(URL + '/?action=login', data={'realname': 'new_session'})
# manipulate session
p = '''<script>f=function(n){eval('X5O!P%@AP[4\\\\PZX54(P^)7CC)7}$$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$$H+H'+{${c}:'*'}[Math.min(${c},n)])};f(document.body.innerHTML[${idx}].charCodeAt(0));</script><body>'''
p = string.Template(p).substitute({'idx': idx, 'c': c})
resp = sess.post(URL + '/?ac
|
tion=login', data={'realname': '"http://127.0.0.1/flag?a=' + p, 'nickname': '</body>'})
return "<h1>Welcome" not in resp.text
def leak(idx):
l, h = 0, 0x100
|
while h - l > 1:
m = (h + l) // 2
if trigger(m, idx):
l = m
else:
h = m
return chr(l)
# "2532bd172578d19923e5348420e02320"
secret = ''
for i in range(14, 14+34):
secret += leak(i)
print(secret)
|
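An offline sketch of the bisection in leak() above: the HTTP/antivirus oracle is replaced by a local predicate that answers "is the secret byte code at least m?", which is the property the loop's l/h updates imply. SECRET and fake_trigger are made up for the demo.

SECRET = ord('b')                 # hypothetical hidden byte value

def fake_trigger(m):
    return SECRET >= m            # stands in for the detection side channel

l, h = 0, 0x100
while h - l > 1:
    m = (h + l) // 2
    if fake_trigger(m):
        l = m                     # predicate holds: the answer is at least m
    else:
        h = m                     # predicate fails: the answer is below m
print(chr(l))                     # -> 'b'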
nischu7/paramiko
|
paramiko/hostkeys.py
|
Python
|
lgpl-2.1
| 12,117
| 0.000825
|
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{HostKeys}
"""
import base64
import binascii
from Crypto.Hash import SHA, HMAC
try:
import UserDict
import DictMixin
except:
from collections import UserDict
#from collections import MutableMapping as DictMixin
from collections import Mapping as DictMixin
from paramiko.common import *
from paramiko.dsskey import DSSKey
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.util import get_logger
class InvalidHostKey(Exception):
def __init__(self, line, exc):
self.line = line
self.exc = exc
self.args = (line, exc)
class HostKeyEntry:
"""
Representation of a line in an OpenSSH-style "known hosts" file.
"""
def __init__(self, hostnames=None, key=None):
self.valid = (hostnames is not None) and (key is not None)
self.hostnames = hostnames
self.key = key
def from_line(cls, line, lineno=None):
"""
Parses the given line of text to find the names for the host,
the type of key, and the key data. The line is expected to be in the
format used by the openssh known_hosts file.
Lines are expected to not have leading or trailing whitespace.
We don't bother to check for comments or empty lines. All of
that should be taken care of before sending the line to us.
@param line: a line from an OpenSSH known_hosts file
@type line: str
"""
log = get_logger('paramiko.hostkeys')
fields = line.split(' ')
if len(fields) < 3:
# Bad number of fields
log.info("Not enough fields found in known_hosts in line %s (%r)" %
(lineno, line))
return None
fields = fields[:3]
names, keytype, key = fields
names = names.split(',')
# Decide what kind of key we're looking at and create an object
# to hold it accordingly.
try:
if keytype == 'ssh-rsa':
key = RSAKey(data=base64.decodebytes(key.encode()))
elif keytype == 'ssh-dss':
key = DSSKey(data=base64.decodebytes(key.encode()))
elif keytype == 'ecdsa-sha2-nistp256':
key = ECDSAKey(data=base64.decodebytes(key.encode()))
else:
log.info("Unable to handle key of type %s" % (keytype,))
return None
except binascii.Error as e:
raise InvalidHostKey(line, e)
return cls(names, key)
from_line = classmethod(from_line)
def to_line(self):
"""
Returns a string in OpenSSH known_hosts file format, or None if
the object is not in a valid state. A trailing newline is
included.
"""
if self.valid:
return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(),
self.key.get_base64())
return None
def __repr__(self):
return '<HostKeyEntry %r: %r>' % (self.hostnames, self.key)
class HostKeys (DictMixin):
"""
Representation of an openssh-style "known hosts" file. Host keys can be
read from one or more files, and then individual hosts can be looked up to
verify server keys during SSH negotiation.
A HostKeys object can be treated like a dict; any dict lookup is equivalent
to calling L{lookup}.
@since: 1.5.3
"""
def __init__(self, filename=None):
"""
Create a new HostKeys object, optionally loading keys from an openssh
style host-key file.
@param filename: filename to load host keys from, or C{None}
@type filename: str
"""
# emulate a dict of { hostname: { keytype: PKey } }
self._entries = []
if filename is not None:
self.load(filename)
def add(self, hostname, keytype, key):
"""
Add a host key entry to the table. Any existing entry for a
C{(hostname, keytype)} pair will be replaced.
@param hostname: the hostname (or IP) to add
@type hostname: str
@param keytype: key type (C{"ssh-rsa"} or C{"ssh-dss"})
@type keytype: str
@param key: the key to add
@type key: L{PKey}
"""
for e in self._entries:
if (hostname in e.hostnames) and (e.key.get_name() == keytype):
e.key = key
return
self._entries.append(HostKeyEntry([hostname], key))
def load(self, filename):
"""
Read a file of known SSH host keys, in the format used by openssh.
This type of file unfortunately doesn't exist on Windows, but on
posix, it will usually be stored in
C{os.path.expanduser("~/.ssh/known_hosts")}.
If this method is called multiple times, the host keys are merged,
not cleared. So multiple calls to C{load} will just call L{add},
replacing any existing entries and adding new ones.
@param filename: name of the file to read host keys from
@type filename: str
@raise IOError: if there was an error reading the file
"""
f = open(filename, 'r')
for lineno, line in enumerate(f):
line = line.strip()
if (len(line) == 0) or (line[0] == '#'):
continue
e = HostKeyEntry.from_line(line, lineno)
if e is not None:
_hostnames = e.hostnames
for h in _hostnames:
if self.check(h, e.key):
e.h
|
ostnames.remove(h)
if len(e.hostnames):
self._entries.append(e)
f.close()
def save(self, filename):
"""
Save host keys into a file, in the format used by openssh. The order of
keys in the file will be preserved when possible (if these keys were
loaded from a file originally). The single exception is that combined
lines will be split i
|
nto individual key lines, which is arguably a bug.
@param filename: name of the file to write
@type filename: str
@raise IOError: if there was an error writing the file
@since: 1.6.1
"""
f = open(filename, 'w')
for e in self._entries:
line = e.to_line()
if line:
f.write(line)
f.close()
def lookup(self, hostname):
"""
Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey})
"""
class SubDict (DictMixin):
def __init__(self, hostname, entries, hostkeys):
self._hostname = hostname
self._entries = entries
self._hostkeys = hostkeys
def __len__(self):
return len(self.keys())
def __iter__(self):
return self.keys().__iter__()
def __getitem__(self, key):
for e in self._entries:
if e.key.get_name() == key:
|
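A self-contained sketch of the known_hosts line layout that HostKeyEntry.from_line above parses: space-separated hostnames, key type, and base64 key data, with the hostnames further split on commas. The key material below is made up.

line = "example.com,192.0.2.10 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB..."  # fake key data
fields = line.split(' ')[:3]
names, keytype, key_b64 = fields
print(names.split(','))   # -> ['example.com', '192.0.2.10']
print(keytype)            # -> ssh-rsa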
stackforge/python-tackerclient
|
tackerclient/tacker/v1_0/nfvo/vnffgd.py
|
Python
|
apache-2.0
| 3,389
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from oslo_serialization import jsonutils
from tackerclient.i18n import _
from tackerclient.tacker import v1_0 as tackerV10
_VNFFGD = "vnffgd"
class ListVNFFGD(tackerV10.ListCommand):
"""List VNFFGDs that belong to a given tenant."""
resource = _VNFFGD
list_columns = ['id', 'name', 'template_source', 'description']
def get_parser(self, prog_name):
parser = super(ListVNFFGD, self).get_parser(prog_name)
parser.add_argument(
'--template-source',
help=_("List VNFFGD with specified template source. Available \
options are 'onboarded' (default), 'inline' or 'all'"),
action='store',
default='onboarded')
return parser
def args2search_opts(self, parsed_args):
search_opts = super(ListVNFFGD, self).args2search_opts(parsed_args)
template_s
|
ource = parsed_args.template_source
if parsed_args.template_source:
search_opts.update({'template_source': template_source})
return search_opts
class ShowVNFFGD(tackerV10.ShowCommand):
"""Show information of a given VNFFGD."""
resource = _VNFFGD
class CreateVNFFGD(tackerV10.CreateCommand):
""
|
"Create a VNFFGD."""
resource = _VNFFGD
remove_output_fields = ["attributes"]
def add_known_arguments(self, parser):
parser.add_argument('--vnffgd-file', help=_('Specify VNFFGD file'))
parser.add_argument(
'name', metavar='NAME',
help=_('Set a name for the VNFFGD'))
parser.add_argument(
'--description',
help=_('Set a description for the VNFFGD'))
def args2body(self, parsed_args):
body = {self.resource: {}}
if parsed_args.vnffgd_file:
with open(parsed_args.vnffgd_file) as f:
vnffgd = yaml.safe_load(f.read())
body[self.resource]['template'] = {'vnffgd': vnffgd}
tackerV10.update_dict(parsed_args, body[self.resource],
['tenant_id', 'name', 'description'])
return body
class DeleteVNFFGD(tackerV10.DeleteCommand):
"""Delete a given VNFFGD."""
resource = _VNFFGD
class ShowTemplateVNFFGD(tackerV10.ShowCommand):
"""Show template of a given VNFFGD."""
resource = _VNFFGD
def run(self, parsed_args):
self.log.debug('run(%s)', parsed_args)
template = None
data = self.get_data(parsed_args)
try:
attributes_index = data[0].index('template')
attributes_json = data[1][attributes_index]
template = jsonutils.loads(attributes_json).get('vnffgd', None)
except (IndexError, TypeError, ValueError) as e:
self.log.debug('Data handling error: %s', str(e))
print(template or _('Unable to display VNFFGD template!'))
|
pepaslabs/pywiki
|
pam_authenticate.py
|
Python
|
mit
| 684
| 0.004386
|
#!/usr/bin/
|
python
# pam_authenticate.py: a script to check a user's password against PAM.
# part of the pywiki project, see https://github.com/pepaslabs/pywiki
# written by jason pepas, released under the terms of the MIT license.
# usage: pipe a password into this script, giving the username as the first
|
argument.
# a zero exit status indicates successful authentication.
import sys
import pam # pip install python-pam
p = pam.pam()
logged_in = False
try:
user = sys.argv[1]
passwd = sys.stdin.read()
logged_in = p.authenticate(user, passwd)
except Exception as e:
sys.exit(2)
else:
if logged_in == True:
sys.exit(0)
else:
sys.exit(1)
|
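A hypothetical caller-side sketch for pam_authenticate.py above: the password is piped on stdin, the username is the first argument, and the exit status is the result. It assumes the script sits next to the caller and that python-pam is installed; the username is made up.

import subprocess

proc = subprocess.run(
    ['python', 'pam_authenticate.py', 'alice'],   # 'alice' is an assumed username
    input=b'secret-password\n',                   # password delivered on stdin
)
print('authenticated' if proc.returncode == 0 else 'rejected')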
mohamedattahri/Greendizer-Python-Library
|
greendizer/clients/base.py
|
Python
|
bsd-3-clause
| 1,742
| 0.002296
|
# -*- coding: utf-8 -*-
import re
from math import modf
from datetime import datetime, timedelta
## {{{ http://code.activestate.com/recipes/65215/ (r5)
EMAIL_PATTERN = re.compile('^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]' \
'+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$')
def to_unicode(text):
'''
Converts an input text to a unicode object.
@param text:object Input text
@returns:unicode
'''
return text.decode("UTF-8") if type(text) == str else unicode(text)
def to_byte_string(text):
'''
    Converts an input text to a byte string.
    @param text:object Input text
    @returns:str
'''
return text.encode("UTF-8") if type(text) == unicode else str(text)
def is_valid_email(s):
'''
Returns a value indicating whether the
|
submitted string is a valid
email address.
@param s:str Email
@return: bool
'''
return (s and len(s) > 7 and EMAIL_PATTERN.match(s))
def timestamp_to_datetime
|
(s):
'''
Parses a timestamp to a datetime instance.
@param: s:str Timestamp string.
@return: datetime
'''
f, i = modf(long(s) / float(1000))
return datetime.fromtimestamp(i) + timedelta(milliseconds=f * 1000)
def datetime_to_timestamp(d):
'''
Converts a datetime instance into a timestamp string.
@param d:datetime Date instance
@return:long
'''
return long(d.strftime("%s") + "%03d" % (d.time().microsecond / 1000))
def extract_id_from_uri(s):
'''
Returns the ID section of an URI.
@param s:str URI
@return: str
'''
return [ item for item in s.split("/") if item ][-1]
def size_in_bytes(data):
'''
Gets the size in bytes of a str.
@return: long
'''
return len(data)
|
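A Python 3 re-sketch of the millisecond-timestamp conversion above (the original helpers are Python 2 and rely on long/unicode); the timestamp value is arbitrary.

from datetime import datetime, timedelta
from math import modf

def timestamp_to_datetime(ms):
    # split the epoch-milliseconds value into whole seconds and a fractional part
    frac, whole = modf(int(ms) / 1000.0)
    return datetime.fromtimestamp(whole) + timedelta(milliseconds=frac * 1000)

print(timestamp_to_datetime("1700000000123"))   # local datetime with ~123 ms of sub-second detail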
zalew/fabric-pgbackup
|
setup.py
|
Python
|
mit
| 1,243
| 0
|
#!/us
|
r/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils
|
.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='fabric-pgbackup',
version='0.1.0',
description='PostgreSQL backup/restore utilities for Fabric',
long_description=readme + '\n\n' + history,
author='Jakub Zalewski',
author_email='zalew7@gmail.com',
url='https://github.com/zalew/fabric-pgbackup',
packages=[
'fabric_pgbackup',
],
include_package_data=True,
install_requires=[
'fabric',
'psycopg2',
],
license="MIT",
zip_safe=False,
keywords='fabric-pgbackup',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
)
|
RaumZeit/gdesklets-core
|
shell/plugins/PackageInstaller/Downloader.py
|
Python
|
gpl-2.0
| 3,008
| 0.004654
|
import gobject
import gtk
class Downloader(gtk.Dialog):
def __init__(self, path):
self.__is_cancelled = False
gtk.Dialog.__init__(self, title = "", buttons = (gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL))
self.set_default_size(300, 100)
self.connect("response", self.__on_cancel)
vbox = self.vbox
hbox = gtk.HBox(False, 12)
hbox.set_border_width(12)
vbox.pack_start(hbox, True, True)
import os
img = gtk.Image()
img.set_from_file(os.path.join(path, "download.png"))
hbox.pack_start(img, False, False)
vbox = gtk.VBox()
hbox.pack_end(vbox, True, True)
lbl = gtk.Label("<b>" + _("Retrieving:") + "</b>")
lbl.set_use_markup(True)
align = gtk.Alignment(0.0, 0.0)
align.add(lbl)
vbox.add(align)
self.__label = gtk.Label("")
self.__label.set_use_markup(True)
align = gtk.Alignment(0.0, 0.0)
align.add(self.__label)
vbox.add(align)
self.__bar = gtk.ProgressBar()
vbox.add(self.__bar)
def __on_cancel(self, src, response):
self.__is_cancelled = True
def download(self, url, dest):
name = url
if (len(name) >= 60):
name = name[:30] + "..." + name[-30:]
gobject.timeout_add(0, self.__label.set_text, "%s" % (name))
gobject.timeout_add(0, self.__bar.set_fraction, 0)
gobject.timeout_add(0, self.__bar.set_text, "Contacting...")
gobject.timeout_add(0, self.show_all)
self.__is_cancelled = False
dest_fd = open(dest, "w")
import gconf
client = gconf.client_get_default()
use_proxy = client.get_bool('/system/http_proxy/use_http_proxy')
if (use_proxy != 0):
host = client.get_string('/system/http_proxy/host')
port = client.get_int('/system/http_proxy/port')
if (host != ""):
http_proxy = "http://" + host + ':' + str(port)
else:
http_proxy = None
else:
http_proxy = None
import urllib2
if (http_proxy is not None):
proxy_support = urllib2.ProxyHandler({"http" : http_proxy})
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
src_fd = urllib2.urlopen(url)
total_size = src_fd.info().get("Content-Length", 0)
so_far = 0
while (not self.__is_cancelled):
data = src_fd.read(4096)
if (not data):
break
dest_fd.wri
|
te(data)
so_far += len(data)
value = (100 * so_far / max(0.1, float(total_size)))
gob
|
ject.timeout_add(0, self.__bar.set_fraction, value / 100.0)
gobject.timeout_add(0, self.__bar.set_text, "%i%%" % (value))
src_fd.close()
dest_fd.close()
gobject.timeout_add(0, self.hide)
|
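A tiny offline sketch of the progress arithmetic used in the download loop above: the fraction is bytes-so-far over Content-Length, with a small floor to avoid dividing by zero. The sizes are invented.

total_size = 20480                 # assumed Content-Length in bytes
so_far = 0
for _ in range(5):                 # pretend we read five 4 KiB chunks
    so_far += 4096
    value = 100 * so_far / max(0.1, float(total_size))
    print("%i%%" % value)          # -> 20% 40% 60% 80% 100%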
mcclurmc/juju
|
juju/unit/tests/test_address.py
|
Python
|
agpl-3.0
| 3,698
| 0
|
import subprocess
import zookeeper
from twisted.internet.defer import inlineCallbacks, succeed, returnValue
from twisted.web import client
from juju.errors import JujuError
from juju.lib.testing import TestCase
from juju.unit.address import (
EC2UnitAddress, LocalUnitAddress, OrchestraUnitAddress, DummyUnitAddress,
get_unit_address)
from juju.state.environment import GlobalSettingsStateManager
class AddressTest(TestCase):
def setUp(self):
zookeeper.set_debug_level(0)
self.client = self.get_zookeeper_client()
return self.client.connect()
@inlineCallbacks
def get_address_for(self, provider_type):
settings = GlobalSettingsStateManager(self.client)
yield settings.set_provider_type(provider_type)
address = yield get_unit_address(self.client)
returnValue(address)
@inlineCallbacks
def test_get_ec2_address(self):
address = yield self.get_address_for("ec2")
self.assertTrue(isinstance(address, EC2UnitAddress))
@inlineCallbacks
def test_get_local_address(self):
address = yield self.get_address_for("local")
self.assertTrue(isinstance(address, LocalUnitAddress))
@inlineCallbacks
def test_get_orchestra_address(self):
address = yield self.get_address_for("orchestra")
self.assertTrue(isinstance(address, OrchestraUnitAddress))
@inlineCallbacks
def test_get_dummy_address(self):
address = yield self.get_address_for("dummy")
self.assertTrue(isinstance(address, DummyUnitAddress))
def test_get_unknown_address(self):
return self.assertFailure(self.get_address_for("foobar"), JujuError)
class DummyAddressTest(TestCase):
def setUp(self):
self.address = DummyUnitAddress()
def test_get_address(self):
self.assertEqual(
(yield self.address.get_public_address()),
"localhost")
self.assertEqual(
(yield self.address.get_private_address()),
"localhost")
class EC2AddressTest(TestCase):
def setUp(self):
self.address = EC2UnitAddress()
@inlineCallbacks
def test_get_address(self):
urls = [
"http://169.254.169.254/latest/meta-data/local-hostname",
"http://169.254.169.254/latest/meta-data/public-hostname"]
def verify_args(url):
self.assertEqual(urls.pop(0), url)
return succeed("foobar\n")
self.patch(client, "getPage", verify_args)
self.assertEqual(
(yield self.address.get_private_addres
|
s()), "foobar")
self.assertEqual(
(yield self.address.get_public_address()), "foobar")
class LocalAddressTest(TestCase):
def setUp(self):
self.address =
|
LocalUnitAddress()
@inlineCallbacks
def test_get_address(self):
self.patch(
subprocess, "check_output",
lambda args: "192.168.1.122 127.0.0.1\n")
self.assertEqual(
(yield self.address.get_public_address()),
"192.168.1.122")
self.assertEqual(
(yield self.address.get_private_address()),
"192.168.1.122")
class OrchestraAddressTest(TestCase):
def setUp(self):
self.address = OrchestraUnitAddress()
@inlineCallbacks
def test_get_address(self):
self.patch(
subprocess, "check_output",
lambda args: "slice.foobar.domain.net\n")
self.assertEqual(
(yield self.address.get_public_address()),
"slice.foobar.domain.net")
self.assertEqual(
(yield self.address.get_private_address()),
"slice.foobar.domain.net")
|
dungeonsnd/test-code
|
dev_examples/pyserver/conf/pyserverconf.py
|
Python
|
gpl-3.0
| 779
| 0.03466
|
#!/bin/env python
# -*- coding: utf-8 -*-
process_count = 1
start_server_port =8600
log_file ='../log/pyserver.log'
db_host ='192.168.17
|
.153'
db_port =3306
db_username ='root'
db_passwd ='tm'
db_database ='test'
db_connection_pool_size =16
coroutine_pool_size_per_process =100000
tcp_backlog =1024
tcp_listen_on_ip ='0.0.0.0'
cache_conf ={'pyscache0': {'cache_host':'192.168.17.153',
'cache_port':6379,
'cache_database':15,
'cache_connection_pool_size':4},
'pyscach
|
e1': {'cache_host':'192.168.17.153',
'cache_port':6379,
'cache_database':14,
'cache_connection_pool_size':4}
}
|
wenli810620/twitter-photos
|
twphotos/increment.py
|
Python
|
bsd-2-clause
| 1,165
| 0
|
import ConfigParser
from .settings import SECTIONS, CONFIG
config = ConfigParser.ConfigParser()
config.read(CONFIG)
if not config.has_section(SECTIONS['INCREMENTS']):
config.add_section(SECTIONS['INCREMENTS'])
with open(CONFIG, 'w') as f:
config.write(f)
def read_since_ids(users):
"""
Read max ids of the last downloads
:param users: A list of users
Return a dictionary mapping users to ids
"""
since_ids = {}
for user in users:
if config.has_option(SECTIONS['INCREMENTS'], user):
since_ids[user] = config.getint(SECTIONS['INCREMENTS'], user) + 1
return since_ids
def set_max_ids(max_ids):
"""
Set max ids of the current downloads
:
|
param max_ids: A dictionary mapping users to ids
"""
config.read(CONFIG)
for user, max_id in max_ids.items():
config.set(SECTIONS['INCREMENTS'], user, str(max_id))
with open(CONFIG, 'w') as f:
config.write(f)
def remove_since_id(user):
if config.has_option(SECTIONS['INCREMENTS'], user):
config.remove_option(SECTIONS['INCREMENTS'], user)
with open(CONFIG, 'w') as f:
|
config.write(f)
|
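A self-contained sketch of the since-id bookkeeping above, using Python 3's configparser and an in-memory parser instead of the CONFIG file; the user name and id are made up.

import configparser

config = configparser.ConfigParser()
config.add_section('increments')
config.set('increments', 'alice', '1234')             # max id stored after the last download
since_id = config.getint('increments', 'alice') + 1   # resume one past the stored id
print(since_id)                                        # -> 1235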
Scan-o-Matic/scanomatic
|
tests/unit/scanning/test_terminate_scanjob.py
|
Python
|
gpl-3.0
| 2,729
| 0
|
from datetime import datetime, timedelta
from freezegun import freeze_time
from mock import MagicMock
import pytest
from pytz import utc
from scanomatic.data.scanjobstore import ScanJobStore
from scanomatic.models.scanjob import ScanJob
from scanomatic.scanning.terminate_scanjob import (
TerminateScanJobError, UnknownScanjobError, terminate_scanjob
)
def make_scanjob(
start_time=datetime(
1985, 10, 26, 1, 20, tzinfo=utc
),
termination_time=None,
duration=timedelta(minutes=20),
):
return ScanJob(
duration=duration,
identifier='scjb000',
interval=timedelta(minutes=5),
name='Test Scan J
|
ob',
scanner_id='scnr000',
start_time=start_time,
termination_time=termination_time,
)
class TestTerminateScanjob:
def test_unknown_scanjob(self):
store = MagicMock(ScanJobStore)
store.get_scanjob_by_id.side_effect = LookupError
with pytest.raises(UnknownScanjobError):
terminate_scanjob
|
(store, 'unknown', 'The Message')
def test_not_started(self):
store = MagicMock(ScanJobStore)
store.get_scanjob_by_id.return_value = make_scanjob(start_time=None)
with pytest.raises(TerminateScanJobError):
terminate_scanjob(store, 'scjb000', 'The Message')
def test_already_terminated(self):
store = MagicMock(ScanJobStore)
store.get_scanjob_by_id.return_value = make_scanjob(
start_time=datetime(
1985, 10, 26, 1, 20, tzinfo=utc
),
termination_time=datetime(
1985, 10, 26, 1, 21, tzinfo=utc
)
)
with pytest.raises(TerminateScanJobError):
terminate_scanjob(store, 'scjb000', 'The Message')
def test_already_ended(self):
store = MagicMock(ScanJobStore)
store.get_scanjob_by_id.return_value = make_scanjob(
start_time=datetime(
1985, 10, 26, 1, 20, tzinfo=utc
),
termination_time=None,
)
with pytest.raises(TerminateScanJobError):
terminate_scanjob(store, 'scjb000', 'The Message')
def test_running_scanjob(self):
store = MagicMock(ScanJobStore)
store.get_scanjob_by_id.return_value = make_scanjob(
start_time=datetime(
1985, 10, 26, 1, 20, tzinfo=utc
),
duration=timedelta(minutes=20),
)
now = datetime(1985, 10, 26, 1, 21, tzinfo=utc)
with freeze_time(now):
terminate_scanjob(store, 'scjb000', 'The Message')
store.terminate_scanjob.assert_called_with(
'scjb000', now, 'The Message'
)
|
markpasc/termtool
|
setup.py
|
Python
|
mit
| 1,440
| 0.002778
|
from distutils.core import setup
long_description = """
`termtool` helps you write subcommand-based command line tools in Python. It collects several Python libraries into a declarative syntax:
* `argparse`, the argument parsing module with subcommand support provided in the standard library in Python 2.7 and later.
* `prettytable <http://code.google.com/p/prettytable/>`_, an easy module for building tables of information.
* `progressbar <http://code.google.com/p/python-progressbar/>`_, a handy module for displaying progress bars.
* `logging`, the simple built-in module for logging messages.
"""
setup(
name='termtool',
version='1.1',
description='Dec
|
larative terminal tool programming',
author='Mark Paschal',
author_email='markpasc@markpasc.org',
url='https://github.com/markpasc/termtool',
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating Sy
|
stem :: Unix',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
packages=[],
py_modules=['termtool'],
requires=['argparse', 'PrettyTable', 'progressbar'],
)
|
dvspirito/pymeasure
|
docs/conf.py
|
Python
|
mit
| 8,651
| 0.005895
|
# -*- coding: utf-8 -*-
#
# PyMeasure documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 6 13:06:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..')) # Allow modules to be found
# Include Read the Docs formatting
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyMeasure'
copyright = u'2013-2017, PyMeasure Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyMeasuredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additio
|
nal stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyMeasure.tex', u'PyMeasure Documentation',
u'PyMeasure Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the t
|
itle page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymeasure', u'PyMeasure Documentation',
[u'PyMeasure Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyMeasure', u'PyM
|
DavidNorman/tensorflow
|
tensorflow/python/keras/mixed_precision/experimental/policy.py
|
Python
|
apache-2.0
| 24,791
| 0.005042
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export
# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'
@keras_export('keras.mixed_precision.experimental.Policy')
class Policy(object):
"""A dtype policy for a Keras layer.
A dtype policy determines dtype-related aspects of a layer, such as its
computation and variable dtypes. Each layer has a policy. Policies can be
passed to the `dtype` argument of layer constructors, or a global policy can
be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
  default to the global policy if no policy is passed to its constructor.
For many models, each layer's policy will have the same compute dtype and
variable dtype, which will typically be float32. In this case, we refer to the
singular dtype as the layer's dtype, which can be queried by the property
`tf.keras.layers.Layer.dtype`.
When mixed precision training is used, most layers will instead have a float16
or bfloat16 compute dtype and a float32 variable dtype, and so the layer does
not have a single dtype. See [this
link](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
for more information on mixed precision training. When the variable dtype does
not match the compute dtype, variables will be automatically casted to the
compute dtype to avoid type errors. In this case,
`tf.keras.layers.Layer.dtype` refers to the variable dtype, not the compute
dtype.
Certain policies also have a `tf.mixed_precision.experimental.LossScale`
  instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
scaling is a technique used with mixed precision to avoid numerical underflow
in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
`Model.train_on_batch`, and similar methods. Layers which are not Models
ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
determines the compute and variable dtypes. It can be one of the following:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype. No loss scaling is done by default.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. These policies are used for
mixed precision training. With 'mixed_float16', a dynamic loss scale is
used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
scaling is unnecessary with bfloat16.
### How to use mixed precision in a Keras model
|
To use mixed precision in a Keras model, the `'mixed_float16'` or
`'mixed_bfloat16'` policy can be used.
`tf.keras.mixed_precision.experimental.set_policy` can be used to set the
default policy for layers if no policy is passed to them. For example:
```python
tf.keras.mixed_prec
|
ision.experimental.set_policy('mixed_float16')
model = tf.keras.models.Sequential([
tf.keras.layers.Input((100,)),
# Dense layers use global policy of 'mixed_float16', which does
# computations in float16 while keeping variables in float32.
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(10),
# Softmax should be done in float32 for numeric stability. We pass
# dtype='float32' to use float32 instead of the global policy.
tf.keras.layers.Activation('softmax', dtype='float32')
])
model.compile(...)
model.fit(...) # Train `model`
```
Alternatively, the policy can be passed to individual layers instead of
setting the global policy with `set_policy`:
```python
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
model = tf.keras.models.Sequential([
tf.keras.layers.Input((100,)),
tf.keras.layers.Dense(10, dtype=policy),
tf.keras.layers.Dense(10, dtype=policy),
# Softmax should be done in float32 for numeric stability.
tf.keras.layers.Activation('softmax', dtype='float32')
])
model.compile(...)
model.fit(...) # Train `model`
```
Note the `'mixed_float16'` policy will apply loss scaling by default in
`Model.fit`, `Model.train_on_batch`, and other training methods. If no such
method is used (e.g., a custom training loop is used) and `'mixed_float16'` is
used, the loss scale must be manually applied. See
`tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
`'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be
manually applied.
### How to use float64 in a Keras model
Using float64 is similar to mixed precision. Either the global policy can be
set to float64, or `dtype='float64'` can be passed to individual layers. For
example, to set the global policy:
```python
tf.keras.mixed_precision.experimental.set_policy('float64')
model = tf.keras.models.Sequential([
tf.keras.layers.Input((100,)),
# All layers use global policy of 'float64', which does computations and
# creates variables in float64.
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(10),
tf.keras.layers.Activation('softmax')
])
model.compile(...)
model.fit(...) # Train `model`
```
### How a layer uses its policy's compute dtype
A layer will cast its inputs to its compute dtype in TensorFlow 2. For
example:
```python
x = tf.ones((4, 4, 4, 4), dtype='float64')
# `layer`'s policy defaults to float32.
layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  # `layer` casts its inputs to its compute dtype, which is float32, and does
# computations in float32.
y = layer(x)
print(y.dtype) # float32
```
Currently, only tensors in the first argument to the layer's `call` method are
casted. For example:
```python
class MyLayer(tf.keras.layers.Layer):
# Bug! `b` will not be casted.
def call(self, a, b):
return a + 1., b + 1.
a = tf.constant(1., dtype="float32")
b = tf.constant(1., dtype="float32")
layer = MyLayer(dtype="float64")
x, y = layer(a, b)
print(x.dtype) # float64
print(y.dtype) # float32. Not casted since `b` was not passed to first input
```
It is recommended to accept tensors only in the first argument. This way, all
tensors are casted to the layer's compute dtype. `MyLayer` should therefore be
written as:
```python
class MyLayer(tf.keras.layers.Layer):
  # Now, all tensor inputs will be cast.
def call(self, inputs):
a, b = inputs
return a + 1., b + 1.
a = tf.constant(1., dtype="float32")
b = tf.constant(1., dtype="float32")
layer = MyLayer(dtype="float64")
x, y = layer((a, b))
print(x.dtype) # float64
print(y.dtype) # floa
|
vasili-v/ctauto
|
test/test_parser.py
|
Python
|
gpl-3.0
| 9,771
| 0.000819
|
import unittest
from ctauto.exceptions import CTAutoMissingEndOfMetablockError, \
CTAutoBrokenEndOfMetablockError, \
CTAutoInvalidMetablockError, \
CTAutoInvalidIdError, \
CTAutoMissingEndOfStringError, \
CTAutoInvalidStringError, \
CTAutoIncompleteEscapeSequence, \
CTAutoInvalidEscapeSequence, \
CTAutoTrailingCharacterAfterQuotedText, \
CTAutoInvalidNumberError
from ctauto.blocks import Block, MetaBlock
from ctauto.tokens import SimpleTextToken, QuotedTextToken, NumericToken, \
DotToken, LeftSquareBracketToken, RightSquareBracketToken
from ctauto.parser import EndOfFileCharacter, Parser, TemplateParser
_TEST_CONTENT = "<% metacode 1 %>\n" \
"#include <stdio.h>\n" \
"\n" \
"int main(void)\n" \
"{\n" \
" <% metacode 2 %>\n" \
" // <% metacode 3 %>\n" \
" return 0;\n" \
" <% metacode 4 . [ 1 ] %>\n" \
"}\n"
class TestParser(unittest.TestCase):
def test_parse(self):
class TestParser(Parser):
def reset(self, content, source):
self.source = source
self.content = content
self.indexes = []
self.characters = []
return self.first
def finalize(self):
return self.indexes, self.characters
def first(self, index, character):
self.indexes.append(index)
self.characters.append(character)
return self.second
def second(self, index, character):
self.indexes.append(index)
self.characters.append(character)
return self.third
def third(self, index, character):
if character is EndOfFileCharacter:
self.indexes.append(index)
self.characters.append(character)
return
self.indexes[-1] = index
self.characters[-1] = character
return self.third
parser = TestParser()
indexes, characters = parser.parse(_TEST_CONTENT, "test")
self.assertEqual(parser.source, "test")
self.assertEqual(parser.content, _TEST_CONTENT)
length = len(_TEST_CONTENT)
self.assertEqual(indexes, [0, length-1, length])
self.assertEqual(characters, ['<', '\n', EndOfFileCharacter])
class TestTemplateParser(unittest.TestCase):
def test_template_parse(self):
parser = TemplateParser()
blocks = parser.parse(_TEST_CONTENT, "test")
self.assertEqual(parser.source, "test")
self.assertEqual(parser.content, _TEST_CONTENT)
self.assertEqual(len(blocks), 8)
block = blocks[0]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 1 ")
self.assertEqual(block.tokens,
[SimpleTextToken(1, "metacode"),
NumericToken(1, "1")])
block = blocks[1]
self.assertIsInstance(block, Block)
|
self.assertEqual(block.content, "\n"
"#include <stdio.h>\n"
|
"\n"
"int main(void)\n"
"{\n"
" ")
block = blocks[2]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 2 ")
self.assertEqual(block.tokens,
[SimpleTextToken(6, "metacode"),
NumericToken(6, "2")])
block = blocks[3]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
" // ")
block = blocks[4]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 3 ")
self.assertEqual(block.tokens,
[SimpleTextToken(7, "metacode"),
NumericToken(7, "3")])
block = blocks[5]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
" return 0;\n"
" ")
block = blocks[6]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 4 . [ 1 ] ")
self.assertEqual(block.tokens,
[SimpleTextToken(9, "metacode"),
NumericToken(9, "4"),
DotToken(9),
LeftSquareBracketToken(9),
NumericToken(9, "1"),
RightSquareBracketToken(9)])
block = blocks[7]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
"}\n")
def test_invalid_ends_of_metablock(self):
parser = TemplateParser()
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<% %", "test")
with self.assertRaises(CTAutoBrokenEndOfMetablockError):
parser.parse("<% %!", "test")
def test_invalid_metablock(self):
parser = TemplateParser()
with self.assertRaises(CTAutoInvalidMetablockError):
parser.parse("<% ! %>", "test")
def test_end_of_metablock_while_skipping_whitespaces(self):
parser = TemplateParser()
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse(" <% ", "test")
def test_multiline_metablock(self):
parser = TemplateParser()
blocks = parser.parse("<%\tx\n\ty\n\tz\n\tt%>", "test")
self.assertEqual(blocks[0].tokens,
[SimpleTextToken(1, "x"),
SimpleTextToken(2, "y"),
SimpleTextToken(3, "z"),
SimpleTextToken(4, "t")])
def test_simple_text_token(self):
parser = TemplateParser()
blocks = parser.parse("<%test%>", "test")
self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test")])
blocks = parser.parse("<% test %>", "test")
self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test")])
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<%s test", "test")
with self.assertRaises(CTAutoInvalidIdError):
parser.parse("<%s test! %>", "test")
def test_quoted_text_token(self):
parser = TemplateParser()
blocks = parser.parse("<%\"test\"%>", "test")
self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test")])
blocks = parser.parse("<% \"test \\\\ \\\"test\\\" \\n \\t \\r \\a\" %>", "test")
self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test \\ \"test\" \n \t \r \\a")])
with self.assertRaises(CTAutoMissingEndOfStringError):
parser.parse("<%\"test%>", "test")
with self.assertRaises(CTAutoInvalidStringError):
parser.parse("<%\"test\n%>", "test")
with self.assertRaises(CTAutoIncompleteEscapeSequence):
parser.parse("<% \"test \\", "test")
with self.assertRaises(CTAutoInvalidEscapeSequence):
parser.parse("<% \"test \\\n test\" %>", "test")
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<% \"test\"", "test")
with self.assertRaises(CTAutoTrailingCharacterAfterQuotedText):
parser.parse("<% \"test\"test %>", "test")
def test_numeric_token(self):
parser = TemplateParser()
blocks = parser.pa
|
CentralLabFacilities/pepper_behavior_sandbox
|
pepper_behavior/skills/calculate_person_position.py
|
Python
|
gpl-3.0
| 4,483
| 0.002456
|
import smach
import rospy
import tf
import math
import random
class CalculatePersonPosition(smach.Sta
|
te):
def __init__(self, controller, controller_2=None, sensor=None, max_distance=2.5, onlyhorizontal=False, knownperson=True):
self.person_sensor =
|
controller
self.max_distance = max_distance
self.person_id = sensor
self.talk_known = controller_2
self.ignoreknownperson = knownperson
self.talks = ['Oh, ich denke Dich habe ich schon begrüsst',
'Dich kenne ich schon, ich mache weiter',
'Oh schön dich wieder zu sehen, bis gleich.',
'Wen haben wir denn da, dich kenne ich. Gleich gehen die Vorträge los!',
'Hallo noch mal. Wie wäre es wenn wir uns nachher unterhalten?']
self.counter = 0
# https://answers.ros.org/question/10777/service-exception-using-tf-listener-in-rospy
self.tf = tf.TransformListener()
input = []
self.onlyhorizontal = onlyhorizontal
if onlyhorizontal:
input = ['old_vertical']
smach.State.__init__(self, input_keys=input, outcomes=['success', 'repeat', 'no_person_found', 'known'],
output_keys=['person_angle_vertical', 'person_angle_horizontal'])
def execute(self, userdata):
self.person = None
self.person_sensor.clearPerson()
rospy.sleep(0.1)
self.person = self.person_sensor.getDetectedPerson()
self.pose = None
self.transformid = None
self.dist = self.max_distance
for p in self.person:
pose = p.pose
dist = distance(pose.pose.position)
rospy.logwarn
if not self.tf.frameExists('base_link'):
rospy.logwarn('Base_link does not exist!')
if not self.tf.frameExists(pose.header.frame_id):
rospy.logwarn('%s does not exist!' % str(pose.header.frame_id))
if dist < self.dist and self.tf.frameExists(pose.header.frame_id) and self.tf.frameExists('base_link'):
try:
print(p.pose)
self.tf.waitForTransform('base_link', 'CameraDepth_optical_frame', rospy.Time.now(),
rospy.Duration(4.0))
po = self.tf.transformPose("base_link", pose)
self.dist = dist
self.pose = po.pose
self.transformid = p.transformid
except Exception, e:
print("Exception %s" % e)
return 'repeat'
if dist > self.max_distance and self.max_distance == self.dist:
print('Detected person to far away. Distance: %s ' % dist)
if self.pose:
(vertical, horizontal) = rotation(self.pose)
self.counter = 0
if self.onlyhorizontal:
userdata.person_angle_vertical = userdata.old_vertical
else:
userdata.person_angle_vertical = vertical
userdata.person_angle_horizontal = horizontal
if self.dist < 1.8 and self.transformid is not None:
try:
known, name = self.person_id.identify(self.transformid)
if known and self.ignoreknownperson:
rospy.loginfo("Person is known, iterating")
if self.talk_known is not None:
self.talk_known.say_something(random.choice(self.talks))
return 'known'
else:
return 'success'
except Exception, e:
rospy.logwarn("Something went wront while identifying a person")
return 'success'
elif self.counter > 5:
self.counter = 0
return 'no_person_found'
else:
self.counter = self.counter + 1
return 'repeat'
def distance(trans):
dist = math.sqrt(trans.x * trans.x + trans.y * trans.y + trans.z * trans.z)
return dist
def rotation(pose):
print ("orientation")
x = pose.position.x + 0.0133
y = pose.position.y
z = pose.position.z - 0.288 + 0.2
print(x)
print(y)
print(z)
horizontal = math.degrees(math.atan2(y, x))
vertical = math.degrees(math.atan2(-z, x))
print(horizontal)
print(vertical)
return (vertical, horizontal)
|
borfast/housing-reviews
|
housing_reviews/settings/auth.py
|
Python
|
mit
| 476
| 0
|
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_REDIRECT_URL = 'reviews'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = True
ACCOUNT_LOGOUT_ON_GET =
|
True
ACCOUNT_PASSWORD_MIN_LENGTH = 10
ALLOW_NEW_
|
REGISTRATIONS = True
|
bmng-dev/PyBitmessage
|
src/helper_sent.py
|
Python
|
mit
| 132
| 0.015152
|
from helpe
|
r_sql import sqlExecute
def insert(t):
sqlExecute('''INSERT INTO sent VALUES (?,?,?,?
|
,?,?,?,?,?,?,?,?,?,?,?)''', *t)
|
tomato42/tlsfuzzer
|
scripts/test-early-application-data.py
|
Python
|
gpl-2.0
| 9,619
| 0.002911
|
# Author: Hubert Kario, (c) 2015
# Released under Gnu GPL v2.0, see LICENSE file for details
from __future__ import print_function
import traceback
import sys
import getopt
from itertools import chain
from random import sample
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, AlertGenerator, \
ResetHandshakeHashes
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectApplicationData, ExpectClose
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription, \
ExtensionType
from tlsfuzzer.utils.lists import natural_sort_keys
version = 3
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" probe-name if present, will run only the probes with given")
print(" names and not all of them, e.g \"sanity\"")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -x probe-name expect the probe to fail. When such probe passes despite being marked like this")
print(" it will be reported in the test summary and the whole script will fail.")
print(" May be specified multiple times.")
print(" -X message expect the `message` substring in exception raised during")
print(" execution of preceding expected failure probe")
print(" usage: [-x probe-name] [-X exception], order is compulsory!")
print(" -n num run 'num' or all(if 0) tests instead of default(all)")
print(" (excluding \"sanity\" tests)")
print(" --help this message")
def main():
"""
Test if server aborts connection upon receiving application data
before Finished
"""
host = "localhost"
port = 4433
num_limit = None
run_exclude = set()
expected_failures = {}
last_exp_
|
tmp = None
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, "h:p:e:x:X:n:", ["help"])
for opt, arg
|
in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '-x':
expected_failures[arg] = None
last_exp_tmp = str(arg)
elif opt == '-X':
if not last_exp_tmp:
raise ValueError("-x has to be specified before -X")
expected_failures[last_exp_tmp] = str(arg)
elif opt == '-n':
num_limit = int(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
else:
raise ValueError("Unknown option: {0}".format(opt))
if args:
run_only = set(args)
else:
run_only = None
conversations = {}
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
node = node.add_child(ClientHelloGenerator(ciphers))
node = node.add_child(ExpectServerHello())
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(
bytearray(b"GET / HTTP/1.0\n\n")))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
conversations["sanity"] = conversation
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ApplicationDataGenerator(bytearray(b"hello server!\n")))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
node = node.add_child(ExpectClose())
conversations["before Client Key Exchange"] = conversation
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ApplicationDataGenerator(bytearray(b"hello server!\n")))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
node = node.add_child(ExpectClose())
conversations["before Change Cipher Spec"] = conversation
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(ApplicationDataGenerator(bytearray(b"hello server!\n")))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
node = node.add_child(ExpectClose())
conversations["before Finished"] = conversation
# run the conversation
good = 0
bad = 0
xfail = 0
xpass = 0
failed = []
xpassed = []
if not num_limit:
num_limit = len(conversations)
# make sure that sanity test is run first and last
# to verify that server was running and kept running throughout
sanity_tests = [('sanity', conversations['sanity'])]
if run_only:
if num_limit > len(run_only):
num_limit = len(run_only)
regular_tests = [(k, v) for k, v in conversations.items() if
k in run_only]
else:
regular_tests = [(k, v) for k, v in conversations.items() if
(k != 'sanity') and k not in run_exclude]
sampled_tests = sample(regular_tests, min(num_limit, len(regular_tests)))
ordered_tests = chain(sanity_tests, sampled_tests, sanity_tests)
for c_name, c_test in ordered_tests:
if run_only and c_name not in run_only or c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(c_test)
res = True
exception = None
try:
runner.run()
except Exception as exp:
exception = exp
print("Error while processing")
print(traceback.format_exc())
res = False
if c_name in expected_failures:
if res:
xpass += 1
xpassed.append(c_name)
print("XPASS-expected failure but tes
|
pallets/jinja
|
examples/basic/test.py
|
Python
|
bsd-3-clause
| 675
| 0
|
from jinja2 import Environment
from jinja2.loader
|
s import DictLoader
env = Environment(
loader=DictLoader(
{
"child.html": """\
{% extends default_layout or 'default.html' %}
{% import 'helpers.html' as helpers %}
{% macro get_the_answer() %}42{% endmacro %}
{% set title = 'Hello World' %}
{% block body %}
{{ get_the_answer() }}
{{ helpers.conspirate() }}
{% endblock %}
""",
"default.html": """\
<!doctype html>
<title>{{ title }}</title>
{% block body %}{% e
|
ndblock %}
""",
"helpers.html": """\
{% macro conspirate() %}23{% endmacro %}
""",
}
)
)
tmpl = env.get_template("child.html")
print(tmpl.render())
|
neversettle7/image-color-sorter
|
pixelsorter.py
|
Python
|
gpl-3.0
| 4,583
| 0.002618
|
# The time library is needed to measure execution time
# PIL library to manipulate images
# colorsys library to manipulate colors
# operator library to sort the values of the array in the fastest way
import os
import sys
import time
from sorter import *
from painter import *
from explorer import *
start_time = time.time()
#script_dir = os.path.dirname(__file__) # the absolute path of the script
script_dir = os.path.dirname(os.path.abspath(__file__)) # the absolute path of the script
print(script_dir)
def run(fileinput, userchoice, fillpattern):
"""
This is the main function calling all the other methods.
:param fileinput: the file to be taken as input
:param userchoice: the algorithm that has to be executed
:return:
"""
explorer = Explorer()
painter = Painter()
sorter = Sorter()
print("\nExecuting {0} algorithm\n".format(userchoice))
# Open the file, get the content and the size
oldimg, oldimgcontent = explorer.imgopen(fileinput)
size = painter.getsize(oldimg)
width = size[1]
height = size[2]
# We store the pixel values of the original image in an array
pixvalues = painter.getpixels(oldimgcontent, width, height)
algostarttime = time.time()
if userchoice == "hsp":
sortedvalues = sorter.sort_hsp(pixvalues)
# HSL sorting - We have to convert the values from RGB to HSL and then sort
elif userchoice == "hsl":
pixvalues = sorter.rgbtohsl(pixvalues)
sortedvalues = sorter.sort_hsl(pixvalues)
# HSV sorting - We have to convert the values from RGB to HSV and then sort
elif userchoice == "hsv":
pixvalues = sorter.rgbtohsv(pixvalues)
sortedvalues = sorter.sort_hsv(pixvalues)
elif userchoice == "red":
sortedvalues = sorter.sort_firstvalue(pixvalues)
elif userchoice == "rellum":
sortedvalues = sorter.sort_rellum(pixvalues) # print(pixvalues)
# print(sortedvalues)
else:
print("Invalid algorithm choice, the program will now exit.")
sys.exit()
print("--- algorithm execution time: %s s--" % (time.time() - algostarttime))
newimg, newimgcontent = explorer.imgcreate(width, height)
# Write the content of the image
if fillpattern == 'horizontal':
painter.fill_horizontal(sortedvalues, newimgcontent, width, height)
elif fillpattern == 'vertical':
painter.fill_vertical(sortedvalues,newimgcontent, width, height)
elif fillpattern == 'spiral':
painter.fill_spiral(sortedvalues,newimgcontent, width, height)
# Save the image
output_name = "img-output-" + userchoice + ".jpg"
output_path = os.path.join(script_dir, 'output', output_name)
explorer.saveimg(newimg, output_path)
print("\nOutput file: /output/img-output-" + userchoice + ".jpg\n")
return
# Let the user choose the options
# Choose the file
print("\n\n-------------------------------------------------")
print("Insert the name of the image you want to sort")
print("PLEASE NOTE: the image should be in the \"input\" folder.")
print("Insert the name WITHOUT the folder path (example: image.jpg)\n")
fileinput = input("Leave blank if you want to use the default image (img-input-small.jpg): ")
if fileinput == "":
fileinput = "img-input-small.jpg"
input_path = os.path.join(script_dir, 'input', fileinput)
# Choose the algorithm
print("\nWhich sorting algorithm do you want to use?\n")
print("1. Hue sorting (HSV)")
print("2. Brightness - HSP color model (RGB)")
print("3. Relative luminance")
print("4. Red - Simple red sorting (RGB)")
print("5. HSL")
print("0. All of the available algorithms")
algo = {'1': 'hsv', '2': 'hsp', '3': 'rellum
|
', '4': 'red', '5': 'hsl'}
userinput = input("Select the algorithm: ")
# Choose the fill pattern
print("\nWhich fill pattern do you want to use?\n")
print("1. Vertical pattern (column by column)")
print("2. Horizontal pattern (row by row)")
print("3. Spiral pattern")
pattern = ({ '1': 'vertical', '2' : 'horizontal', '3' : 'spiral'})
fillpattern = input("Select th
|
e pattern: ")
if fillpattern in pattern:
    patternchoice = pattern[fillpattern]
if userinput in algo:
userchoice = algo[userinput]
run(input_path, userchoice, patternchoice)
elif userinput == "0":
for x in range(1, len(algo) + 1):
userchoice = algo[str(x)]
run(input_path, userchoice, patternchoice)
pass
else:
print("Options not valid. Exiting program.")
sys.exit()
print("-- total exec time: %s seconds --" % (time.time() - start_time))
|
eugeneks/zmeyka
|
fb_req.py
|
Python
|
mit
| 10,919
| 0.013736
|
import requests
import copy
# Get the members of a FB group
def fb_get_group_members(fb_page_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/members?limit=1000&access_token=%s' % (fb_page_id, access_token)
fb_group_members = {'status':'OK', 'data':{'members':[], 'users_count':0}}
while True:
response = requests.get(url)
if response.status_code == 200:
try:
keys = response.json().keys()
if 'data' in keys:
#title_id += [ id['id'] for id in content ]
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for member in content:
member['is_group_mamber'] = 1
fb_group_members['data']['members'].append(member)
if url =='':
break
except (KeyError, TypeError):
fb_group_members['status'] = 'Unknown error'
break
else:
fb_group_members['status'] = str(response.status_code)
break
fb_group_members['data']['users_count'] = len(fb_group_members['data']['members'])
return fb_group_members
# Get general information about the group
def fb_get_group_data(fb_page_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/?fields=id,name&access_token=%s' % (fb_page_id, access_token)
fb_group = {'status':'OK', 'data':{'id':'','name':'', 'updated_time':'','members':[], 'users_count':0, 'all_users':[] }}
response =requests.get(url)
if response.status_code == 200:
fb_data = fb_group['data']
data = response.json()
keys = data.keys()
if 'id' in keys:
fb_data['id'] = data['id']
else:
fb_group['status'] = 'Missing group id'
if 'name' in keys:
fb_data['name'] = data['name']
else:
fb_group['status'] = 'Missing group name'
'''
if 'updated_time' in keys:
fb_data['updated_time'] = data['updated_time']
'''
members = fb_get_group_members(fb_page_id, access_token)
if members['status']== 'OK':
fb_group['data']['members'] = copy.deepcopy(members['data']['members'])
fb_group['data']['users_count'] = members['data']['users_count']
fb_group['data']['all_users'] = copy.deepcopy(members['data']['members'])
else:
fb_group['status'] = str(response.status_code)
return fb_group
#-----Get all posts from the group-------
def fb_get_all_posts(fb_page_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/feed?fields=id,name,link,message,from,updated_time,created_time&access_token=%s' % (fb_page_id, access_token)
fb_posts = {'status':'OK', 'data':{'posts':[],'posts_count':0}}
#fb_posts = {'status':'OK', 'data':{'id':'','name':'', 'updated_time':'','link':'', 'message':''}}
while True:
response = requests.get(url)
# print(response.status_code)
if response.status_code == 200:
try:
keys = response.json().keys()
                # check whether this is json
if 'data' in keys:
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for post in content:
fb_posts['data']['posts'].append(post)
if url =='':
break
except (KeyError, TypeError):
fb_posts['status'] = 'Unknown error'
break
else:
fb_posts['status'] = str(response.status_code)
break
fb_posts['data']['posts_count'] = len(fb_posts['data']['posts'])
return fb_posts
# Get all likes of a post
def fb_get_post_likes(post_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/reactions/?access_token=%s' % (post_id, access_token)
fb_likes = {'status':'OK', 'data':{'likes':[],'likes_count':0}}
while True:
response = requests.get(url)
if response.status_code == 200:
try:
keys = response.json().keys()
                # check whether this is json
if 'data' in keys:
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for fb_like in content:
fb_likes['data']['likes'].append(fb_like)
if url =='':
break
except (KeyError, TypeError):
fb_likes['status'] = 'Unknown error'
break
else:
fb_likes['status'] = str(response.status_code)
break
fb_likes['data']['likes_count'] = len(fb_likes['data']['likes'])
return fb_likes
# Get all comments of a post
def fb_get_post_comments(post_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/comments/?fields=id,message,from,updated_time,created_time&access_token=%s' % (post_id, access_token)
fb_comments = {'status':'OK', 'data':{'comments':[],'comments_count':0}}
while True:
response = requests.get(url)
if response.status_code == 200:
try:
keys = response.json().keys()
                # check whether this is json
if 'data' in keys:
content = response.json()['data']
|
keys = response.json().keys()
url = ''
if 'paging' i
|
n keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for fb_comment in content:
fb_comments['data']['comments'].append(fb_comment)
if url =='':
break
except (KeyError, TypeError):
fb_comments['status'] = 'Unknown error'
break
else:
fb_comments['status'] = str(response.status_code)
break
fb_comments['data']['comments_count'] = len(fb_comments['data']['comments'])
return fb_comments
# Get all the data about the page
def fb_get_all_data(fb_page_id, access_token):
    # read all the group data
fb_group = fb_get_group_data(fb_page_id, access_token)
if fb_group['status'] == 'OK':
print('Group id: %s name: %s' % (fb_group['data']['id'], fb_group['data']['name']))
print('User in group: %s' % fb_group['data']['users_count'])
        # write to the DB
else:
print(fb_group['status'])
exit()
    # read all posts
    print('*************reading all posts************')
data = fb_get_all_posts(fb_page_id, access_token)
if data['status'] == 'OK':
fb_posts = copy.deepcopy(data['data']['posts'])
posts_count = data['data']['posts_count']
# print(fb_posts[0])
print('Posts in group:
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/EXT/vertex_shader.py
|
Python
|
lgpl-3.0
| 11,362
| 0.04031
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_vertex_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_vertex_shader',error_checker=_errors._error_checker)
GL_CURRENT_VERTEX_EXT=_C('GL_CURRENT_VERTEX_EXT',0x87E2)
GL_FULL_RANGE_EXT=_C('GL_FULL_RANGE_EXT',0x87E1)
GL_INVARIANT_DATATYPE_EXT=_C('GL_INVARIANT_DATATYPE_EXT',0x87EB)
GL_INVARIANT_EXT=_C('GL_INVARIANT_EXT',0x87C2)
GL_INVARIANT_VALUE_EXT=_C('GL_INVARIANT_VALUE_EXT',0x87EA)
GL_LOCAL_CONSTANT_DATATYPE_EXT=_C('GL_LOCAL_CONSTANT_DATATYPE_EXT',0x87ED)
GL_LOCAL_CONSTANT_EXT=_C('GL_LOCAL_CONSTANT_EXT',0x87C3)
GL_LOCAL_CONSTANT_VALUE_EXT=_C('GL_LOCAL_CONSTANT_VALUE_EXT',0x87EC)
GL_LOCAL_EXT=_C('GL_LOCAL_EXT',0x87C4)
GL_MATRIX_EXT=_C('GL_MATRIX_EXT',0x87C0)
GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT',0x87CA)
GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT',0x87CD)
GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT',0x87CE)
GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',0x87CC)
GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT',0x87CB)
GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT=_C('GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT',0x87C5)
GL_MAX_VERTEX_SHADER_INVARIANTS_EXT=_C('GL_MAX_VERTEX_SHADER_INVARIANTS_EXT',0x87C7)
GL_MAX_VERTEX_SHADER_LOCALS_EXT=_C('GL_MAX_VERTEX_SHADER_LOCALS_EXT',0x87C9)
GL_MAX_VERTEX_SHADER_LOCAL_CONSTAN
|
TS_EXT=_C('GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',0x87C8)
GL_MAX_VERTEX_SHADER_VARIANTS_EXT=_C('GL_MAX_VERTEX_SHADER_VARIANTS_EXT',0x87C6)
GL_MVP_MATRIX_EXT=_C('GL_MVP_MATRIX_EXT',0x87E3)
GL_NEGATIVE_ONE_EXT=_C('GL_NEGATIVE_ONE_EXT',0x87DF)
GL_NEGATIVE_W_EXT=_C('GL_NEGATIVE_W_EXT',0x87DC)
GL_NEGATIVE_X
|
_EXT=_C('GL_NEGATIVE_X_EXT',0x87D9)
GL_NEGATIVE_Y_EXT=_C('GL_NEGATIVE_Y_EXT',0x87DA)
GL_NEGATIVE_Z_EXT=_C('GL_NEGATIVE_Z_EXT',0x87DB)
GL_NORMALIZED_RANGE_EXT=_C('GL_NORMALIZED_RANGE_EXT',0x87E0)
GL_ONE_EXT=_C('GL_ONE_EXT',0x87DE)
GL_OP_ADD_EXT=_C('GL_OP_ADD_EXT',0x8787)
GL_OP_CLAMP_EXT=_C('GL_OP_CLAMP_EXT',0x878E)
GL_OP_CROSS_PRODUCT_EXT=_C('GL_OP_CROSS_PRODUCT_EXT',0x8797)
GL_OP_DOT3_EXT=_C('GL_OP_DOT3_EXT',0x8784)
GL_OP_DOT4_EXT=_C('GL_OP_DOT4_EXT',0x8785)
GL_OP_EXP_BASE_2_EXT=_C('GL_OP_EXP_BASE_2_EXT',0x8791)
GL_OP_FLOOR_EXT=_C('GL_OP_FLOOR_EXT',0x878F)
GL_OP_FRAC_EXT=_C('GL_OP_FRAC_EXT',0x8789)
GL_OP_INDEX_EXT=_C('GL_OP_INDEX_EXT',0x8782)
GL_OP_LOG_BASE_2_EXT=_C('GL_OP_LOG_BASE_2_EXT',0x8792)
GL_OP_MADD_EXT=_C('GL_OP_MADD_EXT',0x8788)
GL_OP_MAX_EXT=_C('GL_OP_MAX_EXT',0x878A)
GL_OP_MIN_EXT=_C('GL_OP_MIN_EXT',0x878B)
GL_OP_MOV_EXT=_C('GL_OP_MOV_EXT',0x8799)
GL_OP_MULTIPLY_MATRIX_EXT=_C('GL_OP_MULTIPLY_MATRIX_EXT',0x8798)
GL_OP_MUL_EXT=_C('GL_OP_MUL_EXT',0x8786)
GL_OP_NEGATE_EXT=_C('GL_OP_NEGATE_EXT',0x8783)
GL_OP_POWER_EXT=_C('GL_OP_POWER_EXT',0x8793)
GL_OP_RECIP_EXT=_C('GL_OP_RECIP_EXT',0x8794)
GL_OP_RECIP_SQRT_EXT=_C('GL_OP_RECIP_SQRT_EXT',0x8795)
GL_OP_ROUND_EXT=_C('GL_OP_ROUND_EXT',0x8790)
GL_OP_SET_GE_EXT=_C('GL_OP_SET_GE_EXT',0x878C)
GL_OP_SET_LT_EXT=_C('GL_OP_SET_LT_EXT',0x878D)
GL_OP_SUB_EXT=_C('GL_OP_SUB_EXT',0x8796)
GL_OUTPUT_COLOR0_EXT=_C('GL_OUTPUT_COLOR0_EXT',0x879B)
GL_OUTPUT_COLOR1_EXT=_C('GL_OUTPUT_COLOR1_EXT',0x879C)
GL_OUTPUT_FOG_EXT=_C('GL_OUTPUT_FOG_EXT',0x87BD)
GL_OUTPUT_TEXTURE_COORD0_EXT=_C('GL_OUTPUT_TEXTURE_COORD0_EXT',0x879D)
GL_OUTPUT_TEXTURE_COORD10_EXT=_C('GL_OUTPUT_TEXTURE_COORD10_EXT',0x87A7)
GL_OUTPUT_TEXTURE_COORD11_EXT=_C('GL_OUTPUT_TEXTURE_COORD11_EXT',0x87A8)
GL_OUTPUT_TEXTURE_COORD12_EXT=_C('GL_OUTPUT_TEXTURE_COORD12_EXT',0x87A9)
GL_OUTPUT_TEXTURE_COORD13_EXT=_C('GL_OUTPUT_TEXTURE_COORD13_EXT',0x87AA)
GL_OUTPUT_TEXTURE_COORD14_EXT=_C('GL_OUTPUT_TEXTURE_COORD14_EXT',0x87AB)
GL_OUTPUT_TEXTURE_COORD15_EXT=_C('GL_OUTPUT_TEXTURE_COORD15_EXT',0x87AC)
GL_OUTPUT_TEXTURE_COORD16_EXT=_C('GL_OUTPUT_TEXTURE_COORD16_EXT',0x87AD)
GL_OUTPUT_TEXTURE_COORD17_EXT=_C('GL_OUTPUT_TEXTURE_COORD17_EXT',0x87AE)
GL_OUTPUT_TEXTURE_COORD18_EXT=_C('GL_OUTPUT_TEXTURE_COORD18_EXT',0x87AF)
GL_OUTPUT_TEXTURE_COORD19_EXT=_C('GL_OUTPUT_TEXTURE_COORD19_EXT',0x87B0)
GL_OUTPUT_TEXTURE_COORD1_EXT=_C('GL_OUTPUT_TEXTURE_COORD1_EXT',0x879E)
GL_OUTPUT_TEXTURE_COORD20_EXT=_C('GL_OUTPUT_TEXTURE_COORD20_EXT',0x87B1)
GL_OUTPUT_TEXTURE_COORD21_EXT=_C('GL_OUTPUT_TEXTURE_COORD21_EXT',0x87B2)
GL_OUTPUT_TEXTURE_COORD22_EXT=_C('GL_OUTPUT_TEXTURE_COORD22_EXT',0x87B3)
GL_OUTPUT_TEXTURE_COORD23_EXT=_C('GL_OUTPUT_TEXTURE_COORD23_EXT',0x87B4)
GL_OUTPUT_TEXTURE_COORD24_EXT=_C('GL_OUTPUT_TEXTURE_COORD24_EXT',0x87B5)
GL_OUTPUT_TEXTURE_COORD25_EXT=_C('GL_OUTPUT_TEXTURE_COORD25_EXT',0x87B6)
GL_OUTPUT_TEXTURE_COORD26_EXT=_C('GL_OUTPUT_TEXTURE_COORD26_EXT',0x87B7)
GL_OUTPUT_TEXTURE_COORD27_EXT=_C('GL_OUTPUT_TEXTURE_COORD27_EXT',0x87B8)
GL_OUTPUT_TEXTURE_COORD28_EXT=_C('GL_OUTPUT_TEXTURE_COORD28_EXT',0x87B9)
GL_OUTPUT_TEXTURE_COORD29_EXT=_C('GL_OUTPUT_TEXTURE_COORD29_EXT',0x87BA)
GL_OUTPUT_TEXTURE_COORD2_EXT=_C('GL_OUTPUT_TEXTURE_COORD2_EXT',0x879F)
GL_OUTPUT_TEXTURE_COORD30_EXT=_C('GL_OUTPUT_TEXTURE_COORD30_EXT',0x87BB)
GL_OUTPUT_TEXTURE_COORD31_EXT=_C('GL_OUTPUT_TEXTURE_COORD31_EXT',0x87BC)
GL_OUTPUT_TEXTURE_COORD3_EXT=_C('GL_OUTPUT_TEXTURE_COORD3_EXT',0x87A0)
GL_OUTPUT_TEXTURE_COORD4_EXT=_C('GL_OUTPUT_TEXTURE_COORD4_EXT',0x87A1)
GL_OUTPUT_TEXTURE_COORD5_EXT=_C('GL_OUTPUT_TEXTURE_COORD5_EXT',0x87A2)
GL_OUTPUT_TEXTURE_COORD6_EXT=_C('GL_OUTPUT_TEXTURE_COORD6_EXT',0x87A3)
GL_OUTPUT_TEXTURE_COORD7_EXT=_C('GL_OUTPUT_TEXTURE_COORD7_EXT',0x87A4)
GL_OUTPUT_TEXTURE_COORD8_EXT=_C('GL_OUTPUT_TEXTURE_COORD8_EXT',0x87A5)
GL_OUTPUT_TEXTURE_COORD9_EXT=_C('GL_OUTPUT_TEXTURE_COORD9_EXT',0x87A6)
GL_OUTPUT_VERTEX_EXT=_C('GL_OUTPUT_VERTEX_EXT',0x879A)
GL_SCALAR_EXT=_C('GL_SCALAR_EXT',0x87BE)
GL_VARIANT_ARRAY_EXT=_C('GL_VARIANT_ARRAY_EXT',0x87E8)
GL_VARIANT_ARRAY_POINTER_EXT=_C('GL_VARIANT_ARRAY_POINTER_EXT',0x87E9)
GL_VARIANT_ARRAY_STRIDE_EXT=_C('GL_VARIANT_ARRAY_STRIDE_EXT',0x87E6)
GL_VARIANT_ARRAY_TYPE_EXT=_C('GL_VARIANT_ARRAY_TYPE_EXT',0x87E7)
GL_VARIANT_DATATYPE_EXT=_C('GL_VARIANT_DATATYPE_EXT',0x87E5)
GL_VARIANT_EXT=_C('GL_VARIANT_EXT',0x87C1)
GL_VARIANT_VALUE_EXT=_C('GL_VARIANT_VALUE_EXT',0x87E4)
GL_VECTOR_EXT=_C('GL_VECTOR_EXT',0x87BF)
GL_VERTEX_SHADER_BINDING_EXT=_C('GL_VERTEX_SHADER_BINDING_EXT',0x8781)
GL_VERTEX_SHADER_EXT=_C('GL_VERTEX_SHADER_EXT',0x8780)
GL_VERTEX_SHADER_INSTRUCTIONS_EXT=_C('GL_VERTEX_SHADER_INSTRUCTIONS_EXT',0x87CF)
GL_VERTEX_SHADER_INVARIANTS_EXT=_C('GL_VERTEX_SHADER_INVARIANTS_EXT',0x87D1)
GL_VERTEX_SHADER_LOCALS_EXT=_C('GL_VERTEX_SHADER_LOCALS_EXT',0x87D3)
GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT=_C('GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',0x87D2)
GL_VERTEX_SHADER_OPTIMIZED_EXT=_C('GL_VERTEX_SHADER_OPTIMIZED_EXT',0x87D4)
GL_VERTEX_SHADER_VARIANTS_EXT=_C('GL_VERTEX_SHADER_VARIANTS_EXT',0x87D0)
GL_W_EXT=_C('GL_W_EXT',0x87D8)
GL_X_EXT=_C('GL_X_EXT',0x87D5)
GL_Y_EXT=_C('GL_Y_EXT',0x87D6)
GL_ZERO_EXT=_C('GL_ZERO_EXT',0x87DD)
GL_Z_EXT=_C('GL_Z_EXT',0x87D7)
@_f
@_p.types(None,)
def glBeginVertexShaderEXT():pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBindLightParameterEXT(light,value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBindMaterialParameterEXT(face,value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum)
def glBindParameterEXT(value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glBindTexGenParameterEXT(unit,coord,value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBindTextureUnitParameterEXT(unit,value):pass
@_f
@_p.types(None,_cs.GLuint)
def glBindVertexShaderEXT(id):pass
@_f
@_p.types(None,_cs.GLuint)
def glDeleteVertexShaderEXT(id):pass
@_f
@_p.types(None,_cs.GLuint)
def glDisableVariantClientStateEXT(id):pass
@_f
@_p.types(None,_cs.GLuint)
def glEnableVariantClientStateEXT(id):pass
@_f
@_p.types(None,)
def glEndVertexShaderEXT():pass
@_f
@_p.types(No
|
SYSU-MATHZH/Dedekind-Django
|
project/sua/views/form/views2.py
|
Python
|
gpl-3.0
| 8,417
| 0.001901
|
from .base import BaseViewSet
from rest_framework.permissions import IsAdminUser
from project.sua.views.utils.mixins import NavMixin
from project.sua.permissions import IsTheStudentOrIsAdminUser, IsAdminUserOrReadOnly,IsAdminUserOrActivity,IsAdminUserOrStudent
from project.sua.models import Student, Sua, Activity, Application, Publicity, Appeal, Proof
import project.sua.views.form.serializers as firs
import project.sua.serializers as sirs
from rest_framework.decorators import list_route, detail_route
from rest_framework.response import Response
class StudentViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddStudentSerializer
queryset = Student.objects.filter(deleted_at=None)
revoke_queryset = Student.objects.all()
revoke_success_url = delete_success_url = '/'
# filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/student_form.html']
elif self.action == 'detail':
return ['sua/student_detail.html']
def get_serializer_class(self):
if self.action in ['add', 'change']:
return firs.AddStudentSerializer
elif self.action == 'detail':
return firs.detailofstudentSerializer
else:
return self.serializer_class
def get_permissions(self):
if self.action in ['add','change']:
permission_classes = (IsAdminUser, )
elif self.action in ['detail',]:
permission_classes = (IsAdminUserOrStudent, )
else:
permission_classes = (IsAdminUserOrReadOnly, )
return [permission() for permission in permission_classes]
class SuaViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddSuaSerializer
queryset = Sua.objects.filter(deleted_at=None)
revoke_queryset = Sua.objects.all()
revoke_success_url = delete_success_url = '/'
#filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/sua_form.html']
elif self.action == 'detail':
return ['sua/sua_detail.html']
def get_serializer_class(self):
if self.action in ['add', 'change', 'detail']:
return firs.AddSuaSerializer
else:
return self.serializer_class
def get_permissions(self):
if self.action == 'change':
permission_classes = (IsAdminUser, )
elif self.action == 'detail':
            permission_classes = (IsTheStudentOrIsAdminUser,)
else:
permission_classes = (IsAdminUserOrReadOnly,)
return [permission() for permission in permission_classes]
class ActivityViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddActivitySerializer
queryset = Activity.objects.filter(deleted_at=None)
revoke_queryset = Activity.objects.all()
revoke_success_url = delete_success_url = '/'
#filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/activity_form.html']
elif self.action == 'detail':
return ['sua/activity_detail.html']
def get_serializer_class(self):
if self.action in [
|
'add', 'change', 'detail']:
return firs.AddActivitySerializer
else:
return self.serializer_class
def get_permissions(self):
|
if self.action in ['add', 'change', 'detail']:
permission_classes = (IsAdminUserOrActivity,)
else:
permission_classes = (IsAdminUserOrActivity, )
return [permission() for permission in permission_classes]
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class ApplicationViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddApplicationSerializer
queryset = Application.objects.filter(deleted_at=None)
revoke_queryset = Application.objects.all()
revoke_success_url = delete_success_url = '/'
#filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/application_form.html']
elif self.action == 'detail':
return ['sua/application_detail.html']
def get_serializer_class(self):
if self.action in ['add', 'change', 'detail']:
return firs.AddApplicationSerializer
else:
return self.serializer_class
def get_permissions(self):
if self.action == 'change':
permission_classes = (IsAdminUser, )
elif self.action == 'detail':
permission_classes = (IsTheStudentOrIsAdminUser,)
else:
permission_classes = (IsAdminUserOrReadOnly,)
return [permission() for permission in permission_classes]
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class PublicityViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddPublicitySerializer
queryset = Publicity.objects.filter(deleted_at=None)
revoke_queryset = Publicity.objects.all()
revoke_success_url = delete_success_url = '/'
#filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/publicity_form.html']
elif self.action == 'detail':
return ['sua/publicity_detail.html']
def get_serializer_class(self):
if self.action in ['add', 'change', 'detail']:
return firs.AddPublicitySerializer
else:
return self.serializer_class
def get_permissions(self):
if self.action == 'change':
permission_classes = (IsAdminUser, )
elif self.action == 'detail':
permission_classes = (IsTheStudentOrIsAdminUser,)
else:
permission_classes = (IsAdminUserOrReadOnly, )
return [permission() for permission in permission_classes]
class AppealViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddAppealSerializer
queryset = Appeal.objects.filter(deleted_at=None)
revoke_queryset = Appeal.objects.all()
revoke_success_url = delete_success_url = '/'
#filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/appeal_form.html']
elif self.action == 'detail':
return ['sua/appeal_detail.html']
def get_serializer_class(self):
if self.action in ['add', 'change', 'detail']:
return firs.AddAppealSerializer
else:
return self.serializer_class
def get_permissions(self):
if self.action == 'change':
permission_classes = (IsAdminUser, )
elif self.action == 'detail':
permission_classes = (IsTheStudentOrIsAdminUser, )
else:
permission_classes = (IsAdminUserOrReadOnly, )
return [permission() for permission in permission_classes]
class ProofViewSet(BaseViewSet, NavMixin):
components = {
'nav': 'nav',
}
serializer_class = firs.AddProofSerializer
queryset = Proof.objects.filter(deleted_at=None)
revoke_queryset = Proof.objects.all()
revoke_success_url = delete_success_url = '/'
#filter_fields = ('grade', 'classtype')
def get_template_names(self):
if self.action in ['add', 'change']:
return ['sua/sua_form.html']
elif self.action == 'detail':
return ['sua/proof_detail.html']
def get_serializer_class(self):
if self.action in ['add', 'change', 'detail']:
return firs.AddProofSerializer
else:
return self.serializer_class
def get_permissions(self):
if self.action == 'change':
permission_classes = (IsAdminUser,)
|
datalogics/scons
|
test/Fortran/F90FLAGS.py
|
Python
|
mit
| 6,990
| 0.00329
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import string
import sys
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
_exe = TestSCons._exe
if sys.platform == 'win32':
test.write('mylink.py', r"""
import string
import sys
args = sys.argv[1:]
while args:
a = args[0]
if a[0] != '/':
break
args = args[1:]
if string.lower(a[:5]) == '/out:': out = a[5:]
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:5] != '#link':
outfile.write(l)
sys.exit(0)
""")
else:
test.write('mylink.py', r"""
import getopt
import sys
opts, args = getopt.getopt(sys.argv[1:], 'o:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:5] != '#link':
outfile.write(l)
sys.exit(0)
""")
test.write('myfortran.py', r"""
import getopt
import sys
comment = '#' + sys.argv[1]
opts, args = getopt.getopt(sys.argv[2:], 'co:xy')
optstring = ''
for opt, arg in opts:
if opt == '-o': out = arg
else: optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write(optstring + "\n")
for l in infile.readlines():
if l[:len(comment)] != comment
|
:
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct',
|
"""
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
F90 = r'%(_python_)s myfortran.py g90',
F90FLAGS = '-x',
FORTRAN = r'%(_python_)s myfortran.py fortran',
FORTRANFLAGS = '-y')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
env.Program(target = 'test09', source = 'test09.f77')
env.Program(target = 'test10', source = 'test10.F77')
env.Program(target = 'test11', source = 'test11.f90')
env.Program(target = 'test12', source = 'test12.F90')
env.Program(target = 'test13', source = 'test13.f95')
env.Program(target = 'test14', source = 'test14.F95')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#fortran\n")
test.write('test02.F', "This is a .F file.\n#link\n#fortran\n")
test.write('test03.for', "This is a .for file.\n#link\n#fortran\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#fortran\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#fortran\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#fortran\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#fortran\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#fortran\n")
test.write('test09.f77', "This is a .f77 file.\n#link\n#fortran\n")
test.write('test10.F77', "This is a .F77 file.\n#link\n#fortran\n")
test.write('test11.f90', "This is a .f90 file.\n#link\n#g90\n")
test.write('test12.F90', "This is a .F90 file.\n#link\n#g90\n")
test.write('test13.f95', "This is a .f95 file.\n#link\n#fortran\n")
test.write('test14.F95', "This is a .F95 file.\n#link\n#fortran\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, " -c -y\nThis is a .f file.\n")
test.must_match('test02' + _exe, " -c -y\nThis is a .F file.\n")
test.must_match('test03' + _exe, " -c -y\nThis is a .for file.\n")
test.must_match('test04' + _exe, " -c -y\nThis is a .FOR file.\n")
test.must_match('test05' + _exe, " -c -y\nThis is a .ftn file.\n")
test.must_match('test06' + _exe, " -c -y\nThis is a .FTN file.\n")
test.must_match('test07' + _exe, " -c -y\nThis is a .fpp file.\n")
test.must_match('test08' + _exe, " -c -y\nThis is a .FPP file.\n")
test.must_match('test09' + _exe, " -c -y\nThis is a .f77 file.\n")
test.must_match('test10' + _exe, " -c -y\nThis is a .F77 file.\n")
test.must_match('test11' + _exe, " -c -x\nThis is a .f90 file.\n")
test.must_match('test12' + _exe, " -c -x\nThis is a .F90 file.\n")
test.must_match('test13' + _exe, " -c -y\nThis is a .f95 file.\n")
test.must_match('test14' + _exe, " -c -y\nThis is a .F95 file.\n")
g90 = test.detect('F90', 'g90')
FTN_LIB = TestSCons.fortran_lib
if g90:
test.write("wrapper.py",
"""import os
import string
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(string.join(sys.argv[1:], " "))
""" % string.replace(test.workpath('wrapper.out'), '\\', '\\\\'))
test.write('SConstruct', """
foo = Environment(LIBS = %(FTN_LIB)s)
f90 = foo.Dictionary('F90')
bar = foo.Clone(F90 = r'%(_python_)s wrapper.py ' + f90, F90FLAGS = '-Ix')
foo.Program(target = 'foo', source = 'foo.f')
bar.Program(target = 'bar', source = 'bar.f')
""" % locals())
test.write('foo.f', r"""
PROGRAM FOO
USE MOD_BAR
PRINT *,'foo.f'
CALL P
STOP
END
MODULE MOD_BAR
IMPLICIT NONE
CONTAINS
SUBROUTINE P
PRINT *,'mod_bar'
END SUBROUTINE P
END MODULE MOD_BAR
""")
test.write('bar.f', r"""
PROGRAM BAR
USE MOD_FOO
PRINT *,'bar.f'
CALL P
STOP
END
""")
test.write('foo_mod.f', r"""
MODULE MOD_FOO
IMPLICIT NONE
CONTAINS
SUBROUTINE P
PRINT *,'mod_foo'
END SUBROUTINE P
END MODULE MOD_FOO
""")
test.run(arguments = 'foo' + _exe, stderr = None)
test.run(program = test.workpath('foo'), stdout = " foo.f\n")
test.must_not_exist('wrapper.out')
test.run(arguments = 'bar' + _exe)
test.run(program = test.workpath('bar'), stdout = " bar.f\n")
test.must_match('wrapper.out', "wrapper.py\n")
test.pass_test()
|
DannyVim/ToolsCollection
|
Outdated/db_movie.py
|
Python
|
gpl-2.0
| 2,485
| 0.000461
|
# -*- coding: utf-8 -*-
"""
This is a crawler that fetches a user's Douban data so the user can keep a local backup.
Supported:
1. Douban movies; Douban books (not supported yet)
2. The output is a comma-separated CSV file.
@author: DannyVim
"""
import urllib2 as ur
from bs4 import BeautifulSoup as bs
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
# BASE URL
def basepage(wa):
m_wish = 'http://movie.douban.com/people/' + user + '/wish?start='
m_do = 'http://movie.douban.com/people/' + user + '/do?start='
m_collect = 'http://movie.douban.com/people/' + user + '/collect?start='
if wa == 'do':
baseurl = m_do
elif wa == 'wish':
baseurl = m_wish
elif wa == 'collect':
baseurl = m_collect
link_list(baseurl)
# Find out how many pages the listing has, then open each page to fetch its data
def link_list(pageurl):
info = ur.urlopen(pageurl)
soup = bs(info)
try:
t = soup.find('span', class_='thispage')['data-total-page']
except TypeError:
content(pageurl)
else:
n = 0
t = int(t) - 1
for i in range(t):
pagelist = pageurl + str(n)
content(pagelist)
n = n + 15
            # Show the progress of the program; this only works in CMD, though
percent = 1.0 * i / t * 100
print 'complete percent:' + str(percent) + '%',
sys.stdout.write("\r")
time.sleep(0.1)
# Parse the static page with the bs4 library and pick out the useful data
def content(html):
info = ur.urlopen(html)
soup = bs(info)
for
|
tag in soup.body(attrs={'class': 'item'}):
datum = open('datum.csv', 'a+')
title = ta
|
g.em.string.strip()
url = tag.li.a.get('href')
date = tag.find('span', class_='date').get_text()
comment = tag.find('span', class_='comment')
if comment == None:
comment = ''
else:
comment = comment.get_text()
comment = comment.encode('utf-8')
title = title.encode('utf-8')
url = url.encode('utf-8')
date = date.encode('utf-8')
print >> datum, url, ',', date, ',', title, ',', comment
datum.close()
# Run
print u'This is a crawler that fetches your Douban data so you can keep a local backup.'
user = raw_input('Please input your DB user name:')
wanted = raw_input('Please input what you want to sync:(do,wish,collect)')
basepage(wanted)
|
yrchen/CommonRepo
|
commonrepo/groups/migrations/0002_group_members.py
|
Python
|
apache-2.0
| 508
| 0
|
# -*- coding: u
|
tf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('groups', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='group',
name='members',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL
|
),
),
]
|
mdda/Reverse-GoL
|
benchmark/speed_numpy.py
|
Python
|
mit
| 1,447
| 0.034554
|
import numpy
def iterate(Z):
# find number of neighbours that each square has
N = numpy.zeros(Z.shape)
N[1:, 1:] += Z[:-1, :-1]
N[1:, :-1] += Z[:-1, 1:]
N[:-1, 1:] += Z[1:, :-1]
N[:-1, :-1] += Z[1:, 1:]
N[:-1, :] += Z[1:, :]
N[1:, :] += Z[:-1, :]
N[:, :-1] += Z[:, 1:]
N[:, 1:] += Z[:, :-1]
# a live cell is killed if it has fewer
# than 2 or more than 3 neighbours.
part1 = ((Z == 1) & (N < 4) & (N > 1))
    # a new cell forms if a square has exactly three neighbours
part2 = ((Z == 0) & (N == 3))
return (part1 | part2).astype(int)
Z = numpy.array([[0,0,0,0,0,0],
[0,0,0,1,0,0],
[0,1,0,1,0,0],
[0,0,1,1,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0]])
glider = numpy.array([[0,0,1],
[1,0,1],
[0,1,1]])
Z = numpy.zeros((22,22), dtype=numpy.int)
Z[1:1+glider.shape[0], 1:1+glider.shape[1]] = glider
print 'I
|
nitial state:'
print Z[1:-1,1:-1]
for i in range(65):
Z = iterate(
|
Z)
print 'Final state:'
#print Z[1:-1,1:-1]
print Z[:,:]
print "Problem with edges..."
def test_timing():
import timeit
def time_iter():
Z = numpy.zeros((22,22), dtype=numpy.int)
Z[1:1+glider.shape[0], 1:1+glider.shape[1]] = glider
for i in range(65):
Z = iterate(Z)
t=timeit.Timer(time_iter)
print t.repeat(1, 1000)
test_timing()
|
orbitfp7/nova
|
nova/tests/unit/test_hacking.py
|
Python
|
apache-2.0
| 22,417
| 0.000491
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import mock
import pep8
from nova.hacking import checks
from nova import test
class HackingTestCase(test.NoDBTestCase):
"""This class tests the hacking checks in nova.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method the return will be False/None if
there is no violation in the sample input. If there is an error a tuple is
returned with a position in the line, and a message. So to check the result
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
def test_virt_driver_imports(self):
expect = (0, "N311: importing code from other virt drivers forbidden")
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"import nova.virt.libvirt.utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/libvirt/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"import nova.virt.firewall",
"./nova/virt/libvirt/firewall.py"))
def test_virt_driver_config_vars(self):
self.assertIsInstance(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/xenapi/driver.py"), tuple)
self.assertIsNone(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/libvirt/volume.py"))
def test_no_vi_headers(self):
lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n']
self.assertIsNone(checks.no_vi_headers(
"Test string foo", 1, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
2, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
6, lines))
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
9, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
14, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"Test end string for vi",
15, lines))
def test_assert_true_instance(self):
self.assertEqual(len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "
"exception.BuildAbortException))"))), 1)
self.assertEqual(
len(list(checks.assert_true_instance("self.assertTrue()"))), 0)
def test_assert_equal_type(self):
self.assertEqual(len(list(checks.assert_equal_type(
"self.assertEqual(type(als['QuicAssist']), list)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_type("self.assertTrue()"))), 0)
def test_assert_equal_in(self):
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), True)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, any(a==1 for a in b))"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), False)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, any(a==1 for a in b))"))), 0)
def test_assert_equal_none(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
def test_assert_true_or_false_with_in_or_not_in(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B)"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in
|
sachinpro/sachinpro.github.io
|
tensorflow/python/training/momentum_test.py
|
Python
|
apache-2.0
| 17,251
| 0.002493
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class MomentumOptimizerTest(tf.test.TestCase):
def testBasic(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in tf.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in tf.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
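  # A worked numeric sketch (added for illustration; plain Python, not part of
  # the original test) of the rule the assertions above encode:
  # accum = momentum * accum + grad, then var -= lr * accum,
  # with lr=2.0, momentum=0.9 and a constant grad of 0.1.
  def _momentum_update_sketch(self):
    var, accum = 1.0, 0.0
    for _ in range(2):
      accum = 0.9 * accum + 0.1  # step 1: 0.1, step 2: 0.19
      var -= 2.0 * accum         # step 1: 1.0 - 0.2, step 2: 0.8 - 0.38
    return var, accum            # (0.42, 0.19), matching the expected arrays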
def testTensorLearningRateAndMomentum(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
mom_opt = tf.train.MomentumOptimizer(
learning_rate=tf.constant(2.0), momentum=tf.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in tf.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in tf.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
    Return values have been generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
    These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
    db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]
db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]
db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]
db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]
db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]
db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.
|
addition-it-solutions/project-all
|
addons/resource/faces/__init__.py
|
Python
|
agpl-3.0
| 1,258
| 0.00159
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pcalendar import Calendar, WorkingDate, StartDate, EndDate, Minutes
from task import Project, BalancedProject, AdjustedProject, Task, \
STRICT, SLOPPY, SMART, Multi, YearlyMax, WeeklyMax, MonthlyMax, \
DailyMax, VariableLoad
from resource import Resource
|
tacaswell/channelarchiver
|
tests/mock_archiver.py
|
Python
|
mit
| 5,534
| 0.002168
|
# -*- coding: utf-8 -*-
import os
import json
import re
try:
from xmlrpclib import Fault, ProtocolError
except ImportError: # Python 3
from xmlrpc.client import Fault, ProtocolError
from channelarchiver import codes, utils
tests_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(tests_dir, 'data')
def read_data(filename):
path = os.path.join(data_dir, filename + '.json')
return json.loads(open(path).read())
def check_type(value, check_type, expected_name):
if not isinstance(value, check_type):
supplied_name = type(value).__name__.upper()
raise Fault(codes.xmlrpc.TYPE,
('Value of type {0} supplied where type {1} was '
'expected.').format(supplied_name, expected_name))
class MockArchiver(object):
'''
A mock class to simulate XML-RPC calls to a Channel Archiver.
Loads data for a mock archiver with the following archives and
channels:
1001
* EXAMPLE:DOUBLE_SCALAR
- 2012-07-12 21:47:23.663999895: 200.5
- 2012-07-13 02:05:01.443588732: 199.9
- 2012-07-13 07:19:31.806097162: 198.7
- 2012-07-13 11:18:55.671259311: 196.1
* EXAMPLE:INT_WAVEFORM
- 2012-07-12 23:14:19.129599795: [3, 5, 13]
- 2012-07-13 01:31:52.557222630: [2, 4, 11]
- 2012-07-13 08:26:18.558211062: [0, 7, 1]
1008
* EXAMPLE:ENUM_SCALAR
- 2012-07-12 22:41:10.765675810: 7
- 2012-07-13 03:15:42.414257465: 1
- 2012-07-13 09:20:23.623788581: 8
'''
def __init__(self):
self._archives = read_data('archives')
self._info = read_data('info')
def info(self):
return self._info
def archives(self):
archives = []
for key, archive_spec in self._archives.items():
archives.append({
'key': int(key),
'name': archive_spec['name'],
'path': archive_spec['path']
})
return archives
def names(self, key, pattern):
check_type(key, int, 'INT')
check_type(pattern, utils.StrType, 'STRING')
pattern = '.*{0}.*'.format(pattern)
key = str(key)
self._check_key(key)
archive_data = self._archives[key]['data']
regex = re.compile(pattern)
return_data = []
        for channel, channel_data in archive_data.items():
if regex.match(channel) is None:
continue
values = channel_data['values']
first_value = values[0]
last_value = values[-1]
return_data.append({
'name': channel,
'start_sec': first_value['secs'],
'start_nano': first_value['nano'],
'end_sec': last_value['secs'],
'end_nano': last_value['nano']
})
return return_data
def values(self, key, channels, start_sec, start_nano,
end_sec, end_nano, count, interpolation):
check_type(key, int, 'INT')
check_type(channels, (list, tuple), 'ARRAY')
for value in [start_sec,
start_nano,
end_sec,
end_nano,
count,
interpolation]:
if not isinstance(value, int):
raise ProtocolError(
'cr01arc01/cgi-bin/ArchiveDataServer.cgi',
codes.xmlrpc.INTERNAL, 'Internal Server Error',
None)
if not 0 <= interpolation <= 4:
raise Fault(codes.archiver.ARGUMENT_ERROR,
'Invalid how={0}'.format(interpolation))
if interpolation != 0:
raise Exception('Only raw interpolation is supported by'
'MockArchiver.')
key = str(key)
self._check_key(key)
archive_data = self._archives[key]['data']
return_data = []
start = start_sec + 1e-9 * start_nano
end = end_sec + 1e-9 * end_nano
for channel in channels:
try:
channel_data = archive_data[channel].copy()
channel_values = channel_data['values']
for index, value in enumerate(channel_values):
time = value['secs'] + 1e-9 * value['nano']
if not start <= time <= end:
channel_values.pop(index)
del channel_values[count:]
except KeyError:
channel_data = {
'count': 1,
'meta': {
'alarm_high': 0.0,
'alarm_low': 0.0,
'disp_high': 10.0,
'disp_low': 0.0,
'prec': 1,
'type': 1,
'units': '<NO DATA>',
'warn_high': 0.0,
'warn_low': 0.0
},
'name': channel,
'type': 1,
'values': []
}
return_data.append(channel_data)
return return_data
def _check_key(self, key):
if key not in self._archives:
raise Fault(codes.archiver.NO_INDEX,
"Invalid key {0}". format(key))
def __getattr__(self, name):
raise Fault(codes.xmlrpc.NO_SUCH_METHOD,
"Method 'archiver.{0}' not defined". format(name))
|
c-goosen/mytransport-hackathon
|
api/endpoints/interest.py
|
Python
|
mit
| 7,851
| 0.008661
|
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import json
import falcon
import urllib
import uuid
import settings
import requests
from geopy.geocoders import Nominatim
import geopy.distance
from geopy.distance import vincenty
import datetime
radius = []
radius_maps = []
#geoJSON template to create radius (polygon) on geojson.io
geoJSON_template = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
]
}
}
]
}
class interest(object):
global radius
interested = {}
    #radius = []
def proximity_to_others(self, my_coordinates):
if radius:
for x in radius:
radius_center = (x['center'][0],x['center'][1])
my_coordinates = (my_coordinates[0], my_coordinates[1])
distance = vincenty(radius_center, my_coordinates).kilometers
print("Proximity distance")
print(distance)
return distance, x["center"]
else:
return 0, []
def geojson_io_prox(self, resp, my_coordinates, user_name):
global radius
distance = 0
radius = []
try:
distance,radius = self.proximity_to_others(my_coordinates)
except Exception as e:
print(e)
if not distance or distance < 1:
points = []
start = geopy.Point(my_coordinates[0], my_coordinates[1])
d = geopy.distance.VincentyDistance(kilometers = 1)
for x in range(0,360, 10):
points.append(d.destination(point=start, bearing=x))
print("\n\n POINTS")
print("\n\n")
radius_dict = {
'center': my_coordinates,
'radius': points,
'people': [user_name,],
'created_date': datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S %Z %Y")
}
radius.append(radius_dict)
print("\n\n RADIUS: ")
print(radius)
print("\n\n")
else:
for x in radius:
if x["center"] == radius:
x['people'].append(
{'name': user_name,
'coordinates':
my_coordinates}
)
def proximity(self,req, resp, my_coordinates, user_name):
# Works out user/client proximity to mytransport API stops
# Works on a radius of 1km. Assumption on average walk time
global radius_maps
google_map_url = "http://www.google.com/maps/place/"
query_params = {"point":"{},{}".format(my_coordinates[0], my_coordinates[1]),
"radius":"1000"}
endpoint ="api/stops"
headers = {"Authorization": "Bearer {}".format(settings.ACCESS_TOKEN)}
request = requests.get("{}/{}".format(settings.API_URL,endpoint),
params=query_params,
headers=headers)
print("Response from api/stops")
print(request.status_code)
response_data = request.json()
print(type(response_data))
if not response_data:
resp.status = falcon.HTTP_200
your_radius_map = ""
for x in radius_maps:
if x["center"] == my_coordinates:
your_radius_map = x["geoJSON_url"]
messge_dict = {'message' :
"No stops in your area, adding you to interest area", "maps": your_radius_map}
resp.body = json.dumps(messge_dict)
return False
else:
map_list = []
message_dict = {"message":"", "maps":[]}
for x in response_data:
print(x)
if 'geometry' in x:
coordinates = x["geometry"]["coordinates"]
map_list.append("{}{},{}".format(google_map_url,
coordinates[1],
coordinates[0]))
message_dict["maps"] = map_list
if message_dict:
message_dict["message"] = """You have existing stops within 1km
of your location"""
else:
message_dict["messsage"] = """You\shave no existing stops nearby,
we will combine your interest in a stop with others in the area"""
resp.body = json.dumps(message_dict)
resp.status = falcon.HTTP_200
return True
#return True
def geopy_coordinates(self, address,resp):
try:
geolocator = Nominatim()
location = geolocator.geocode(address)
if location.latitude and location.longitude:
return [location.latitude, location.longitude]
except Exception as e:
print(e)
resp.body = """{'message':'Bad address,
            try being more specific and try again'}"""
resp.status = falcon.HTTP_400
def on_get(self, req, resp):
resp_dict = {"message":"Post request needed with GeoLocation data"}
resp.body = json.dumps(resp_dict)
resp.status = falcon.HTTP_200
def on_post(self, req, resp):
# Main API method, post the following
'''
POST Request
data type: JSON
Required: name, address or coordinates
        data format : {
"name" : "Yourname",
"address" : "Your number and street address, province, etc"
"geometry" : { "coordinates" : ["x", "y"] }
'''
global radius_maps
global radius
print(req.headers)
user_name = ""
post_data = json.load(req.stream)
print(post_data)
if "name" in post_data:
user_name = post_data["name"]
print("Username IF statement")
print(user_name)
if "geometry" in post_data:
if not self.proximity(req,resp, post_data["geometry"]["coordinates"],user_name):
self.geojson_io_prox(resp, post_data["geometry"]["coordinates"],user_name)
elif post_data["address"]:
if "address" in post_data:
my_coordinates = self.geopy_coordinates(post_data["address"],resp)
print("BASED ON ADDRESS")
proximity = self.proximity(req, resp, my_coordinates, user_name)
print("PROXIMITY")
print(proximity)
if proximity == False:
print("NO routes")
self.geojson_io_prox(resp,my_coordinates, user_name)
else:
falcon.HTTPMissingParam
resp_dict = { 'message' :
'Please supply a address or coordinates (long,lat)'}
# json.dumps allows proper formating of message
resp.body = json.dumps(resp_dict)
print("Current Radius")
print(radius)
radius_list = []
radius_maps = []
for x in radius:
for y in x['radius']:
radius_list.append([y[1],y[0]])
radius_list.append([x['radius'][0][1],x['radius'][0][0]])
geoJSON_template['features'][0]['geometry']['coordinates'].append(radius_list)
radius_maps.append( {
'center': x['center'],
'geoJSON': geoJSON_template,
'geoJSON_url' : "http://geojson.io/#map=5/{}/{}&data=data:application/json,{}".format(
x['center'][1], x['center'][0], urllib.quote(json.dumps(geoJSON_template).encode()) )
}
)
#resp.body
print(radius_maps)
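# An illustrative request body matching the on_post docstring above; the name,
# address and coordinates below are placeholders, not real data.
if __name__ == '__main__':
    example_body = {
        "name": "Jane",
        "address": "1 Long Street, Cape Town",
        "geometry": {"coordinates": [-33.92, 18.42]},
    }
    print(json.dumps(example_body))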
|
ptonner/GPy
|
GPy/testing/model_tests.py
|
Python
|
bsd-3-clause
| 25,915
| 0.002971
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
class MiscTests(unittest.TestCase):
def setUp(self):
self.N = 20
self.N_new = 50
self.D = 1
self.X = np.random.uniform(-3., 3., (self.N, 1))
self.Y = np.sin(self.X) + np.random.randn(self.N, self.D) * 0.05
self.X_new = np.random.uniform(-3., 3., (self.N_new, 1))
def test_setXY(self):
m = GPy.models.GPRegression(self.X, self.Y)
m.set_XY(np.vstack([self.X, np.random.rand(1,self.X.shape[1])]), np.vstack([self.Y, np.random.rand(1,self.Y.shape[1])]))
m._trigger_params_changed()
self.assertTrue(m.checkgrad())
m.predict(m.X)
def test_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.GPRegression(self.X, self.Y, kernel=k)
m.randomize()
m.likelihood.variance = .5
Kinv = np.linalg.pinv(k.K(self.X) + np.eye(self.N) * m.likelihood.variance)
K_hat = k.K(self.X_new) - k.K(self.X_new, self.X).dot(Kinv).dot(k.K(self.X, self.X_new))
mu_hat = k.K(self.X_new, self.X).dot(Kinv).dot(m.Y_normalized)
        mu, covar = m._raw_predict(self.X_new, full_cov=True)
self.assertEquals(mu.shape, (self.N_new, self.D))
        self.assertEquals(covar.shape, (self.N_new, self.N_new))
np.testing.assert_almost_equal(K_hat, covar)
np.testing.assert_almost_equal(mu_hat, mu)
mu, var = m._raw_predict(self.X_new)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(var.shape, (self.N_new, 1))
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
np.testing.assert_almost_equal(mu_hat, mu)
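    # A plain-numpy sketch (illustration only, not exercised by the tests) of
    # the posterior expressions asserted in test_raw_predict above:
    #   mu*  = K(X*, X) (K(X, X) + s2 I)^-1 y
    #   cov* = K(X*, X*) - K(X*, X) (K(X, X) + s2 I)^-1 K(X, X*)
    def _gp_posterior_sketch(self, kern, X, Y, X_new, noise_variance):
        Kinv = np.linalg.pinv(kern.K(X) + np.eye(X.shape[0]) * noise_variance)
        mu = kern.K(X_new, X).dot(Kinv).dot(Y)
        cov = kern.K(X_new) - kern.K(X_new, X).dot(Kinv).dot(kern.K(X, X_new))
        return mu, cov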
def test_normalizer(self):
k = GPy.kern.RBF(1)
Y = self.Y
mu, std = Y.mean(0), Y.std(0)
m = GPy.models.GPRegression(self.X, Y, kernel=k, normalizer=True)
m.optimize()
assert(m.checkgrad())
k = GPy.kern.RBF(1)
m2 = GPy.models.GPRegression(self.X, (Y-mu)/std, kernel=k, normalizer=False)
m2[:] = m[:]
mu1, var1 = m.predict(m.X, full_cov=True)
mu2, var2 = m2.predict(m2.X, full_cov=True)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
mu1, var1 = m.predict(m.X, full_cov=False)
mu2, var2 = m2.predict(m2.X, full_cov=False)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
q50n = m.predict_quantiles(m.X, (50,))
q50 = m2.predict_quantiles(m2.X, (50,))
np.testing.assert_allclose(q50n[0], (q50[0]*std)+mu)
def check_jacobian(self):
try:
import autograd.numpy as np, autograd as ag, GPy, matplotlib.pyplot as plt
from GPy.models import GradientChecker, GPRegression
except:
raise self.skipTest("autograd not available to check gradients")
def k(X, X2, alpha=1., lengthscale=None):
if lengthscale is None:
lengthscale = np.ones(X.shape[1])
exp = 0.
for q in range(X.shape[1]):
exp += ((X[:, [q]] - X2[:, [q]].T)/lengthscale[q])**2
#exp = np.sqrt(exp)
return alpha * np.exp(-.5*exp)
dk = ag.elementwise_grad(lambda x, x2: k(x, x2, alpha=ke.variance.values, lengthscale=ke.lengthscale.values))
dkdk = ag.elementwise_grad(dk, argnum=1)
ke = GPy.kern.RBF(1, ARD=True)
#ke.randomize()
ke.variance = .2#.randomize()
ke.lengthscale[:] = .5
ke.randomize()
X = np.linspace(-1, 1, 1000)[:,None]
X2 = np.array([[0.]]).T
np.testing.assert_allclose(ke.gradients_X([[1.]], X, X), dk(X, X))
np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X).sum(0), dkdk(X, X))
np.testing.assert_allclose(ke.gradients_X([[1.]], X, X2), dk(X, X2))
np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X2).sum(0), dkdk(X, X2))
m = GPRegression(self.X, self.Y)
def f(x):
m.X[:] = x
return m.log_likelihood()
def df(x):
m.X[:] = x
return m.kern.gradients_X(m.grad_dict['dL_dK'], X)
def ddf(x):
m.X[:] = x
return m.kern.gradients_XX(m.grad_dict['dL_dK'], X).sum(0)
gc = GradientChecker(f, df, self.X)
gc2 = GradientChecker(df, ddf, self.X)
assert(gc.checkgrad())
assert(gc2.checkgrad())
def test_sparse_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
m.randomize()
Z = m.Z[:]
# Not easy to check if woodbury_inv is correct in itself as it requires a large derivation and expression
Kinv = m.posterior.woodbury_inv
K_hat = k.K(self.X_new) - k.K(self.X_new, Z).dot(Kinv).dot(k.K(Z, self.X_new))
mu, covar = m._raw_predict(self.X_new, full_cov=True)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(covar.shape, (self.N_new, self.N_new))
np.testing.assert_almost_equal(K_hat, covar)
# np.testing.assert_almost_equal(mu_hat, mu)
mu, var = m._raw_predict(self.X_new)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(var.shape, (self.N_new, 1))
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
# np.testing.assert_almost_equal(mu_hat, mu)
def test_likelihood_replicate(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[''].values()
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m['']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.Gaussian_noise.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m['.*var'] = 2
m2['.*var'] = m['.*var']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
def test_likelihood_set(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
def test_missing_data(self):
from GPy import kern
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.examples.dimensionality_reduction import _simulate_matern
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, False)
Y = Ylist[0]
        inan = np.random.binomial(1, .9, size=Y.shape).astype(bool) # ~90% missing data
Ymissing = Y.copy()
Ymissing[inan] = np.nan
k = kern.Linear(Q, ARD=True) + kern.White(Q, np.exp(-2)) # + kern.bia
|
wooga/airflow
|
tests/providers/google/cloud/operators/test_dataflow.py
|
Python
|
apache-2.0
| 11,898
| 0.001009
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from airflow.providers.google.cloud.operators.dataflow import (
CheckJobRunning, DataflowCreateJavaJobOperator, DataflowCreatePythonJobOperator,
DataflowTemplatedJobStartOperator,
)
from airflow.version import version
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
PY_INTERPRETER = 'python3'
JAR_FILE = 'gs://my-bucket/example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar'}
}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.providers.google.cloud.operators.dataflow.{}'
TEST_LOCATION = "custom-location"
class TestDataflowPythonOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreatePythonJobOperator(
task_id=TASK_ID,
py_file=PY_FILE,
job_name=JOB_NAME,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION
)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.py_interpreter, PY_INTERPRETER)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_python_workflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
gcs_provide_file.assert_called_once_with(object_url=PY_FILE)
start_python_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=expected_options,
dataflow=mock.ANY,
py_options=PY_OPTIONS,
py_interpreter=PY_INTERPRETER,
py_requirements=[],
py_system_site_packages=False,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class TestDataflowJavaOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreateJavaJobOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_name=JOB_NAME,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION
)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
self.assertEqual(self.dataflow.check_if_running, CheckJobRunning.WaitForRun)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = CheckJobRunning.IgnoreJob
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
start_java_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=mock.ANY,
jar=mock.ANY,
job_class=JOB_CLASS,
append_job_name=True,
multiple_jobs=None,
on_new_job_id_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = True
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_provide_file.assert_not_called()
start_java_hook.assert_not_called()
dataflow_running.assert_called_once_with(
name=JOB_NAME, variables=mock.ANY, project_id=None, location=TEST_LOCATION)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_not_running_exec(self, gcs_hook, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow with option to check if job is running
"""
dataflow_running = dataflo
|
numpy/numpy-refactor
|
numpy/random/mtrand/generate_mtrand_c.py
|
Python
|
bsd-3-clause
| 352
| 0
|
import re
import subprocess
def remove_long_path():
path = 'mtrand.c'
pat = re.compile(r'"
|
[^"]*mtrand\.pyx"')
code = open(path).read()
code = pat.sub(r'"mtrand.pyx"
|
', code)
open(path, 'w').write(code)
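# A worked sketch (illustrative input string only) of what remove_long_path()
# does: any quoted path ending in mtrand.pyx collapses to "mtrand.pyx".
def _substitution_example():
    pat = re.compile(r'"[^"]*mtrand\.pyx"')
    return pat.sub(r'"mtrand.pyx"',
                   '#line 1 "/some/long/build/dir/mtrand.pyx"')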
def main():
subprocess.check_call(['cython', 'mtrand.pyx'])
remove_long_path()
if __name__ == '__main__':
main()
|
AkioNak/bitcoin
|
test/functional/wallet_importprunedfunds.py
|
Python
|
mit
| 5,280
| 0.001515
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.address import key_to_p2wpkh
from test_framework.key import ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif
class ImportPrunedFundsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
self.sync_all()
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
# privkey
eckey = ECKey()
eckey.generate()
address3_privkey = bytes_to_wif(eckey.get_bytes())
address3 = key_to_p2wpkh(eckey.get_pubkey().get_bytes())
self.nodes[0].importprivkey(address3_privkey)
# Check only one address
address_info = self.nodes[0].getaddressinfo(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), COINBASE_MATURITY + 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# Send funds to self
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.generate(self.nodes[0], 1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.generate(self.nodes[0], 1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.generate(self.nodes[0], 1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
# Import with no affiliated address
assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance()
assert_equal(balance1, Decimal(0))
# Import with affiliated address with no rescan
self.nodes[1].createwallet('wwatch', disable_private_keys=True)
wwatch = self.nodes[1].get_wallet_rpc('wwatch')
wwatch.importaddress(address=address2, rescan=False)
wwatch.importprunedfunds(rawtransaction=rawtxn2, txoutproof=proof2)
assert [tx for tx in wwatch.listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
# Import with private key with no rescan
w1 = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
w1.importprivkey(privkey=address3_privkey, rescan=False)
w1.importprunedfunds(rawtxn3, proof3)
assert [tx for tx in w1.listtransactions() if tx['txid'] == txnid3]
balance3 = w1.getbalance()
assert_equal(balance3, Decimal('0.025'))
# Addresses Test - after import
address_info = w1.getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = wwatch.getaddressinfo(address2)
if self.options.descriptors:
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
else:
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = w1.getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
# Remove transactions
assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", w1.removeprun
|
edfunds, txnid1)
assert not [tx for tx in w1.listtransactions(include_watchonly=True) if tx['txid'] == txnid1]
        wwatch.removeprunedfunds(txnid2)
assert not [tx for tx in wwatch.listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
w1.removeprunedfunds(txnid3)
assert not [tx for tx in w1.listtransactions(include_watchonly=True) if tx['txid'] == txnid3]
if __name__ == '__main__':
ImportPrunedFundsTest().main()
|
rnowling/humbaba
|
humbaba/augment_samples.py
|
Python
|
apache-2.0
| 6,775
| 0.002657
|
"""
Copyright 2017 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import defaultdict
from collections import OrderedDict
import itertools
import random
from humbaba.ioutils import read_populations
from humbaba.ioutils import write_populations
from humbaba.sampling import MultinomialSampler
from humbaba.vcf import append_variants
from humbaba.vcf import filter_samples
from humbaba.vcf import VCFReader
from humbaba.vcf import VCFWriter
EPS = 1e-5
def augment_sample_names(sample_names, n_blocks):
augmented_sample_names = []
for i in xrange(n_blocks):
for j, name in enumerate(sample_names):
augmented_sample_names.append(name + "_" + str(i))
return augmented_sample_names
def build_multinomial_model(calls, uniform_for_unknown):
genotype_probs = defaultdict(int)
n_calls = float(len(calls))
unknown_positions = 0
for gt in calls:
if "." not in gt:
genotype_probs[gt] += 1.0 / n_calls
else:
unknown_positions += 1
if uniform_for_unknown and unknown_positions > 0:
remaining_prob = unknown_positions / n_calls
genotype_prob = remaining_prob / 3.0
for gt in (("0", "0"), ("0", "1"), ("1", "1")):
genotype_probs[gt] += genotype_prob
return MultinomialSampler(genotype_probs.items())
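# A worked sketch (illustrative calls only) of the probability assignment made
# above: with two known genotypes and one unknown call, each known genotype
# gets 1/3 of the mass, and with uniform_for_unknown=True the unknown call's
# 1/3 is spread evenly (1/9 each) over ("0","0"), ("0","1") and ("1","1").
def _build_model_example():
    calls = [("0", "0"), ("0", "1"), (".", ".")]
    return build_multinomial_model(calls, True)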
def augment_variant(variant, sampler, augmented_names):
augmented_samples = OrderedDict()
variant_calls = variant.sample_calls.values()
for name, gt in itertools.izip(augmented_names,
itertools.cycle(variant_calls)):
if "." in gt:
gt = sampler.sample()
augmented_samples[name] = gt
return variant._replace(sample_calls=augmented_samples) \
._replace(format="GT")
def augment_samples(reader, writer, orig_populations, augmented_populations, strategy):
sampler = MultinomialSampler([(("0", "0"), 1.0),
(("0", "1"), 1.0),
(("1", "1"), 1.0)])
for variant in reader:
augmented = []
for orig_pop, aug_pop in zip(orig_populations.itervalues(),
augmented_populations.itervalues()):
pop_variant = filter_samples(variant, orig_pop)
if strategy == "uniform-known":
sampler = build_multinomial_model(pop_variant.sample_calls.values(),
True)
elif strategy == "known":
sampler = build_multinomial_model(pop_variant.sample_calls.values(),
False)
pop_augmented = augment_variant(pop_variant, sampler, aug_pop)
augmented.append(pop_augmented)
variant = reduce(append_variants, augmented)
writer.write_variant(variant)
def parseargs():
parser = argparse.ArgumentParser("Humbaba - Augment Samples with Random Sampling")
input_vcf = parser.add_mutually_exclusive_group(required=True)
input_vcf.add_argument("--input-vcf",
type=str,
help="Input VCF file")
input_vcf.add_argument("--input-vcf-gz",
type=str,
help="Input Gzipped VCF file")
output_vcf = parser.add_mutually_exclusive_group(required=True)
output_vcf.add_argument("--output-vcf",
type=str,
help="Output VCF file")
output_vcf.add_argument("--output-vcf-gz",
type=str,
help="Output Gzipped VCF file")
parser.add_argument("--n-repeats",
type=int,
help="Number of times to repeat samples with randomly-sampled genotypes")
parser.add_argument("--sampling-strategy",
type=str,
default="uniform",
choices=["uniform",
"uniform-known",
"known"],
help="Sampling strategy to use when inferring unknown genotypes")
parser.add_argument("--input-populations",
type=str,
help="Population definitions")
parser.add_argument("--output-populations",
type=str,
help="Population definitions with augmented samples")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parseargs()
if args.input_vcf is not None:
reader = VCFReader(args.input_vcf,
compressed=False)
else:
reader = VCFReader(args.input_vcf_gz,
compressed=True)
if args.input_populations:
populations = read_populations(args.input_populations)
augmented_sample_names = []
augmented_populations = OrderedDict()
for pop, sample_names in populations.iteritems():
augmented = augment_sample_names(sample_names,
args.n_repeats)
augmented_populations[pop] = augmented
augmented_sample_names.extend(augmented)
else:
populations = OrderedDict([("all", reader.sample_names)])
augmented_sample_names = augment_sample_names(reader.sample_names,
args.n_repeats)
augmented_populations = OrderedDict([("all", augmented_sample_names)])
if args.output_vcf is not None:
writer = VCFWriter(args.output_vcf,
augmented_sample_names,
compressed=False)
else:
writer = VCFWriter(args.output_vcf_gz,
augmented_sample_names,
compressed=True)
augment_samples(reader,
writer,
populations,
augmented_populations,
args.sampling_strategy)
writer.close()
if args.output_populations:
write_populations(args.output_populations,
augmented_populations)
|
joachimmetz/dfvfs
|
tests/vfs/apfs_file_system.py
|
Python
|
apache-2.0
| 4,316
| 0.002549
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file system implementation using pyfsapfs."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import apfs_file_system
from tests import test_lib as shared_test_lib
class APFSFileSystemTest(shared_test_lib.BaseTestCase):
"""Tests the APFS file entry."""
_IDENTIFIER_PASSWORDS_TXT = 20
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['apfs.raw'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
self._apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/apfs1',
parent=test_raw_path_spec)
self._apfs_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/',
parent=self._apfs_container_path_spec)
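  def _PathSpecChainSketch(self):
    """Illustrative helper (not used by the tests): lists the nesting built in
    setUp, outermost first: APFS (/) <- APFS_CONTAINER (/apfs1) <- RAW <- OS,
    assuming dfvfs path specifications expose type_indicator and parent."""
    chain = []
    path_spec = self._apfs_path_spec
    while path_spec:
      chain.append(path_spec.type_indicator)
      path_spec = path_spec.parent
    return chain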
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenAndClose(self):
"""Test the open and close functionality."""
file_system = apfs_file_system.APFSFileSystem(
self._resolver_context, self._apfs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
def testFileEntryExistsByPathSpec(self):
"""Test the file entry exists by path specification functionality."""
file_system = apfs_file_system.APFSFileSystem(
self._resolver_context, self._apfs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
identifier=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._apfs_container_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/bogus.txt',
parent=self._apfs_container_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
  def testGetFileEntryByPathSpec(self):
"""Tests the GetFileEntryByPathSpec function."""
file_system = apfs_file_system.APFSFileSystem(
self._resolver_context, self._apfs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS,
        identifier=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._apfs_container_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
# There is no way to determine the file_entry.name without a location string
# in the path_spec or retrieving the file_entry from its parent.
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/passwords.txt',
identifier=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._apfs_container_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'passwords.txt')
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_APFS, location='/bogus.txt',
parent=self._apfs_container_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
# TODO: add tests for GetAPFSFileEntryByPathSpec function.
def testGetRootFileEntry(self):
"""Test the get root file entry functionality."""
file_system = apfs_file_system.APFSFileSystem(
self._resolver_context, self._apfs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetRootFileEntry()
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, '')
if __name__ == '__main__':
unittest.main()
|
sulami/feed2maildir
|
feed2maildir/reader.py
|
Python
|
isc
| 778
| 0.007712
|
import feedparser
from multiprocessing.pool import ThreadPool
def fetch_and_parse_feed(args):
name, feed = args
return (name, feedparser.parse(feed))
class Reader:
"""Get updates on the feeds supplied"""
def __init__(self, feeds, silent=False, njobs=4):
self.feeds = []
self.silent = silent
with ThreadPool(processes=njobs) as pool:
for feed, f in pool.imap_unordered(fetch_and_parse_feed, feeds.items()):
if f.bozo:
self.output('WARNING: could not parse feed {}'.format(feed))
else:
                    f.feed_alias_name = feed # user provided text
self.feeds.append(f)
def output(self, arg):
if not self.silent:
print(arg)
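# A minimal usage sketch (the feed URL is a placeholder and is fetched over the
# network when run; unparseable feeds are skipped with a warning).
if __name__ == '__main__':
    reader = Reader({'example': 'https://example.com/feed.xml'}, njobs=2)
    for parsed in reader.feeds:
        print(parsed.feed_alias_name, len(parsed.entries))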
|
PatSunter/pyOTPA
|
TAZs-OD-Matrix/taz_files.py
|
Python
|
bsd-3-clause
| 1,176
| 0.004252
|
import csv
import osgeo.ogr
from osgeo import ogr, osr
EPSG_LAT_LON = 4326
def read_tazs_from_csv(csv_zone_locs_fname):
taz_tuples = []
tfile = open(csv_zone_locs_fname, 'rb')
treader = csv.reader(tfile, delimiter=',', quotechar="'")
for ii, row in enumerate(treader):
if ii == 0: continue
else:
taz_tuple = (row[0], row[1], row[2])
            taz_tuples.append(taz_tuple)
return taz_tuples
def read_tazs_from_shp(shp_zone_locs_fname):
taz_tuples = []
tazs_shp = osgeo.ogr.Open(shp_zone_locs_fname)
tazs_layer = tazs_shp.GetLayer(0)
src_srs = tazs_layer.GetSpatialRef()
target_srs = osr.SpatialReference()
target_srs.ImportFromEPSG(EPSG_LAT_LON)
    transform_to_lat_lon = osr.CoordinateTransformation(src_srs,
target_srs)
for taz_feat in tazs_layer:
taz_id = taz_feat.GetField("N")
taz_geom = taz_feat.GetGeometryRef()
taz_geom.Transform(transform_to_lat_lon)
taz_lat = taz_geom.GetX()
taz_lon = taz_geom.GetY()
taz_tuples.append((taz_id, taz_lat, taz_lon))
taz_feat.Destroy()
tazs_shp.Destroy()
return taz_tuples
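# A minimal usage sketch (the file name is a placeholder); each tuple is
# assumed to be (taz_id, lat, lon), matching the CSV column order read above.
if __name__ == '__main__':
    for taz_id, taz_lat, taz_lon in read_tazs_from_csv('taz_locs.csv'):
        print(taz_id, taz_lat, taz_lon)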
|
stackforge/senlin
|
senlin/tests/unit/engine/test_environment.py
|
Python
|
apache-2.0
| 13,402
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import mock
import six
from senlin.common import exception
from senlin.engine import environment
from senlin.tests.unit.common import base
fake_env_str = """
parameters:
pa: va
pb: vb
custom_profiles:
prof_1: plugin_1
custom_policies:
policy_2: plugin_2
"""
class TestEnvironment(base.SenlinTestCase):
def test_create_global(self):
e = environment.Environment(is_global=True)
self.assertEqual({}, e.params)
        self.assertEqual('profiles', e.profile_registry.registry_name)
self.assertEqual('policies', e.policy_registry.registry_name)
self.assertEqual('drivers', e.driver_registry.registry_name)
self.assertEqual('endpoints', e.endpoint_registry.registry_name)
self.assertTrue(e.profile_registry.is_global)
self.assertTrue(e.policy_registry.is_global)
self.assertTrue(e.driver_registry.is_global)
        self.assertTrue(e.endpoint_registry.is_global)
def test_create_default(self):
ge = environment.global_env()
e = environment.Environment()
reg_prof = e.profile_registry
reg_plcy = e.policy_registry
reg_driv = e.driver_registry
reg_endp = e.endpoint_registry
self.assertEqual({}, e.params)
self.assertEqual('profiles', reg_prof.registry_name)
self.assertEqual('policies', reg_plcy.registry_name)
self.assertEqual('drivers', reg_driv.registry_name)
self.assertEqual('endpoints', reg_endp.registry_name)
self.assertFalse(reg_prof.is_global)
self.assertFalse(reg_plcy.is_global)
self.assertFalse(reg_driv.is_global)
self.assertFalse(reg_endp.is_global)
self.assertEqual('profiles', ge.profile_registry.registry_name)
self.assertEqual('policies', ge.policy_registry.registry_name)
self.assertEqual('drivers', ge.driver_registry.registry_name)
self.assertEqual('endpoints', ge.endpoint_registry.registry_name)
self.assertEqual(ge.profile_registry, reg_prof.global_registry)
self.assertEqual(ge.policy_registry, reg_plcy.global_registry)
self.assertEqual(ge.driver_registry, reg_driv.global_registry)
self.assertEqual(ge.endpoint_registry, reg_endp.global_registry)
def test_create_with_env(self):
env = {
'parameters': {
'p1': 'v1',
'p2': True,
},
'custom_profiles': {
'PROFILE_FOO': 'some.class',
'PROFILE_BAR': 'other.class',
},
'custom_policies': {
'POLICY_Alpha': 'package.alpha',
'POLICY_Beta': 'package.beta',
},
}
e = environment.Environment(env=env, is_global=True)
self.assertEqual('v1', e.params['p1'])
self.assertTrue(e.params['p2'])
self.assertEqual('some.class', e.get_profile('PROFILE_FOO'))
self.assertEqual('other.class', e.get_profile('PROFILE_BAR'))
self.assertEqual('package.alpha', e.get_policy('POLICY_Alpha'))
self.assertEqual('package.beta', e.get_policy('POLICY_Beta'))
def test_parse(self):
env = environment.Environment()
result = env.parse(fake_env_str)
self.assertEqual('va', result['parameters']['pa'])
self.assertEqual('vb', result['parameters']['pb'])
self.assertEqual('plugin_1', result['custom_profiles']['prof_1'])
self.assertEqual('plugin_2', result['custom_policies']['policy_2'])
# unknown sections
env_str = "variables:\n p1: v1"
err = self.assertRaises(ValueError, env.parse, env_str)
self.assertEqual('environment has unknown section "variables"',
six.text_type(err))
# omitted sections
env_str = "parameters:\n p1: v1"
result = env.parse(env_str)
self.assertEqual('v1', result['parameters']['p1'])
self.assertEqual({}, result['custom_profiles'])
self.assertEqual({}, result['custom_policies'])
def test_parse_empty(self):
env = environment.Environment()
result = env.parse(None)
self.assertEqual({}, result)
def test_load(self):
env = environment.Environment()
env.load({})
self.assertEqual({}, env.params)
self.assertEqual({}, env.profile_registry._registry)
self.assertEqual({}, env.policy_registry._registry)
self.assertEqual({}, env.driver_registry._registry)
env_dict = {
'parameters': {
'P': 'V'
},
'custom_profiles': {
'C1': 'class1',
},
'custom_policies': {
'C2': 'class2',
},
}
env.load(env_dict)
self.assertEqual('V', env.params['P'])
self.assertEqual('class1', env.get_profile('C1'))
self.assertEqual('class2', env.get_policy('C2'))
def test_check_plugin_name(self):
env = environment.Environment()
for pt in ['Profile', 'Policy', 'Driver', 'Endpoint']:
res = env._check_plugin_name(pt, 'abc')
self.assertIsNone(res)
ex = self.assertRaises(exception.InvalidPlugin,
env._check_plugin_name, pt, '')
self.assertEqual('%s type name not specified' % pt,
six.text_type(ex))
ex = self.assertRaises(exception.InvalidPlugin,
env._check_plugin_name, pt, None)
self.assertEqual('%s type name not specified' % pt,
six.text_type(ex))
for v in [123, {}, ['a'], ('b', 'c'), True]:
ex = self.assertRaises(exception.InvalidPlugin,
env._check_plugin_name, pt, v)
self.assertEqual('%s type name is not a string' % pt,
six.text_type(ex))
def test_register_and_get_profile(self):
plugin = mock.Mock()
env = environment.Environment()
ex = self.assertRaises(exception.ResourceNotFound,
env.get_profile, 'foo')
self.assertEqual("The profile_type 'foo' could not be found.",
six.text_type(ex))
env.register_profile('foo', plugin)
self.assertEqual(plugin, env.get_profile('foo'))
def test_get_profile_types(self):
env = environment.Environment()
plugin1 = mock.Mock(VERSIONS={'1.0': 'v'})
env.register_profile('foo-1.0', plugin1)
plugin2 = mock.Mock(VERSIONS={'1.2': 'v1'})
env.register_profile('bar-1.2', plugin2)
actual = env.get_profile_types()
self.assertIn(
{'name': 'foo', 'version': '1.0', 'support_status': {'1.0': 'v'}},
actual)
self.assertIn(
{'name': 'bar', 'version': '1.2', 'support_status': {'1.2': 'v1'}},
actual)
def test_register_and_get_policy(self):
plugin = mock.Mock()
env = environment.Environment()
ex = self.assertRaises(exception.ResourceNotFound,
env.get_policy, 'foo')
self.assertEqual("The policy_type 'foo' could not be found.",
six.text_type(ex))
env.register_policy('foo', plugin)
self.assertEqual(plugin, env.get_policy('foo'))
def test_get_policy_types(self):
env = environment.Environment()
plugin1 = mock.Mock(VERSIONS={'0.1': 'v'})
env.register_policy('foo-0.1', plugin1)
plugin2 = mock.Mock(VERSIONS={'0.1': '
|
flackr/quickopen
|
src/db_exception.py
|
Python
|
apache-2.0
| 665
| 0.003008
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from silent_exception import SilentException
class DBException(SilentException):
pass
|
msabramo/pyOpenSSL
|
OpenSSL/test/test_rand.py
|
Python
|
apache-2.0
| 6,054
| 0.00446
|
# Copyright (c) Frederick Dean
# See LICENSE for details.
"""
Unit tests for :py:obj:`OpenSSL.rand`.
"""
from unittest import main
import os
import stat
from OpenSSL.test.util import TestCase, b
from OpenSSL import rand
class RandTests(TestCase):
def test_bytes_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.bytes` raises :py:obj:`TypeError` if called with the wrong
number of arguments or with a non-:py:obj:`int` argument.
"""
self.assertRaises(TypeError, rand.bytes)
self.assertRaises(TypeError, rand.bytes, None)
self.assertRaises(TypeError, rand.bytes, 3, None)
# XXX Test failure of the malloc() in rand_bytes.
def test_bytes(self):
"""
Verify that we can obtain bytes from rand_bytes() and
that they are different each time. Test the parameter
of rand_bytes() for bad values.
"""
b1 = rand.bytes(50)
self.assertEqual(len(b1), 50)
b2 = rand.bytes(num_bytes=50) # parameter by name
        self.assertNotEqual(b1, b2)  # Hip, Hip, Hooray! FIPS compliance
b3 = rand.bytes(num_bytes=0)
self.assertEqual(len(b3), 0)
exc = self.assertRaises(ValueError, rand.bytes, -1)
self.assertEqual(str(exc), "num_bytes must not be negative")
def test_add_wrong_args(self):
"""
When called with the wrong number of arguments, or with arguments not of
type :py:obj:`str` and :py:obj:`int`, :py:obj:`OpenSSL.rand.add` raises :py:obj:`TypeError`.
"""
self.assertRaises(TypeError, rand.add)
self.assertRaises(TypeError, rand.add, b("foo"), None)
self.assertRaises(TypeError, rand.add, None, 3)
self.assertRaises(TypeError, rand.add, b("foo"), 3, None)
def test_add(self):
"""
:py:obj:`OpenSSL.rand.add` adds entropy to the PRNG.
"""
rand.add(b('hamburger'), 3)
def test_seed_wrong_args(self):
"""
When called with the wrong number of arguments, or with a non-:py:obj:`str`
argument, :py:obj:`OpenSSL.rand.seed` raises :py:obj:`TypeError`.
"""
self.assertRaises(TypeError, rand.seed)
self.assertRaises(TypeError, rand.seed, None)
self.assertRaises(TypeError, rand.seed, b("foo"), None)
def test_seed(self):
"""
:py:obj:`OpenSSL.rand.seed` adds entropy to the PRNG.
"""
rand.seed(b('milk shake'))
def test_status_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.status` raises :py:obj:`TypeError` when called with any
arguments.
"""
        self.assertRaises(TypeError, rand.status, None)
def test_status(self):
"""
:py:obj:`OpenSSL.rand.status`
|
returns :py:obj:`True` if the PRNG has sufficient
entropy, :py:obj:`False` otherwise.
"""
# It's hard to know what it is actually going to return. Different
# OpenSSL random engines decide differently whether they have enough
# entropy or not.
self.assertTrue(rand.status() in (1, 2))
def test_egd_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.egd` raises :py:obj:`TypeError` when called with the wrong
number of arguments or with arguments not of type :py:obj:`str` and :py:obj:`int`.
"""
self.assertRaises(TypeError, rand.egd)
self.assertRaises(TypeError, rand.egd, None)
self.assertRaises(TypeError, rand.egd, "foo", None)
self.assertRaises(TypeError, rand.egd, None, 3)
self.assertRaises(TypeError, rand.egd, "foo", 3, None)
def test_egd_missing(self):
"""
:py:obj:`OpenSSL.rand.egd` returns :py:obj:`0` or :py:obj:`-1` if the
EGD socket passed to it does not exist.
"""
result = rand.egd(self.mktemp())
expected = (-1, 0)
self.assertTrue(
result in expected,
"%r not in %r" % (result, expected))
def test_cleanup_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.cleanup` raises :py:obj:`TypeError` when called with any
arguments.
"""
self.assertRaises(TypeError, rand.cleanup, None)
def test_cleanup(self):
"""
:py:obj:`OpenSSL.rand.cleanup` releases the memory used by the PRNG and returns
:py:obj:`None`.
"""
self.assertIdentical(rand.cleanup(), None)
def test_load_file_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.load_file` raises :py:obj:`TypeError` when called the wrong
number of arguments or arguments not of type :py:obj:`str` and :py:obj:`int`.
"""
self.assertRaises(TypeError, rand.load_file)
self.assertRaises(TypeError, rand.load_file, "foo", None)
self.assertRaises(TypeError, rand.load_file, None, 1)
self.assertRaises(TypeError, rand.load_file, "foo", 1, None)
def test_write_file_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.write_file` raises :py:obj:`TypeError` when called with the
wrong number of arguments or a non-:py:obj:`str` argument.
"""
self.assertRaises(TypeError, rand.write_file)
self.assertRaises(TypeError, rand.write_file, None)
self.assertRaises(TypeError, rand.write_file, "foo", None)
def test_files(self):
"""
Test reading and writing of files via rand functions.
"""
# Write random bytes to a file
tmpfile = self.mktemp()
# Make sure it exists (so cleanup definitely succeeds)
fObj = open(tmpfile, 'w')
fObj.close()
try:
rand.write_file(tmpfile)
# Verify length of written file
size = os.stat(tmpfile)[stat.ST_SIZE]
self.assertEquals(size, 1024)
# Read random bytes from file
rand.load_file(tmpfile)
rand.load_file(tmpfile, 4) # specify a length
finally:
# Cleanup
os.unlink(tmpfile)
if __name__ == '__main__':
main()
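# A minimal sketch of the OpenSSL.rand API exercised by the tests above (the
# seed material and the file path are arbitrary example values, not fixtures
# used by this suite).
def _rand_usage_sketch(tmp_path='/tmp/randfile'):
    rand.add(b('some entropy'), 3)     # mix extra entropy into the PRNG
    rand.seed(b('more entropy'))       # seed unconditionally
    if rand.status():                  # truthy once the PRNG has enough entropy
        token = rand.bytes(50)         # 50 random bytes, as in test_bytes
        rand.write_file(tmp_path)      # persists 1024 bytes, per test_files
        rand.load_file(tmp_path, 4)    # later, feed part of the file back in
        return token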
|
Tekco/django-pipeline
|
docs/conf.py
|
Python
|
mit
| 7,041
| 0.006678
|
# -*- coding: utf-8 -*-
#
# Pipeline documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 30 17:47:55 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-pipeline'
copyright = u'2011-2014, Timothée Peignier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3.25'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-pipelinedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-pipeline.tex', u'Pipeline Documentation',
u'Timothée Peignier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-pipeline', u'Pipeline Documentation',
[u'Timothée Peignier'], 1)
]
|
leapalazzolo/XSS
|
test/test_links.py
|
Python
|
mit
| 5,360
| 0.014179
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import mechanize
from links import links
class LinksTest(unittest.TestCase):
"""Test para 'links.py'"""
def test_obtener_parametros_de_la_url(self):
url_unlam = 'http://www.unlam.edu.ar/index.php'
url_unlam_con_parametros = 'http://www.unlam.edu.ar/index.php?seccion=-1&accion=buscador'
url_google_con_parametros = 'https://www.google.com.ar/?gfe_rd=cr&dcr=0&ei=eUXWWZPVGcb_8AfYso_wAw&gws_rd=ssl'
self.assertEqual(links.obtener_parametros_de_la_url(url_unlam_con_parametros),
{'seccion':['-1'], 'accion':['buscador']}
)
self.assertEqual(links.obtener_parametros_de_la_url(url_unlam),
{}
)
self.assertEqual(links.obtener_parametros_de_la_url(url_google_con_parametros),
{'gfe_rd':['cr'], 'dcr':['0'], 'ei':['eUXWWZPVGcb_8AfYso_wAw'], 'gws_rd':['ssl']}
)
def test_obtener_scripts_desde_url(self):
url_blogger = 'https://www.blogger.com/about/?r=2'
dominio_blogger = 'https'
archivo_html_blogger = open('test/blogger_html.txt', 'r')
html_blogger = archivo_html_blogger.read()
archivo_scripts_blogger_1 = open('test/blogger_script_1.txt', 'r')
scripts_blogger_1 = archivo_scripts_blogger_1.read()
archivo_scripts_blogger_2 = open('test/blogger_script_2.txt', 'r')
scripts_blogger_2 = archivo_scripts_blogger_2.read()
lista_scripts_blogger = [str(scripts_blogger_1), str(scripts_blogger_2)]
links._compilar_regex(r'(?!^//|\bhttp\b)[A-Za-z0-9_\-//]*\.\w*',
r'(?!^//|\bhttp\b)([A-Za-z0-9_\-\/]*\/[A-Za-z0-9_\-\.\/]*)',
r'.*\b' + 'www.blogger.com'.replace('www.', r'\.?') + r'\b(?!\.)'
)
self.assertNotEqual(links.obtener_scripts_desde_url(url_blogger, dominio_blogger, html_blogger),
lista_scripts_blogger
)
def test_obtener_link_valido(self):
links._compilar_regex(r'(?!^//)[A-Za-z0-9_\-//]*\.\w*',
'([A-Za-z0-9_\-\/]*\/[A-Za-z0-9_\-\.\/]*)',
r'.*\b' + 'www.blogger.com'.replace('www.', '\.?') + r'\b(?!\.)'
)
url_blogger = 'https://www.blogger.com/about/?r=2'
dominio_blogger = 'https'
link = '/go/createyourblog'
self.assertEqual(links.obtener_link_valido(url_blogger, link, dominio_blogger),
'https://www.blogger.com/go/createyourblog'
)
self.assertEqual(links.obtener_link_valido(url_blogger, '/', dominio_blogger),
'https://www.blogger.com/'
)
def test_obtener_entradas_desde_url(self):
url_unlam = 'http://alumno2.unlam.edu.ar/index.jsp?pageLand=registrarse'
html_unlam = open('test/unlam_html.txt', 'r').read()
parametros = links.obtener_entradas_desde_url(html_unlam)
parametro = parametros[0][0]['id']
self.assertEqual(parametro,
'docume'
)
def test_es_url_prohibida(self):
self.assertTrue(links.es_url_prohibida('http://example.com/asd/imagen.jpg'))
self.assertFalse(links.es_url_prohibida('http://example.com/asd/noespng.html'))
def test_es_url_valida(self):
self.assertFalse(links.es_url_valida('python.org'))
self.assertTrue(links.es_url_valida('https://www.python.org'))
def test_se_puede_acceder_a_url(self):
self.assertFalse(links.se_puede_acceder_a_url('https://sitioquenoesasfasdasda.org'))
self.assertTrue(links.se_puede_acceder_a_url('https://www.python.org'))
def test_abrir_url_en_navegador(self):
        br = mechanize.Browser()
links.configurar_navegador(br)
lista_cookies = links.obtener_cookies_validas('DXGlobalization_lang=en;DXGlobalization_locale=en-US;DXGlobalization_currency=ARS')
self.assertFalse(links.abrir_url_en_navegador(br, 'https://sitioquenoesasfasdasda.org'))
        self.assertTrue(links.abrir_url_en_navegador(br, 'https://www.python.org'))
self.assertTrue(links.abrir_url_en_navegador(br, 'https://cart.dx.com/'))
self.assertTrue(links.abrir_url_en_navegador(br, 'https://cart.dx.com/', lista_cookies))
def test_validar_formato_cookies(self):
lista_cookies = links.obtener_cookies_validas('DXGlobalization_lang=en;DXGlobalization_locale=en-US;DXGlobalization_currency=ARS')
#self.assertEqual(dict_cokies,
# {'DXGlobalization_lang':'en', 'DXGlobalization_locale':'en-US','DXGlobalization_currency':'ARS' }
# )
self.assertEqual(lista_cookies,
['DXGlobalization_lang=en', 'DXGlobalization_locale=en-US','DXGlobalization_currency=ARS' ]
)
self.assertFalse(links.obtener_cookies_validas('DXGlobalization_lang=en;'))
self.assertFalse(links.obtener_cookies_validas('DXGlobalization_lang='))
if __name__ == '__main__':
unittest.main()
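# For reference, the parameter dictionaries asserted above are exactly what the
# Python 2 standard library produces, so obtener_parametros_de_la_url presumably
# wraps something similar to this sketch (not the project's actual code):
import urlparse

def _query_params_sketch(url):
    """Return the query-string parameters of url as a dict of value lists."""
    return urlparse.parse_qs(urlparse.urlparse(url).query)

# _query_params_sketch('http://www.unlam.edu.ar/index.php?seccion=-1&accion=buscador')
# returns {'seccion': ['-1'], 'accion': ['buscador']}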
|
badele/home-assistant
|
homeassistant/components/light/__init__.py
|
Python
|
mit
| 9,768
| 0
|
"""
homeassistant.components.light
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to interact with lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light/
"""
import logging
import os
import csv
from homeassistant.components import group, discovery, wink, isy994, zwave
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
import homeassistant.util as util
import homeassistant.util.color as color_util
DOMAIN = "light"
DEPENDENCIES = []
SCAN_INTERVAL = 30
GROUP_NAME_ALL_LIGHTS = 'all lights'
ENTITY_ID_ALL_LIGHTS = group.ENTITY_ID_FORMAT.format('all_lights')
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# integer that represents transition time in seconds to make change
ATTR_TRANSITION = "transition"
# lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_XY_COLOR = "xy_color"
ATTR_COLOR_TEMP = "color_temp"
# int with value 0 .. 255 representing brightness of the light
ATTR_BRIGHTNESS = "brightness"
# String representing a profile (built-in ones or external defined)
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# Apply an effect to the light, can be EFFECT_COLORLOOP
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_WHITE = "white"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {
wink.DISCOVER_LIGHTS: 'wink',
isy994.DISCOVER_LIGHTS: 'isy994',
discovery.SERVICE_HUE: 'hue',
zwave.DISCOVER_LIGHTS: 'zwave',
}
PROP_TO_ATTR = {
'brightness': ATTR_BRIGHTNESS,
'color_temp': ATTR_COLOR_TEMP,
'rgb_color': ATTR_RGB_COLOR,
'xy_color': ATTR_XY_COLOR,
}
_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id=None):
""" Returns if the lights are on based on the statemachine. """
entity_id = entity_id or ENTITY_ID_ALL_LIGHTS
return hass.states.is_state(entity_id, STATE_ON)
# pylint: disable=too-many-arguments
def turn_on(hass, entity_id=None, transition=None, brightness=None,
rgb_color=None, xy_color=None, color_temp=None, profile=None,
flash=None, effect=None):
""" Turns all or specified light on. """
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_PROFILE, profile),
(ATTR_TRANSITION, transition),
(ATTR_BRIGHTNESS, brightness),
(ATTR_RGB_COLOR, rgb_color),
(ATTR_XY_COLOR, xy_color),
(ATTR_COLOR_TEMP, color_temp),
(ATTR_FLASH, flash),
(ATTR_EFFECT, effect),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
def turn_off(hass, entity_id=None, transition=None):
""" Turns all or specified light off. """
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_TRANSITION, transition),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
def setup(hass, config):
""" Exposes light control via statemachine and services. """
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, DISCOVERY_PLATFORMS,
GROUP_NAME_ALL_LIGHTS)
component.setup(config)
# Load built-in profiles and custom profiles
    profile_paths = [os.path.join(os.path.dirname(__file__),
                                  LIGHT_PROFILES_FILE),
hass.config.path(LIGHT_PROFILES_FILE)]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path) as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
try:
for profile_id, color_x, color_y, brightness in reader:
profiles[profile_id] = (float(color_x), float(color_y),
int(brightness))
except ValueError:
# ValueError if not 4 values per row
# ValueError if convert to float/int failed
_LOGGER.error(
"Error parsing light profiles from %s", profile_path)
return False
def handle_light_service(service):
""" Hande a turn light on or off service call. """
# Get and validate data
dat = service.data
# Convert the entity ids to valid light ids
target_lights = component.extract_from_service(service)
params = {}
transition = util.convert(dat.get(ATTR_TRANSITION), int)
if transition is not None:
params[ATTR_TRANSITION] = transition
if service.service == SERVICE_TURN_OFF:
for light in target_lights:
light.turn_off(**params)
for light in target_lights:
if light.should_poll:
light.update_ha_state(True)
return
# Processing extra data for turn light on request
# We process the profile first so that we get the desired
# behavior that extra service data attributes overwrite
# profile values
profile = profiles.get(dat.get(ATTR_PROFILE))
if profile:
*params[ATTR_XY_COLOR], params[ATTR_BRIGHTNESS] = profile
if ATTR_BRIGHTNESS in dat:
# We pass in the old value as the default parameter if parsing
# of the new one goes wrong.
params[ATTR_BRIGHTNESS] = util.convert(
dat.get(ATTR_BRIGHTNESS), int, params.get(ATTR_BRIGHTNESS))
if ATTR_XY_COLOR in dat:
try:
# xy_color should be a list containing 2 floats
xycolor = dat.get(ATTR_XY_COLOR)
# Without this check, a xycolor with value '99' would work
if not isinstance(xycolor, str):
params[ATTR_XY_COLOR] = [float(val) for val in xycolor]
except (TypeError, ValueError):
# TypeError if xy_color is not iterable
# ValueError if value could not be converted to float
pass
if ATTR_COLOR_TEMP in dat:
            # color_temp should be an int value in mireds
colortemp = dat.get(ATTR_COLOR_TEMP)
# Without this check, a ctcolor with value '99' would work
            # These values are based on Philips Hue, may need adjustment later
if isinstance(colortemp, int) and 154 <= colortemp <= 500:
params[ATTR_COLOR_TEMP] = colortemp
if ATTR_RGB_COLOR in dat:
try:
# rgb_color should be a list containing 3 ints
rgb_color = dat.get(ATTR_RGB_COLOR)
if len(rgb_color) == 3:
params[ATTR_RGB_COLOR] = [int(val) for val in rgb_color]
except (TypeError, ValueError):
# TypeError if rgb_color is not iterable
# ValueError if not all values can be converted to int
pass
if dat.get(ATTR_FLASH) in (FLASH_SHORT, FLASH_LONG):
params[ATTR_FLASH] = dat[ATTR_FLASH]
if dat.get(ATTR_EFFECT) in (EFFECT_COLORLOOP, EFFECT_WHITE):
params[ATTR_EFFECT] = dat[ATTR_EFFECT]
for light in target_lights:
light.turn_on(**params)
for light in target_lights:
if light.should_poll:
light.update_ha_state(True)
# Listen for light on and light off service calls
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_TURN_ON, handle_light_service,
descriptions.get(SERVICE_TURN_ON))
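# A usage sketch for the module-level helpers defined above (the hass object,
# the entity id and the values are assumptions for illustration; only keyword
# arguments that are not None end up in the service data dictionary):
def _example_light_calls(hass):
    # Sends {'entity_id': 'light.kitchen', 'brightness': 200, 'transition': 2}
    # to the light.turn_on service.
    turn_on(hass, entity_id='light.kitchen', brightness=200, transition=2)
    # Sends {'entity_id': 'light.kitchen'} to the light.turn_off service.
    turn_off(hass, entity_id='light.kitchen')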
|
pabloest/piradio
|
ada_radio.py
|
Python
|
gpl-3.0
| 23,437
| 0.037377
|
#!/usr/bin/env python
#
# Raspberry Pi Internet Radio
# using an Adafruit RGB-backlit LCD plate for Raspberry Pi.
# $Id: ada_radio.py,v 1.37 2014/11/04 19:53:46 bob Exp $
#
# Author : Bob Rathbone
# Site : http://www.bobrathbone.com
#
# This program uses Music Player Daemon 'mpd' and its client 'mpc'
# See http://mpd.wikia.com/wiki/Music_Player_Daemon_Wiki
#
#
# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html
#
# Disclaimer: Software is provided as is and absolutely no warranties are implied or given.
# The authors shall not be liable for any loss or damage however caused.
#
import os
import subprocess
import sys
import time
import string
import datetime
import atexit
import shutil
from ada_lcd_class import Adafruit_lcd
from time import strftime
# Class imports
from radio_daemon import Daemon
from radio_class import Radio
from log_class import Log
from rss_class import Rss
UP = 0
DOWN = 1
CurrentStationFile = "/var/lib/radiod/current_station"
CurrentTrackFile = "/var/lib/radiod/current_track"
CurrentFile = CurrentStationFile
# Instantiate classes
log = Log()
radio = Radio()
rss = Rss()
lcd = Adafruit_lcd()
# Register exit routine
def finish():
lcd.clear()
radio.execCommand("umount /media > /dev/null 2>&1")
radio.execCommand("umount /share > /dev/null 2>&1")
lcd.line1("Radio stopped")
atexit.register(finish)
# Daemon class
class MyDaemon(Daemon):
def run(self):
global CurrentFile
log.init('radio')
progcall = str(sys.argv)
log.message('Radio running pid ' + str(os.getpid()), log.INFO)
log.message("Radio " + progcall + " daemon version " + radio.getVersion(), log.INFO)
hostname = exec_cmd('hostname')
ipaddr = exec_cmd('hostname -I')
log.message("IP " + ipaddr, log.INFO)
myos = exec_cmd('uname -a')
log.message(myos, log.INFO)
# Display daemon pid on the LCD
message = "Radio pid " + str(os.getpid())
lcd.line1(message)
lcd.line2("IP " + ipaddr)
time.sleep(4)
log.message("Restarting MPD", log.INFO)
lcd.line2("Starting MPD")
radio.start()
log.message("MPD started", log.INFO)
mpd_version = radio.execMpcCommand("version")
log.message(mpd_version, log.INFO)
lcd.line1("Radio ver "+ radio.getVersion())
lcd.scroll2(mpd_version,no_interrupt)
time.sleep(1)
reload(lcd,radio)
radio.play(get_stored_id(CurrentFile))
log.message("Current ID = " + str(radio.getCurrentID()), log.INFO)
# Main processing loop
count = 0
while True:
get_switch_states(lcd,radio,rss)
radio.setSwitch(0)
display_mode = radio.getDisplayMode()
lcd.setScrollSpeed(0.3) # Scroll speed normal
ipaddr = exec_cmd('hostname -I')
# Shutdown command issued
if display_mode == radio.MODE_SHUTDOWN:
displayShutdown(lcd)
while True:
time.sleep(1)
			if ipaddr == "":
lcd.line1("No IP network")
elif display_mode == radio.MODE_TIME:
displayTime(lcd,radio)
if radio.muted():
msg = "Sound muted"
if radio.getStreaming():
msg = msg + ' *'
lcd.line2(msg)
else:
display_current(lcd,radio)
elif display_mode == radio.MODE_SEARCH:
display_search(lcd,radio)
elif display_mode == radio.MODE_SOURCE:
				display_source_select(lcd,radio)
elif display_mode == radio.MODE_OPTIONS:
display_options(lcd,radio)
elif display_mode == radio.MODE_IP:
lcd.line2("Radio v" + radio.getVersion())
				if ipaddr == "":
lcd.line1("No IP network")
else:
lcd.scroll1("IP " + ipaddr, interrupt)
			elif display_mode == radio.MODE_RSS:
displayTime(lcd,radio)
display_rss(lcd,rss)
elif display_mode == radio.MODE_SLEEP:
displayTime(lcd,radio)
display_sleep(lcd,radio)
time.sleep(0.3)
# Timer function
checkTimer(radio)
# Check state (pause or play)
checkState(radio)
# Alarm wakeup function
if display_mode == radio.MODE_SLEEP and radio.alarmFired():
log.message("Alarm fired", log.INFO)
unmuteRadio(lcd,radio)
displayWakeUpMessage(lcd)
radio.setDisplayMode(radio.MODE_TIME)
if radio.volumeChanged():
lcd.line2("Volume " + str(radio.getVolume()))
time.sleep(0.5)
time.sleep(0.1)
def status(self):
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "radiod status: not running"
log.message(message, log.INFO)
print message
else:
message = "radiod running pid " + str(pid)
log.message(message, log.INFO)
print message
return
# End of class overrides
# Interrupt scrolling LCD routine
def interrupt():
global lcd
global radio
global rss
interrupt = get_switch_states(lcd,radio,rss)
# Rapid display of timer
if radio.getTimer() and not interrupt:
displayTime(lcd,radio)
interrupt = checkTimer(radio)
if radio.volumeChanged():
lcd.line2("Volume " + str(radio.getVolume()))
time.sleep(0.5)
if not interrupt:
interrupt = checkState(radio)
return interrupt
def no_interrupt():
return False
# Call back routine called by switch events
def switch_event(switch):
global radio
radio.setSwitch(switch)
return
# Check switch states
def get_switch_states(lcd,radio,rss):
interrupt = False # Interrupt display
display_mode = radio.getDisplayMode()
input_source = radio.getSource()
option = radio.getOption()
if lcd.buttonPressed(lcd.MENU):
log.message("MENU switch mode=" + str(display_mode), log.DEBUG)
if radio.muted():
unmuteRadio(lcd,radio)
display_mode = display_mode + 1
if display_mode > radio.MODE_LAST:
display_mode = radio.MODE_TIME
if display_mode == radio.MODE_RSS and not radio.alarmActive():
if not rss.isAvailable():
display_mode = display_mode + 1
else:
lcd.line2("Getting RSS feed")
radio.setDisplayMode(display_mode)
log.message("New mode " + radio.getDisplayModeString()+
"(" + str(display_mode) + ")", log.DEBUG)
# Shutdown if menu button held for > 3 seconds
MenuSwitch = lcd.buttonPressed(lcd.MENU)
count = 15
while MenuSwitch:
time.sleep(0.2)
MenuSwitch = lcd.buttonPressed(lcd.MENU)
count = count - 1
if count < 0:
log.message("Shutdown", log.DEBUG)
MenuSwitch = False
radio.setDisplayMode(radio.MODE_SHUTDOWN)
if radio.getUpdateLibrary():
update_library(lcd,radio)
radio.setDisplayMode(radio.MODE_TIME)
elif radio.getReload():
source = radio.getSource()
log.message("Reload " + str(source), log.INFO)
lcd.line2("Please wait ")
reload(lcd,radio)
radio.setReload(False)
radio.setDisplayMode(radio.MODE_TIME)
elif radio.optionChanged():
#radio.setDisplayMode(radio.MODE_TIME)
#radio.optionChangedFalse()
log.message("optionChanged", log.DEBUG)
if radio.alarmActive() and not radio.getTimer() and option == radio.ALARMSET:
radio.setDisplayMode(radio.MODE_SLEEP)
radio.mute()
else:
radio.setDisplayMode(radio.MODE_TIME)
radio.optionChangedFalse()
elif radio.loadNew():
log.message("Load new search=" + str(radio.getSearchIndex()), log.DEBUG)
radio.playNew(radio.getSearchIndex())
radio.setDisplayMode(radio.MODE_TIME)
time.sleep(0.2)
interrupt = True
elif lcd.buttonPressed(lcd.UP):
log.message("UP switch", log.DEBUG)
if display_mode != radio.MODE_SLEEP:
radio.unmute()
if display_mode == radio.MODE_SOURCE:
radio.toggleSource()
radio.setReload(True)
elif display_mode == radio.MODE_SEARCH:
scroll_search(radio,UP)
elif display_mode == radio.MODE_OPTIONS:
cycle_options(radio,UP)
else:
radio.channelUp()
if display_mode == radio.MODE_RSS:
radio.setDisplayMode(radio.MODE_TIME)
interrupt = True
else:
DisplayExitMessage(lcd)
elif lcd.buttonPressed(lcd.DOWN):
log.message("DOWN switch", log.DEBUG)
if display_mode != radio.MODE_SLEEP:
radio.unmute()
if display_mode == radio.MODE_SOURCE:
radio.toggleSource()
radio.setReload(True)
elif display_mode == radio.MODE_SEARCH:
scroll_search(radio,
|
lucasdavid/drf-base
|
src/authority/urls.py
|
Python
|
mit
| 518
| 0
|
from infrastructure.routers import Router
from . import views
r = Router()
r.register('users', views.UsersViewSet) \
.register('groups', views.GroupsViewSet,
base_name='user-groups',
parents_query_lookups=['user'])
r.register('groups', views.GroupsViewSet) \
.register('permissions', views.PermissionsViewSet,
base_name='group-permissions',
parents_query_lookups=['group'])
r.register('permissions', views.PermissionsViewSet)
urlpatterns = r.urls
|
HEP-DL/dl_data_validation_toolset
|
dl_data_validation_toolset/framework/report_gen/individual.py
|
Python
|
mit
| 1,048
| 0.009542
|
import logging
from ..report.individual import IndividualReport
class IndividualGenerator(object):
logger = logging.getLogger("ddvt.rep_gen.ind")
def __init__(self, test):
self.test = test
async def generate(self, parent):
test_group = None
try:
test_group = self.test(parent.filename)
except OSError as e:
parent.report.valid = False
      parent.report.reports.append(IndividualReport("FileValid", 0,
                                                     {'error': str(e)}))
return
for test in test_group._tests_:
self.logger.info("Starting Test: {}".format(test))
try:
result, status = getattr(test_group, test)()
parent.report.reports.append(IndividualReport(test, status, result))
# TODO: Figure out what to do next
except Exception as e:
self.logger.warning("failed test")
parent.report.valid = False
parent.report.reports.append(IndividualReport(test, 0,
{'error': str(e)}))
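# A rough driving sketch for IndividualGenerator (the _FakeTestGroup and
# _FakeParent classes are invented stand-ins; the real framework supplies
# objects exposing .filename, .report.valid and .report.reports as used by
# generate() above).
import asyncio

class _FakeTestGroup(object):
  _tests_ = ['check_nonempty']
  def __init__(self, filename):
    self.filename = filename
  def check_nonempty(self):
    return {'detail': 'ok'}, 1  # (result, status), as generate() unpacks

class _FakeParent(object):
  def __init__(self, filename):
    self.filename = filename
    self.report = type('Report', (), {'valid': True, 'reports': []})()

generator = IndividualGenerator(_FakeTestGroup)
fake_parent = _FakeParent('some_file.h5')
asyncio.get_event_loop().run_until_complete(generator.generate(fake_parent))
# fake_parent.report.reports now holds one IndividualReport per entry in _tests_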
|
drptbl/webium
|
tests/alert_page/test_switch_to_new_window.py
|
Python
|
apache-2.0
| 736
| 0
|
from unittest import TestCase
from nose.tools import assert_false, ok_, eq_
from tests.alert_page import AlertPage
from webium.windows_handler import WindowsHandler
class TestSwitchToNewWindow(TestCase):
def test_switch_to_new_window(self):
page = AlertPage()
handler = WindowsHandler()
page.open()
        parent = handler.active_window
handler.save_window_set()
assert_false(handler.is_new_window_present())
page.open_new_window_link.click()
ok_(handler.is_new_window_present())
new = handler.new_window
handler.switch_to_new_window()
eq_(new, handler.active_window)
handler.drop_active_window()
eq_(parent, handler.active_window)
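# Condensed from the assertions above: a typical pop-up handling flow with
# webium's WindowsHandler (page and link names are the ones used in this test;
# purely an illustrative sketch, not an additional test case).
def _popup_flow_sketch():
    page = AlertPage()
    handler = WindowsHandler()
    page.open()
    parent = handler.active_window       # remember the original window handle
    handler.save_window_set()            # snapshot the current window set
    page.open_new_window_link.click()    # action that opens the pop-up
    handler.switch_to_new_window()       # the driver now targets the new window
    handler.drop_active_window()         # close it and fall back to the parent
    return parent == handler.active_window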
|
thatblstudio/svnScripts
|
ignore.py
|
Python
|
mit
| 562
| 0.009025
|
# coding=utf-8
# Created by bl 2015/10/30.
import os
import shutil
basePath = os.getcwd()
pathList = list()
# Get the list of sub-directories
for dirName in os.listdir(basePath):
path = os.path.join(basePath, dirName)
if os.path.isdir(path):
        pathList.append(path)
# print pathList
for path in pathList:
shutil.copy(basePath+"\ignore.myignore",path+"\ignore.myignore")
os.chdir(path)
os.system('svn propdel svn:global-ignores')
os.system('svn propset svn:ignore -F ignore.myignore .')
os.remove(path+"\ignore.myignore")
os.system('pause')
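# A sketch of the same steps written with os.path.join instead of hand-built
# backslash paths (illustrative only; like the script above, it assumes that
# ignore.myignore sits in base_path and that svn is on the PATH).
def apply_ignore(base_path, target_dirs):
    src = os.path.join(base_path, "ignore.myignore")
    for target in target_dirs:
        dst = os.path.join(target, "ignore.myignore")
        shutil.copy(src, dst)
        os.chdir(target)
        os.system('svn propdel svn:global-ignores')
        os.system('svn propset svn:ignore -F ignore.myignore .')
        os.remove(dst)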
|
bslatkin/pycon2014
|
lib/asyncio-0.4.1/tests/test_selector_events.py
|
Python
|
apache-2.0
| 62,747
| 0.000096
|
"""Tests for selector_events.py"""
import collections
import errno
import gc
import pprint
import socket
import sys
import unittest
import unittest.mock
try:
import ssl
except ImportError:
ssl = None
import asyncio
from asyncio import selectors
from asyncio import test_utils
from asyncio.selector_events import BaseSelectorEventLoop
from asyncio.selector_events import _SelectorTransport
from asyncio.selector_events import _SelectorSslTransport
from asyncio.selector_events import _SelectorSocketTransport
from asyncio.selector_events import _SelectorDatagramTransport
MOCK_ANY = unittest.mock.ANY
class TestBaseSelectorEventLoop(BaseSelectorEventLoop):
def _make_self_pipe(self):
self._ssock = unittest.mock.Mock()
self._csock = unittest.mock.Mock()
self._internal_fds += 1
def list_to_buffer(l=()):
return bytearray().join(l)
class BaseSelectorEventLoopTests(unittest.TestCase):
def setUp(self):
selector = unittest.mock.Mock()
self.loop = TestBaseSelectorEventLoop(selector)
def test_make_socket_transport(self):
m = unittest.mock.Mock()
self.loop.add_reader = unittest.mock.Mock()
transport = self.loop._make_socket_transport(m, asyncio.Protocol())
self.assertIsInstance(transport, _SelectorSocketTransport)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_make_ssl_transport(self):
m = unittest.mock.Mock()
self.loop.add_reader = unittest.mock.Mock()
self.loop.add_writer = unittest.mock.Mock()
self.loop.remove_reader = unittest.mock.Mock()
self.loop.remove_writer = unittest.mock.Mock()
waiter = asyncio.Future(loop=self.loop)
transport = self.loop._make_ssl_transport(
m, asyncio.Protocol(), m, waiter)
self.assertIsInstance(transport, _SelectorSslTransport)
@unittest.mock.patch('asyncio.selector_events.ssl', None)
def test_make_ssl_transport_without_ssl_error(self):
m = unittest.mock.Mock()
self.loop.add_reader = unittest.mock.Mock()
self.loop.add_writer = unittest.mock.Mock()
self.loop.remove_reader = unittest.mock.Mock()
self.loop.remove_writer = unittest.mock.Mock()
with self.assertRaises(RuntimeError):
self.loop._make_ssl_transport(m, m, m, m)
def test_close(self):
ssock = self.loop._ssock
ssock.fileno.return_value = 7
csock = self.loop._csock
csock.fileno.return_value = 1
remove_reader = self.loop.remove_reader = unittest.mock.Mock()
self.loop._selector.close()
self.loop._selector = selector = unittest.mock.Mock()
self.loop.close()
self.assertIsNone(self.loop._selector)
self.assertIsNone(self.loop._csock)
self.assertIsNone(self.loop._ssock)
selector.close.assert_called_with()
ssock.close.assert_called_with()
csock.close.assert_called_with()
remove_reader.assert_called_with(7)
self.loop.close()
self.loop.close()
def test_close_no_selector(self):
ssock = self.loop._ssock
csock = self.loop._csock
remove_reader = self.loop.remove_reader = unittest.mock.Mock()
self.loop._selector.close()
self.loop._selector = None
self.loop.close()
self.assertIsNone(self.loop._selector)
self.assertFalse(ssock.close.called)
self.assertFalse(csock.close.called)
self.assertFalse(remove_reader.called)
def test_socketpair(self):
        self.assertRaises(NotImplementedError, self.loop._socketpair)
def test_read_from_self_tryagain(self):
self.loop._ssock.recv.side_effect = BlockingIOError
self.assertIsNone(self.loop._read_from_self())
def test_read_from_self_exception(self):
self.loop._ssock.recv.side_effect = OSError
self.assertRaises(OSError, self.loop._read_from_self)
def test_write_to_self_tryagain(self):
self.loop._csock.send.side_effect = BlockingIOError
self.assertIsNone(self.loop._write_to_self())
def test_write_to_self_exception(self):
self.loop._csock.send.side_effect = OSError()
self.assertRaises(OSError, self.loop._write_to_self)
def test_sock_recv(self):
sock = unittest.mock.Mock()
self.loop._sock_recv = unittest.mock.Mock()
f = self.loop.sock_recv(sock, 1024)
self.assertIsInstance(f, asyncio.Future)
self.loop._sock_recv.assert_called_with(f, False, sock, 1024)
def test__sock_recv_canceled_fut(self):
sock = unittest.mock.Mock()
f = asyncio.Future(loop=self.loop)
f.cancel()
self.loop._sock_recv(f, False, sock, 1024)
self.assertFalse(sock.recv.called)
def test__sock_recv_unregister(self):
sock = unittest.mock.Mock()
sock.fileno.return_value = 10
f = asyncio.Future(loop=self.loop)
f.cancel()
self.loop.remove_reader = unittest.mock.Mock()
self.loop._sock_recv(f, True, sock, 1024)
self.assertEqual((10,), self.loop.remove_reader.call_args[0])
def test__sock_recv_tryagain(self):
f = asyncio.Future(loop=self.loop)
sock = unittest.mock.Mock()
sock.fileno.return_value = 10
sock.recv.side_effect = BlockingIOError
self.loop.add_reader = unittest.mock.Mock()
self.loop._sock_recv(f, False, sock, 1024)
self.assertEqual((10, self.loop._sock_recv, f, True, sock, 1024),
self.loop.add_reader.call_args[0])
def test__sock_recv_exception(self):
f = asyncio.Future(loop=self.loop)
sock = unittest.mock.Mock()
sock.fileno.return_value = 10
err = sock.recv.side_effect = OSError()
self.loop._sock_recv(f, False, sock, 1024)
self.assertIs(err, f.exception())
def test_sock_sendall(self):
sock = unittest.mock.Mock()
self.loop._sock_sendall = unittest.mock.Mock()
f = self.loop.sock_sendall(sock, b'data')
self.assertIsInstance(f, asyncio.Future)
self.assertEqual(
(f, False, sock, b'data'),
self.loop._sock_sendall.call_args[0])
def test_sock_sendall_nodata(self):
sock = unittest.mock.Mock()
self.loop._sock_sendall = unittest.mock.Mock()
f = self.loop.sock_sendall(sock, b'')
self.assertIsInstance(f, asyncio.Future)
self.assertTrue(f.done())
self.assertIsNone(f.result())
self.assertFalse(self.loop._sock_sendall.called)
def test__sock_sendall_canceled_fut(self):
sock = unittest.mock.Mock()
f = asyncio.Future(loop=self.loop)
f.cancel()
self.loop._sock_sendall(f, False, sock, b'data')
self.assertFalse(sock.send.called)
def test__sock_sendall_unregister(self):
sock = unittest.mock.Mock()
sock.fileno.return_value = 10
f = asyncio.Future(loop=self.loop)
f.cancel()
self.loop.remove_writer = unittest.mock.Mock()
self.loop._sock_sendall(f, True, sock, b'data')
self.assertEqual((10,), self.loop.remove_writer.call_args[0])
def test__sock_sendall_tryagain(self):
f = asyncio.Future(loop=self.loop)
sock = unittest.mock.Mock()
sock.fileno.return_value = 10
sock.send.side_effect = BlockingIOError
self.loop.add_writer = unittest.mock.Mock()
self.loop._sock_sendall(f, False, sock, b'data')
self.assertEqual(
(10, self.loop._sock_sendall, f, True, sock, b'data'),
self.loop.add_writer.call_args[0])
def test__sock_sendall_interrupted(self):
f = asyncio.Future(loop=self.loop)
sock = unittest.mock.Mock()
sock.fileno.return_value = 10
sock.send.side_effect = InterruptedError
self.loop.add_writer = unittest.mock.Mock()
self.loop._sock_sendall(f, False, sock, b'data')
self.assertEqual(
(10, self.loop._sock_sendall, f, True, sock, b'data'),
self.loop.add_writer.call_args[0])
def test__sock_sendall_exception(self):
f = asyncio.Fu
|
TracyWebTech/django-revproxy
|
tests/settings.py
|
Python
|
mpl-2.0
| 1,241
| 0.000806
|
SECRET_KEY = 'asdf'
DATABASES = {
'default': {
'NAME': 'test.db',
'ENGINE': 'django.db.backends.sqlite3',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'revproxy',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tests.urls'
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TEMPLATES = [
{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': TEMPLATE_DIRS,
},
]
LOGGING = {
'version': 1,
'handlers': {
'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
},
},
'loggers': {
'revproxy': {
'handlers': ['null'],
'propagate': False,
},
},
}
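# ROOT_URLCONF above points at 'tests.urls'; with django-revproxy such a module
# conventionally wires up a ProxyView, roughly as in this sketch (the upstream
# address is a placeholder and the real test urls may differ):
#
#   from django.conf.urls import url
#   from revproxy.views import ProxyView
#
#   urlpatterns = [
#       url(r'^(?P<path>.*)$', ProxyView.as_view(upstream='http://example.com/')),
#   ]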
|