repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
mtik00/obfuscator | docs/auto-generate.py | Python | gpl-2.0 | 4,778 | 0.003139 | #!/usr/bin/env python
"""
This script is used to create default documentation for a library.
"""
# Imports ######################################################################
import os
import re
import argparse
# Metadata #####################################################################
__author__ = "Timothy McFadden"
__date__ = "09/05/2014"
__copyright__ = "Timothy McFadden, 2014"
__license__ = "GPLv2"
__version__ = "0.03"
# Globals ######################################################################
# Base directory to search for modules
LIBDIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'lib', 'obfuscator'))
# Base directory to put auto-generated doc files.
DOCDIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'rst', 'lib'))
# The auto-generated index file (you'll need to add this to a TOC)
INDEX_FILE = "rst/auto.rst"
# Only include LIBDIR directories not matching this search.
IGNORE_DIR = re.compile('^.*\\\\tests(\\\\)?', re.IGNORECASE)
def remove_directory(top, remove_top=True):
'''
Removes all files and directorie | s, bottom-up.
@type top: str
@param top: The top-level directory you want removed
@type remove_top: bool
@param remove_top: Whether or not to remove the top director | y
'''
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
if remove_top:
os.rmdir(top)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--noforce', help="Don't delete DOCDIR before generating documentation", action="store_true", default=False)
parser.add_argument('--libdir', help="Base directory for library to document", default=LIBDIR)
parser.add_argument('--docdir', help="Base directory to store generated .rst files", default=DOCDIR)
parser.add_argument('--index', help="The auto-generated index file (you'll need to add this to a TOC)", default=INDEX_FILE)
args = parser.parse_args()
if not args.noforce:
remove_directory(args.docdir, remove_top=False)
docdir_root = os.path.split(args.docdir)[1] # We'll skip this directory
index = [] # Keep track of the files to put into the index
for root, dirs, files in os.walk(args.libdir):
module_dir = None
for fname in [x for x in files if x.endswith('.py')]:
docfile = None
if (module_dir is None) and ('__init__.py' in files):
# import pdb; pdb.set_trace() # TODO
module_dir = root[len(args.libdir) + 1:] or os.path.basename(root) # 'obfuscator'
if fname == "__init__.py":
# Treat modules a little differently
module_name = module_dir.replace('\\', '.')
docfile_name = os.path.split(root)[1]
docdir = os.path.join(args.docdir, module_dir, '..')
docfile = os.path.abspath(os.path.join(docdir, "%s.rst" % docfile_name))
index_entry = ('lib/%s' % module_dir).replace('\\', '/')
automodule = module_name
elif (os.path.split(root)[1] != docdir_root) and ('__init__.py' in files):
# module_dir = root[len(args.libdir) + 1:]
module_name = fname
docfile_name = os.path.splitext(fname)[0]
docdir = os.path.join(args.docdir, module_dir)
docfile = os.path.abspath(os.path.join(docdir, "%s.rst" % docfile_name))
index_entry = ('lib/%s/%s' % (module_dir, docfile_name)).replace('\\', '/')
automodule = '.'.join([docdir[len(args.docdir) + 1:], docfile_name]).replace('\\', '.')
if not docfile:
continue
# Make sure the path exists
if not os.path.isdir(os.path.dirname(docfile)):
os.makedirs(os.path.dirname(docfile))
# Don't re-generate an already generated (or hand-created) file
if not os.path.isfile(docfile):
with open(docfile, 'wb') as fh:
fh.write("%s\n" % automodule)
fh.write("=" * len(automodule))
fh.write("\n\n")
fh.write(".. automodule:: %s\n" % automodule)
fh.write(' :members:\n')
index.append(index_entry)
with open(args.index, 'wb') as fh:
fh.write("Auto Generated API Documentation\n")
fh.write("================================\n\n")
fh.write("Contents:\n\n")
fh.write(".. toctree::\n")
fh.write(" :maxdepth: 2\n\n")
for item in index:
fh.write(" %s\n" % item)
|
prometheanfire/openstack-guest-agents-unix | tests/test_kms.py | Python | apache-2.0 | 3,525 | 0.000284 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
KMS activation tester
"""
import os
import unittest
from cStringIO import StringIO
import commands.redhat.kms
class TestKMSUpdates(unittest.TestCase):
def test_redhat_up2date(self):
"""Test updating up2date config for Red Hat"""
outfiles = commands.redhat.kms.configure_up2date([
'proxy1.example.com', 'proxy2.example.com'])
self.assertEqual(outfiles['/etc/sysconfig/rhn/up2date'], '\n'.join([
'# Automatically generated Red Hat Update Agent config file, '
'do not edit.',
'# Format: 1.0',
'versionOverride[comment]=Override the automatically determined '
'system version',
'versionOverride=',
'',
'enableProxyAuth[comment]=To use an authenticated proxy or not',
'enableProxyAuth=0',
'',
'networkRetries[comment]=Number of atte | mpts to make at network '
'connections before giving up',
'networkRetries=5',
'',
'hostedWhitelist[comment]=None',
'hostedWhitelist=',
'',
'enableProxy[comment]=Use a HTTP Proxy',
'enableProxy=0',
'',
| 'serverURL[comment]=Remote server URL',
'serverURL=https://proxy1.example.com/XMLRPC;'
'https://proxy2.example.com/XMLRPC;',
'',
'proxyPassword[comment]=The password to use for an authenticated '
'proxy',
'proxyPassword=',
'',
'noSSLServerURL[comment]=None',
'noSSLServerURL=http://proxy1.example.com/XMLRPC;'
'http://proxy2.example.com/XMLRPC;',
'',
'proxyUser[comment]=The username for an authenticated proxy',
'proxyUser=',
'',
'disallowConfChanges[comment]=Config options that can not be '
'overwritten by a config update action',
'disallowConfChanges=noReboot;sslCACert;useNoSSLForPackages;'
'noSSLServerURL;serverURL;disallowConfChanges;',
'',
'sslCACert[comment]=The CA cert used to verify the ssl server',
'sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT',
'',
'debug[comment]=Whether or not debugging is enabled',
'debug=0',
'',
'httpProxy[comment]=HTTP proxy in host:port format, e.g. '
'squid.redhat.com:3128',
'httpProxy=',
'',
'systemIdPath[comment]=Location of system id',
'systemIdPath=/etc/sysconfig/rhn/systemid',
'',
'noReboot[comment]=Disable the reboot action',
'noReboot=0']) + '\n')
if __name__ == "__main__":
agent_test.main()
|
spreeker/democracygame | external_apps/docutils-snapshot/test/functional/tests/standalone_rst_latex.py | Python | bsd-3-clause | 259 | 0 | exec(open('functional/tests/_standalone_rst_defaults.py').read())
# Source and destination file names.
test_source | = "standalone_rst_latex.txt"
test_destination = "standalone_rst_latex.tex"
# Keyword parameters passed to publish_file.
writer_name = " | latex"
|
msullivan/advent-of-code | 2019/5b.py | Python | mit | 1,470 | 0.002041 | #!/usr/bin/env python3
import sys
def run(p, input):
ip = 0
output = []
while True:
instr = p[ip]
def read(i):
mode = (instr // (10**(1+i))) % 10
return p[p[ip+i]] if mode == 0 else p[ip+i]
if instr % 100 == 1:
p[p[ip+3]] = read(1) + read(2)
ip += 4
elif instr % 100 == 2:
p[p[ip+3]] = read(1) * read(2)
ip += 4
elif instr % 100 == 3:
p[p[ip+1]] = input.pop(0)
| ip += 2
elif instr % 100 == 4:
output.append(read(1))
| ip += 2
elif instr % 100 == 5:
if read(1) != 0:
ip = read(2)
else:
ip += 3
elif instr % 100 == 6:
if read(1) == 0:
ip = read(2)
else:
ip += 3
elif instr % 100 == 7:
if read(1) < read(2):
p[p[ip+3]] = 1
else:
p[p[ip+3]] = 0
ip += 4
elif instr % 100 == 8:
if read(1) == read(2):
p[p[ip+3]] = 1
else:
p[p[ip+3]] = 0
ip += 4
elif instr % 100 == 99:
break
print("OUT", output)
def main(args):
data = [s.strip() for s in sys.stdin]
p = [int(x) for x in data[0].split(",")]
op = list(p)
run(p, [5])
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
termie/pupa | nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py | Python | apache-2.0 | 6,625 | 0.000906 | # Copyright (c) 2011 NTT.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# Tables to alter
#
networks = Table('networks', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('injected', Boolean(create_constraint=True, name=None)),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('netmask',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('bridge',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('gateway',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('broadcast',
String(length=255, convert_unicode= | False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('dns',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vlan', Integer()),
Column('vpn_public_address',
String(length=255, convert_unicode=False | , assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('vpn_public_port', Integer()),
Column('vpn_private_address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('dhcp_start',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('cidr_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('ra_server', String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column(
'label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('network_id',
Integer(),
ForeignKey('networks.id'),
nullable=True),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=True),
Column('allocated', Boolean(create_constraint=True, name=None)),
Column('leased', Boolean(create_constraint=True, name=None)),
Column('reserved', Boolean(create_constraint=True, name=None)),
Column("addressV6", String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column("netmaskV6", String(length=3,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column("gatewayV6", String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
)
#
# New Tables
#
# None
#
# Columns to add to existing tables
#
networks_netmask_v6 = Column(
'netmask_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
# Alter column name
networks.c.ra_server.alter(name='gateway_v6')
# Add new column to existing table
networks.create_column(networks_netmask_v6)
# drop existing columns from table
fixed_ips.c.addressV6.drop()
fixed_ips.c.netmaskV6.drop()
fixed_ips.c.gatewayV6.drop()
|
msbeta/apollo | modules/tools/open_space_visualization/hybrid_a_star_visualizer.py | Python | apache-2.0 | 6,581 | 0.00152 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from hybrid_a_star_python_interface import *
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import animation
import numpy as np
import time
import math
def HybridAStarPlan(visualize_flag):
# initialze object
HybridAStar = HybridAStarPlanner()
# parameter(except max, min and car size is defined in proto)
num_output_buffer = 100000
sx = -8
sy = 4
sphi = 0.0
scenario = "backward"
# scenario = "parallel"
if scenario == "backward":
# for parking space 11543 in sunnyvale_with_two_offices
left_boundary_x = (
c_double * 3)(*[-13.6407054776, 0.0, 0.0515703622475])
left_boundary_y = (
c_double * 3)(*[0.0140634663703, 0.0, -5.15258191624])
down_boundary_x = (c_double * 2)(*[0.0515703622475, 2.8237895441])
down_boundary_y = (c_double * 2)(*[-5.15258191624, -5.15306980547])
right_boundary_x = (
c_double * 3)(*[2.8237895441, 2.7184833539, 16.3592013995])
right_boundary_y = (
c_double * 3)(*[-5.15306980547, -0.0398078878812, -0.011889513383])
up_boundary_x = (c_double * 2)(*[16.3591910364, -13.6406951857])
up_boundary_y = (c_double * 2)(*[5.60414234644, 5.61797800844])
# obstacles(x, y, size)
HybridAStar.AddVirtualObstacle(left_boundary_x, left_boundary_y, 3)
HybridAStar.AddVirtualObstacle(
down_boundary_x, down_boundary_y, 2)
HybridAStar.AddVirtualObstacle(
right_boundary_x, right_boundary_y, 3)
HybridAStar.AddVirtualObstacle(
up_boundary_x, up_boundary_y, 2)
ex = 1.359
ey = -3.86443643718
ephi = 1.581
XYbounds = [-13.6406951857, 16.3591910364, -
5.15258191624, 5.61797800844]
x = (c_double * num_output_buffer)()
y = (c_double * num_output_buffer)()
phi = (c_double * num_output_buffer)()
v = (c_double * num_output_buffer)()
a = (c_double * num_output_buffer)()
steer = (c_double * num_output_buffer)()
size = (c_ushort * 1)()
XYbounds_ctype = (c_double * 4)(*XYbounds)
start = time.time()
print("planning start")
success = True
if not HybridAStar.Plan(sx, sy, sphi, ex, ey, ephi, XYbounds_ctype):
print("planning fail")
success = False
end = time.time()
planning_time = end - start
print("planning time is " + str(planning_time))
# load result
x_out = []
y_out = []
phi_out = []
v_out = []
a_out = []
steer_out = []
if visualize_flag and success:
HybridAStar.GetResult(x, y, phi, v, a, steer, size)
for i in range(0, size[0]):
x_out.append(float(x[i]))
y_out.append(float(y[i]))
phi_out.append(float(phi[i]))
v_out.append(float(v[i]))
a_out.append(float(a[i]))
steer_out.append(float(steer[i]))
# plot
fig1 = plt.figure(1)
ax = fig1.add_subplot(111)
for i in range(0, size[0]):
downx = 1.055 * math.cos(phi_out[i] - math.pi / 2)
downy = 1.055 * math.sin(phi_out[i] - math.pi / 2)
leftx = 1.043 * math.cos(phi_out[i] - math.pi)
lefty = 1.043 * math.sin(phi_out[i] - math.pi)
x_shift_leftbottom = x_out[i] + downx + leftx
y_shift_leftbottom = y_out[i] + downy + lefty
car = patches.Rectangle((x_shift_leftbottom, y_shift_leftbottom), 3.89 + 1.043, 1.055*2,
angle=phi_out[i] * 180 / math.pi, linewidth=1, edgecolor='r', facecolor='none')
arrow = patches.Arrow(
x_out[i], y_out[i], 0.25*math.cos(phi_out[i]), 0.25*math.sin(phi_out[i]), 0.2)
ax.add_patch(car)
ax.add_patch(arrow)
ax.plot(sx, sy, "s")
ax.plot(ex, ey, "s")
if scenario == "backward":
left_boundary_x = [-13.6407054776, 0.0, 0.0515703622475]
left_boundary_y = [0.0140634663703, 0.0, -5.15258191624]
down_boundary_x = [0.0515703622475, 2.8237895441]
down_boundary_y = [-5.15258191624, -5.15306980547]
right_boundary_x = [2.8237895441, 2.7184833539, 16.3592013995]
right_boundary_y = [-5.15306980547, -0.0398078878812, -0.011889513383]
up_boundary_x = [16.3591910364, -13.6406951857]
up_boundary_y = [5.60414234644, 5.61797800844]
ax.plot(left_boundary_x, left_boundary_y, "k")
ax.plot(down_boundary_x, down_boundary_y, "k")
ax.plot(right_boundary_x, right_boundary_y, "k")
ax.plot(up_boundary_x, up_boundary_y, "k")
plt.axis('equal')
fig2 = plt.figure(2)
v_graph = fig2.add_subplot(311)
v_graph.title.set_text('v')
v_graph.plot(np.linspace(0, size[0], size[0]), v_out)
a_graph = fig2.add_subplot(312)
a_graph.title.set_text('a')
a_graph.plot(np.linspace(0, size[0], size[0]), a_out)
steer_graph = fig2.add_subplot(313)
steer_graph.title.set_text('steering')
steer_graph.plot(np.linspace(0, size[0], size[0]), steer_out)
plt.show()
if not visualize_flag :
if success :
HybridAStar.GetResult(x, y, phi, v, a, steer, size)
for i in range(0, size[0]):
x_out.append(float(x[i]))
y_out.append(float(y[i]))
phi_out.append(float(phi[i]))
v_out.append(float(v[i]))
a_out.append(float(a[i]))
steer_out.append(float(steer[i]))
return success, x_out, y_out, phi_out, v_out, a_out, steer_out, planning_time
if __name__ == '__m | ain__':
visualize_ | flag = True
HybridAStarPlan(visualize_flag)
|
Tosh007/DnD5_CharCreator | menu_warlock.py | Python | gpl-3.0 | 1,304 | 0.003067 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'menu_warlock.ui'
#
# Created by: PyQ | t5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Warlock(object):
def setupUi(self, Warlock):
Warlock.setObjectName("Warlock")
Warlock.resize(261, 181)
self.gridLayout = QtWidgets.QGridLayout(Warlock)
self.gridLayout.setObjectName("gridLayout")
self.label_spellSlotLevel = QtWidgets.QLabel(Warlock)
self.label_spellSlotLevel.setObjectNa | me("label_spellSlotLevel")
self.gridLayout.addWidget(self.label_spellSlotLevel, 1, 0, 1, 1)
self.label_spellSlots = QtWidgets.QLabel(Warlock)
self.label_spellSlots.setObjectName("label_spellSlots")
self.gridLayout.addWidget(self.label_spellSlots, 0, 0, 1, 1)
self.retranslateUi(Warlock)
QtCore.QMetaObject.connectSlotsByName(Warlock)
def retranslateUi(self, Warlock):
_translate = QtCore.QCoreApplication.translate
Warlock.setWindowTitle(_translate("Warlock", "Form"))
self.label_spellSlotLevel.setText(_translate("Warlock", "spell slot level"))
self.label_spellSlots.setText(_translate("Warlock", "number of spell slots"))
|
steder/goose | goose/ftest/__init__.py | Python | mit | 43 | 0 | " | ""Tests that do require database | setup"""
|
egabancho/invenio-communities | invenio_communities/__init__.py | Python | gpl-2.0 | 1,040 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNES | S FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Inven | io; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module that adds support for communities."""
|
magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/examples/nest/music/msgtest.py | Python | gpl-2.0 | 1,200 | 0.001667 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# msgtest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Publi | c License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
nest.sli_run("statusdict/have_music ::")
if not nest.spp():
import sys
print("NEST was not compiled with support for MUSIC, not running.")
sys.exit()
mmip = nest.Create('music_message_in_proxy')
nest.SetStatus(mmip, {'port_name' : 'msgdata'})
# Simulate and ge | t message data with a granularity of 10 ms:
time = 0
while time < 1000:
nest.Simulate (10)
data = nest.GetStatus(mmip, 'data')
print data
time += 10
|
bugobliterator/ardupilot | Tools/autotest/param_metadata/rstemit.py | Python | gpl-3.0 | 10,018 | 0.001897 | #!/usr/bin/env python
from __future__ import print_function
import re
from param import known_param_fields, known_units
from emit import Emit
try:
from cgi import escape as cescape
except Exception:
from html import escape as cescape
# Emit docs in a RST format
class RSTEmit(Emit):
def blurb(self):
if self.sitl:
return """SITL parameters"""
return """This is a complete list of the parameters which can be set (e.g. via the MAVLink protocol) to control vehicle behaviour. They are stored in persistent storage on the vehicle.
This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.
""" # noqa
def toolname(self):
return "Tools/autotest/param_metadata/param_parse.py"
def __init__(self, *args, **kwargs):
Emit.__init__(self, *args, **kwargs)
output_fname = 'Parameters.rst'
self.f = open(output_fname, mode='w')
self.spacer = re.compile("^", re.MULTILINE)
self.rstescape = re.compile("([^a-zA-Z0-9\n ])")
if self.sitl:
parameterlisttype = "SITL Parameter List"
else:
parameterlisttype = "Complete Parameter List"
parameterlisttype += "\n" + "=" * len(parameterlisttype)
self.preamble = """.. Dynamically generated list of documented parameters
.. This page was generated using {toolname}
.. DO NOT EDIT
.. _parameters:
{parameterlisttype}
{blurb}
""".format(blurb=self.escape(self.blurb()),
parameterlisttype=parameterlisttype,
toolname=self.escape(self.toolname()))
self.t = ''
def escape(self, s):
ret = re.sub(self.rstescape, "\\\\\g<1>", s)
return ret
def close(self):
self.f.write(self.preamble)
self.f.write(self.t)
self.f.close()
def start_libraries(self):
pass
def tablify_row(self, rowheading, row, widths, height):
joiner = "|"
row_lines = [x.split("\n") for x in row]
for row_line in row_lines:
row_line.extend([""] * (height - len(row_line)))
if rowheading is not None:
rowheading_lines = rowheading.split("\n")
rowheading_lines.extend([""] * (height - len(rowheading_lines)))
out_lines = []
for i in range(0, height):
out_line = ""
if rowheading is not None:
rowheading_line = rowheading_lines[i]
out_line += joiner + " " + rowheading_line + " " * (widths[0] - len(rowheading_line) - 1)
joiner = "#"
j = 0
for item in row_lines:
widthnum = j
if rowheading is not None:
widthnum += 1
line = item[i]
out_line += joiner + " " + line + " " * (widths[widthnum] - len(line) - 1)
joiner = "|"
j += 1
out_line += "|"
out_lines.append(out_line)
return "\n".join(out_lines)
def tablify_longest_row_length(self, rows, rowheadings, headings):
check_width_rows = rows[:]
if headings is not None:
check_width_rows.append(headings)
longest_row_length = 0
for row in check_width_rows:
if len(row) > longest_row_length:
longest_row_length = len(row)
if rowheadings is not None:
longest_row_length += 1
return longest_row_length
def longest_line_in_string(self, string):
longest = 0
for line in string.split("\n"):
if len(line) > longest:
longest = len(line)
return longest
def tablify_calc_row_widths_heights(self, rows, rowheadings, headings):
rows_to_check = []
if headings is not None:
rows_to_check.append(headings)
rows_to_check.extend(rows[:])
heights = [0] * len(rows_to_check)
longest_row_length = self.tablify_longest_row_length(rows, rowheadings, headings)
widths = [0] * longest_row_length
all_rowheadings = []
if rowheadings is not None:
if headings is not None:
all_rowheadings.append("")
all_rowheadings.extend(rowheadings)
for rownum in range(0, len(rows_to_check)):
row = rows_to_check[rownum]
values_to_check = []
if rowheadings is not None:
values_to_check.append(all_rowheadings[rownum])
values_to_check.extend(row[:])
colnum = 0
for value in values_to_check:
height = len(value.split("\n"))
if height > heights[rownum]:
heights[rownum] = height
longest_line = self.longest_line_in_string(value)
width = longest_line + 2 # +2 for leading/trailing ws
if width > widths[colnum]:
widths[colnum] = width
colnum += 1
return | (widths, heights)
def tablify(self, rows, headings=None, rowheadings=None):
(widths, heights) = self.tablify_calc_row_widths_heights(rows, rowheadings, headings)
# create dividing lines
bar = ""
| heading_bar = ""
for width in widths:
bar += "+"
heading_bar += "+"
bar += "-" * width
heading_bar += "=" * width
bar += "+"
heading_bar += "+"
# create table
ret = bar + "\n"
if headings is not None:
rowheading = None
if rowheadings is not None:
rowheading = ""
ret += self.tablify_row(rowheading, headings, widths, heights[0]) + "\n"
ret += heading_bar + "\n"
for i in range(0, len(rows)):
rowheading = None
height = i
if rowheadings is not None:
rowheading = rowheadings[i]
if headings is not None:
height += 1
ret += self.tablify_row(rowheading, rows[i], widths, heights[height]) + "\n"
ret += bar + "\n"
return ret
def render_prog_values_field(self, render_info, param, field):
values = (param.__dict__[field]).split(',')
rows = []
for value in values:
v = [x.strip() for x in value.split(':')]
rows.append(v)
return self.tablify(rows, headings=render_info["headings"])
def emit(self, g):
tag = '%s Parameters' % self.escape(g.reference)
reference = "parameters_" + g.reference
field_table_info = {
"Values": {
"headings": ['Value', 'Meaning'],
},
"Bitmask": {
"headings": ['Bit', 'Meaning'],
},
}
ret = """
.. _{reference}:
{tag}
{underline}
""".format(tag=tag, underline="-" * len(tag),
reference=reference)
for param in g.params:
if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
continue
d = param.__dict__
# Get param path if defined (i.e. is duplicate parameter)
param_path = getattr(param, 'path', '')
name = param.name.split(':')[-1]
tag_param_path = ' (%s)' % param_path if param_path else ''
tag = '%s%s: %s' % (self.escape(name), self.escape(tag_param_path), self.escape(param.DisplayName),)
tag = tag.strip()
reference = param.name
# remove e.g. "ArduPlane:" from start of parameter name:
reference = reference.split(":")[-1]
if param_path:
reference += '__' + param_path
ret += """
.. _{reference}:
{tag}
{tag_underline}
""".format(tag=tag, tag_underline='~' * len(tag), reference=reference)
if d.get('User', None) == 'Advanced':
ret += '\n| *Note: This parameter is for advanced users*'
ret += "\n\n%s\n" % self.escape(param.Description)
headings = []
row = []
for fie |
smmribeiro/intellij-community | python/testData/refactoring/introduceVariable/substringContainsEscapes.after.py | Python | apache-2.0 | 52 | 0.019231 | a = u"lo \u00d6sterreich\\!\n | "
print(u | "Hel%s\n" % a) |
patrickwolf/python-tutorial | pyintro_b_modules/y_packages_self_contained/package_self_contained/__init__.py | Python | mit | 74 | 0 | from module1 import | Module1
from module2 | import Module2
import subpackage
|
RIEI/printerStats | daemon/models/HPLJ4200/getcounter.py | Python | gpl-2.0 | 373 | 0.008043 | __author__ = 'pferland'
from BeautifulSoup import BeautifulSoup
import urllib2
import re
def getcounter(shost):
html = urllib2.urlopen("http://" + shost + "/hp/device/this.LCDi | spatcher?dispatch=html&cat=0&pos=1")
soup = BeautifulSoup(html.read())
for supply in soup.findAll("font", {'face': 'Helvetica,Arial'}):
pri | nt str(supply) + "\r\n"
return 0 |
thetypist/scrappybot | quotesbot/pipelines.py | Python | mit | 289 | 0 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/e | n/latest/topics/item-pi | peline.html
class QuotesbotPipeline(object):
def process_item(self, item, spider):
return item
|
Zanzibar82/pelisalacarta | python/main-classic/channels/youanimehd.py | Python | gpl-3.0 | 12,881 | 0.020053 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para youanimehd creado por Itsuki Minami
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
DEBUG = config.get_setting("debug")
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0"
__category__ = "A"
__type__ = "generic"
__title__ = "YouAnimeHd"
__channel__ = "youanimehd"
__language__ = "ES"
__creationdate__ = "130202"
def isGeneric():
return True
def mainlist(item):
logger.info("[youanimehd.py] mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, action="completo" , title="Portada" , url="http://youanimehd.com/" ))
itemlist.append( Item(channel=__channel__, action="letras" , title="Listado Alfabetico" , url="http://youanimehd.com/" ))
itemlist.append( Item(channel=__channel__, action="completo" , title="Listado Completo de Animes" , url="http://youanimehd.com/videos" ))
itemlist.append( Item(channel=__channel__, action="completo" , title="Listado Completo de Peliculas" , url="http://youanimehd.com/tags/pelicula" ))
itemlist.append( Item(channel=__channel__, action="completo" , title="Listado Completo de Dibujos" , url="http://youanimehd.com/tags/cartoon" ))
itemlist.append( Item(channel=__channel__, action="completo" , title="Listado Completo de Doramas" , url="http://youanimehd.com/tags/dorama" ))
#itemlist.append( Item(channel=__channel__, action="search" , title="Buscar" , url="http://youanimehd.com/buscar/" ))
return itemlist
def completo(item):
logger.info("[youanimehd.py] completo")
itemlist = []
# Descarga la pagina
data = scrapertools.cache_page(item.url)
'''
<li class="mainList">
<div class="videoThumb">
<a href="http://youanimehd.com/video/438/Bakuman-Temporada-03" title="Mashiro al principio ansiaba convertirse en mangaka por la admiración que sentía
por su tio Nobuhiro Mashiro, hasta que este falleció y no quiso saber..."><img src="http://youanimehd.com/thumb/1_438.jpg" alt="Mashiro al principio ansiaba convertirse en mangaka por la admiración que sentía por su tio Nobuhiro Mashiro, hasta que este falleció y no quiso saber nada del manga; hasta que un día tras olvidarse un libro en clase se encuentra con Akito Takagi que le propone crear manga con él, Mashiro al principio se niega, pero tras las constantes insistencias de Takagi acaba aceptando. El duo de Mashiro y Takagi apodado como Ashirogi Muto descubrirán lo que es el mundo del manga." id="rotate_438_latestvideo" /></a>
</div>
<div class="videoTitle">
<a href="http://youanimehd.com/video/438/Bakuman-Temporada-03">Bakuman Temporada 03</a>
</div>
<div class="videoInfo">
<div class="videoViews">67 Views</div>
<div class="videoStars">
not yet rated
</div>
</div>
</li>
'''
patronvideos = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)" id="[^"]+" alt="[^"]+"[^<]+</a[^<]+</div[^<]+<div class="videoTitle"[^<]+<a[^>]+>([^<]+)</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for url,plot,thumbnail,title in matches:
scrapedtitle = title
fulltitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,url)
scrapedthumbnail = thumbnail
scrapedplot = plot
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=__channel__, action="serie" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, fulltitle=fulltitle, viewmode="movie_with_plot"))
patronvideos = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)" alt="[^"]+"[^<]+</a[^<]+</div[^<]+<div class="videoTitle"[^<]+<a[^>]+>([^<]+)</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for url,plot,thumbnail,title in matches:
scrapedtitle = title
fulltitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,url)
scrapedthumbnail = thumbnail
scrapedplot = plot
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=__channel__, action="serie" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, fulltitle=fulltitle, viewmode="movie_with_plot"))
patron = '<li><a href="([^"]+)">Next</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for match in matches:
if len(matches) > 0:
scrapedurl = matches[0]
scrapedtitle = "!Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="completo", title=scrapedtitle , url=scrapedurl) )
return itemlist
def letras(item):
logger.info("[youanimehd.py] letras")
itemlist = []
itemlist.append( Item(channel=__channel__, action="completo" , title="0- | 9", url="http://youanimehd.com/tags/0-9"))
itemlist.append( Item(channel=__channel__, action="completo" , title="A" , url="http://youanimehd.com/tags/a"))
itemlist.append( Item(channel=__channel__, action="completo" , title="B" , url="http://youanimehd.com/tags/b"))
itemlist.append( Item(channel=__channel__, action="completo" , title="C" , url="http://youanimehd.com/tags/c"))
itemlist.append( Item(channel=__chan | nel__, action="completo" , title="D" , url="http://youanimehd.com/tags/d"))
itemlist.append( Item(channel=__channel__, action="completo" , title="E" , url="http://youanimehd.com/tags/e"))
itemlist.append( Item(channel=__channel__, action="completo" , title="F" , url="http://youanimehd.com/tags/f"))
itemlist.append( Item(channel=__channel__, action="completo" , title="G" , url="http://youanimehd.com/tags/g"))
itemlist.append( Item(channel=__channel__, action="completo" , title="H" , url="http://youanimehd.com/tags/h"))
itemlist.append( Item(channel=__channel__, action="completo" , title="I" , url="http://youanimehd.com/tags/i"))
itemlist.append( Item(channel=__channel__, action="completo" , title="J" , url="http://youanimehd.com/tags/j"))
itemlist.append( Item(channel=__channel__, action="completo" , title="K" , url="http://youanimehd.com/tags/k"))
itemlist.append( Item(channel=__channel__, action="completo" , title="L" , url="http://youanimehd.com/tags/l"))
itemlist.append( Item(channel=__channel__, action="completo" , title="M" , url="http://youanimehd.com/tags/m"))
itemlist.append( Item(channel=__channel__, action="completo" , title="N" , url="http://youanimehd.com/tags/n"))
itemlist.append( Item(channel=__channel__, action="completo" , title="O" , url="http://youanimehd.com/tags/o"))
itemlist.append( Item(channel=__channel__, action="completo" , title="P" , url="http://youanimehd.com/tags/p"))
itemlist.append( Item(channel=__channel__, action="completo" , title="Q" , url="http://youanimehd.com/tags/q"))
itemlist.append( Item(channel=__channel__, action="completo" , title="R" , url="http://youanimehd.com/tags/r"))
itemlist.append( Item(channel=__channel__, action="completo" , title="S" , url="http://youanimehd.com/tags/s"))
itemlist.append( Item(channel=__channel__, action="completo" , title="T" , url="http://youanimehd.com/tags/t"))
itemlist.append( Item(channel=__channel__, action="completo" , title="U" , url="http://youanimehd.com/tags/u"))
itemlist.append( Item(channel=__channel__, action="completo" , title="V" , url="http://youanimehd.com/tags/v"))
itemlist.append( Item(channel=__channel__, action="completo" , title="W" , url="http://youanimehd.com/tags/w"))
itemlist.app |
TeamBasedLearning/Service | pgtbl/core/tests/test_register_news.py | Python | gpl-3.0 | 2,899 | 0 | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from accounts.models import User
from core.models import News
class CreateNewsTestCase(APITestCase):
"""
Unit test case to test creating a news in the system.
"""
def setUp(self):
"""
This method will run before any test.
"""
self.superuser = User.objects.create_superuser(
name='Victor Arnaud',
email='victorhad@gmail.com',
password='victorhad123456'
)
self.user = User.objects.create(
name='Pedro Calile',
email='pedro@gmail.com',
password='pedro123456'
)
self.url = reverse('news:list-create')
def tearDown(self):
"""
This method will run after any test.
"""
self.superuser.delete()
| self.user.delete()
def test_valid_create_news(self):
| """
Admin create a news in the system.
"""
self.client.force_authenticate(self.superuser)
self.assertEquals(News.objects.count(), 0)
data = {
'title': 'News title',
'description': 'News description...',
}
response = self.client.post(self.url, data)
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
self.assertEquals(News.objects.count(), 1)
def test_invalid_title_created_news(self):
"""
Admin can't create a user without a title.
"""
self.client.force_authenticate(self.superuser)
self.assertEquals(News.objects.count(), 0)
data = {
'description': 'News description...',
}
response = self.client.post(self.url, data)
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(News.objects.count(), 0)
def test_invalid_description_create_news(self):
"""
Admin can't create a news without a description.
"""
self.client.force_authenticate(self.superuser)
self.assertEquals(News.objects.count(), 0)
data = {
'title': 'News title',
}
response = self.client.post(self.url, data)
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(News.objects.count(), 0)
def test_invalid_user_not_admin_create_news(self):
"""
Only admin can create a news.
"""
self.client.force_authenticate(self.user)
self.assertEquals(News.objects.count(), 0)
data = {
'title': 'News title',
'description': 'News descriptions...',
}
response = self.client.post(self.url, data)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEquals(News.objects.count(), 0)
|
wetneb/django | tests/gis_tests/geo3d/tests.py | Python | bsd-3-clause | 12,923 | 0.002089 | from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its altitude (Z value).
city_dict = {name: coords for name, coords in city_data}
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
'-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
'-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
'-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
'-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
'-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
'-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
'-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
'-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
'-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
'-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
'942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_storage")
class Geo3DTest(TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
"""
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self | ._load_city_data()
for name, pnt_data in ci | ty_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point': 'POINT'}
mpoint_mapping = {'mpoint': 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
@skipUnlessDBFeature("supports_3d_functions")
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(poin |
TamiaLab/carnetdumaker | apps/loginwatcher/admin.py | Python | agpl-3.0 | 520 | 0 | """ |
Admin views for the log watcher app.
"""
from django.contrib import admin
from .models import LogEvent
class LogEventAdmin(admin.ModelAdmin):
"""
Admin form for the ``LogEvent`` data model.
"""
list_display = ('event_date',
'type',
'username',
'ip_address')
search_fields = ('username',
'ip_address')
list_filter = ('type',
'event_date')
admin.site.register(LogEvent, LogEv | entAdmin)
|
erp12/pyshgp | examples/iris.py | Python | mit | 1,859 | 0.001076 | import random
import pandas as pd
from pyshgp.push.instruction_set import InstructionSet
from pyshgp.gp.estimators import PushEstimator
from pyshgp.gp.genome import GeneSpawner
# A sample of the famous Iris dataset.
data = pd.DataFrame(
data=[
[5.1, 3.5, 1.4, 0.2, 0],
[4.9, 3.0, 1.4, 0.2, 0],
[4.7, 3.2, 1.3, 0.2, 0],
[4.6, 3.1, 1.5, 0.2, 0],
[5.0, 3.6, 1.4, 0.2, 0],
[5.4, 3.9, 1.7, 0.4, 0],
[4.6, 3.4, | 1.4, 0.3, 0],
[5.0, 3.4, 1.5, 0.2, 0],
[4.4, 2.9, 1.4, 0.2, 0],
[4.9, 3.1, 1.5, 0.1, 0],
[7.0, 3.2, 4.7, 1.4, 1],
[6.4, 3.2, 4.5, 1.5, 1],
[6.9, 3.1, 4.9, 1.5, 1],
[5.5, 2.3, 4.0, 1.3, 1] | ,
[6.5, 2.8, 4.6, 1.5, 1],
[5.7, 2.8, 4.5, 1.3, 1],
[6.3, 3.3, 4.7, 1.6, 1],
[4.9, 2.4, 3.3, 1.0, 1],
[6.6, 2.9, 4.6, 1.3, 1],
[5.2, 2.7, 3.9, 1.4, 1],
[6.3, 3.3, 6.0, 2.5, 2],
[5.8, 2.7, 5.1, 1.9, 2],
[7.1, 3.0, 5.9, 2.1, 2],
[6.3, 2.9, 5.6, 1.8, 2],
[6.5, 3.0, 5.8, 2.2, 2],
[7.6, 3.0, 6.6, 2.1, 2],
[4.9, 2.5, 4.5, 1.7, 2],
[7.3, 2.9, 6.3, 1.8, 2],
[6.7, 2.5, 5.8, 1.8, 2],
[7.2, 3.6, 6.1, 2.5, 2],
],
columns=["sepal_length", "sepal_width", "petal_length", "petal_width", "label"]
)
spawner = GeneSpawner(
n_inputs=1,
instruction_set=InstructionSet().register_core_by_stack({"bool", "int", "float"}),
literals=[0, 1, 2],
erc_generators=[
lambda: random.randint(0, 10),
random.random
]
)
if __name__ == "__main__":
est = PushEstimator(
spawner=spawner,
population_size=300,
max_generations=100,
verbose=2
)
x = data[["sepal_length", "sepal_width", "petal_length", "petal_width"]]
y = data[["label"]]
est.fit(x, y)
|
manhhomienbienthuy/pythondotorg | users/migrations/0004_auto_20150503_2100.py | Python | apache-2.0 | 776 | 0.002577 | from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20150503_2026'),
]
operations = [
migrations.AddField(
mod | el_name='membership',
name='membership_type',
field=models.IntegerField(choices=[(0, 'Basic Member'), (1, 'Supporting Member'), (2, 'Sponsor Member'), (3, 'Managing Member'), (4, 'Contributing Member'), (5, 'Fellow')], default=0),
preserve_default=True,
),
migrations.AlterField(
| model_name='membership',
name='votes',
field=models.BooleanField(verbose_name='I would like to be a PSF Voting Member', default=False),
preserve_default=True,
),
]
|
oaksharks/BBS | main/test.py | Python | gpl-3.0 | 211 | 0.031746 | #coding:utf8
import httplib2
import urllib
from connutils import HTTP
import json
import re
import bs4
| import MySQLdb
aa={"name":"wuhaifeng","age":11}
# i | n 可以判断map中是否包含某个key
print aa["wu"] |
TakayukiSakai/tensorflow | tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py | Python | apache-2.0 | 5,371 | 0.005399 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data in a constant.
Run using bazel:
bazel run -c opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size. '
'Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', '/tmp/data',
'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
'for unit testing.')
def run_training():
"""Train MNIST for a number of epochs."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
with tf.name_scope('input'):
# Input data, pin to CPU because rest of pipeline is CPU-only
with tf.device('/cpu:0'):
input_images = tf.constant(data_sets.train.images)
input_labels = tf.constant(data_sets.train.labels)
image, label = tf.train.slice_input_producer(
[inpu | t_images, input_labels], num_epochs=FLAGS.num_epochs)
label = tf.cast(label, tf.int32)
images, labels = tf.train.batch(
[image, label], batch_size=FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist | .training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create the op for initializing variables.
init_op = tf.initialize_all_variables()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init_op)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# And then after everything is built, start the training loop.
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
# Save a checkpoint periodically.
if (step + 1) % 1000 == 0:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
|
exercism/python | exercises/practice/grade-school/grade_school.py | Python | mit | 225 | 0 | class S | chool:
def __init__(self):
pass
def add_student(self, name, grade):
pass
def roster(self):
pass
def grade(self, grade_number):
pass
| def added(self):
pass
|
shiftkey/electron | build/zip.py | Python | mit | 1,544 | 0.016839 | #!/usr/bin/env python
import os
import subprocess
import sys
import zipfile
LINUX_BINARIES_TO_STRIP = [
'electron',
'libffmpeg.so',
'libnode.so'
]
def strip_binaries(target_cpu, dep):
for binary in LINUX_BINARIES_TO_STRIP:
if dep.endswith(binary):
strip_binary(dep, target_cpu)
def strip_binary(binary_path, target_cpu):
if target_cpu == 'arm':
strip = 'arm-linux-gnueabihf-strip'
elif t | arget_cpu == 'arm64':
strip = 'aarch64-linux-gnu-strip'
elif target_cpu == 'mips64el':
strip = 'mips64el-redhat-linux-strip'
else:
strip = 'strip'
execute([strip, binary_path])
def execute(argv):
try:
output = subprocess.check_output(argv, stderr=subprocess.STDOUT)
return output
except subprocess.CalledProcessError as e:
print e.output
raise e
def main(argv):
dist_zip, runtime_deps, target_cpu, target_os = argv
dist_files = []
with open(runtime | _deps) as f:
for dep in f.readlines():
dep = dep.strip()
dist_files += [dep]
if sys.platform == 'darwin':
mac_zip_results = execute(['zip', '-r', '-y', dist_zip] + dist_files)
else:
with zipfile.ZipFile(dist_zip, 'w', zipfile.ZIP_DEFLATED) as z:
for dep in dist_files:
if target_os == 'linux':
strip_binaries(target_cpu, dep)
if os.path.isdir(dep):
for root, dirs, files in os.walk(dep):
for file in files:
z.write(os.path.join(root, file))
else:
z.write(dep)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Scan-o-Matic/scanomatic | tests/unit/image_analysis/conftest.py | Python | gpl-3.0 | 470 | 0 | from __future__ import absolute_import
import os
import pytest
from scipy import ndimage
TESTDATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata')
@pytest.fixture(scope='session')
def easy_plate():
return ndimage.io.imread(
os.path.join(TESTDATA, 'test_fixture_easy.tiff')
)
|
@pyt | est.fixture(scope='session')
def hard_plate():
return ndimage.io.imread(
os.path.join(TESTDATA, 'test_fixture_difficult.tiff')
)
|
l8orre/nxtPwt | nxtPwt/nxtWin5Control1.py | Python | mit | 2,721 | 0.016538 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2014 l8orre
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
#import sys
from PyQt4 import QtGui, Qt, QtCore
from PyQt4.QtCore import QObject , pyqtSignal, pyqtSlot, SIGNAL
from PyQt4.QtCore import QObject
#import numpy as np
from os import listdir as ls
#from PyQt4.Qt import QPixmap
import os
import time
import nxtPwt
#import requests, json
class nxtWin5Control(QObject):
""" class nxtWin5Control(): here"""
def __init__(self, app): #, application
super(QObject, self, ).__init__()
import nxtPwt.ui_nxtWin5 as nxtWin5 # the QtCreator-generated Widget.py!!
ui = nxtWin5.Ui_MainWindow() # Ui_MainWindow() is the autogenerated class in m2def.py
self.ui_nxtWin5 = ui
self.app = app #
# self.userDataContainer = self.app.nxtMain.userDataContainer
self.server = ''
self.account =''
self.secretPhrase = ''
#self.app.algo.ui_nxtWin = ui # make the ui_AlgoWin known to the Algo!!! this is N( at the algo when init'ing
self.app.nxtWin5 = self # make thi | s WinControl1 known
def init(self): #, ui_AlgoWin):
""" the AlgoWin """
# maybe this gives trouble w/ MainWIn, self.app.algo = Algo1(self.app, u | i)
### re - init hte algo here!!!
ui = self.ui_nxtWin5
############################
############################
############################
########## Window Maintenance
def show(self):
self.uiFrame = QtGui.QMainWindow()
self.ui_nxtWin5.setupUi(self.uiFrame)
self.init() #self.ui_AlgoWin)
self.uiFrame.show()
|
RemiZOffAlex/specialistoff.net | Zabbix/scripts/test.py | Python | mit | 1,067 | 0.008563 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import argparse
import os
import re
import subprocess
import sys
import traceback
import json
def main():
parser = argparse.ArgumentParser(description='Test discovery for Zabbix',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--discovery", dest="discovery", action='store_true', help="Режим обнаружения")
parser.add_argument('integers', metavar='N', | type=int, nargs='*',
help='an integer for the accumulator')
args = parser.parse_args()
if args.discovery:
data = [
{"{#T1}": "1","{#T2}": "1"},
{"{#T1}": "1","{#T2}": "2"},
{"{#T1}": "2","{#T2}": "1"},
{"{#T1}": "2","{#T2}": "2"}
]
result = json.dumps({"data": data})
print result
else:
print str(args.integers[0] + args.integers[1])
if __name__ == "__main__":
try:
main()
exce | pt Exception, ex:
traceback.print_exc(file=sys.stdout)
exit(1)
exit(0)
|
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/examples/openmdao.examples.expected_improvement/openmdao/examples/expected_improvement/test/test_single_objective_ei.py | Python | mit | 1,872 | 0.000534 | """
Test for single criteria EI example.
"""
import logging
import os.path
import random
import unittest
from numpy import pi
from openmdao.examples.expected_improvement.single_objective_ei import Analysis
from openmdao.main.api import set_as_top
from pyevolve import Selectors
class SingleObjectiv | eEITest(unittest.TestCase):
"""Test to make sure the EI sample problem works as it should"""
def tearDown(self):
if os.path.exists('adapt.csv'):
os.remove('adapt.csv')
def test_EI(self):
random.seed(0)
# pyevolve does some caching that causes failures during our
# c | omplete unit tests due to stale values in the cache attributes
# below, so reset them here
Selectors.GRankSelector.cachePopID = None
Selectors.GRankSelector.cacheCount = None
Selectors.GRouletteWheel.cachePopID = None
Selectors.GRouletteWheel.cacheWheel = None
analysis = Analysis()
set_as_top(analysis)
# analysis.DOE_trainer.DOEgenerator = FullFactorial(num_levels=10)
analysis.run()
# This test looks for the presence of at least one point close to
# each optimum.
print analysis.ei.EI
print analysis.meta.x
print analysis.meta.y
points = [(-pi, 12.275, .39789), (pi, 2.275, .39789), (9.42478, 2.745, .39789)]
errors = []
for x, y, z in points:
analysis.meta.x = x
analysis.meta.y = y
analysis.meta.execute()
errors.append(abs((analysis.meta.f_xy.mu - z) / z * 100))
avg_error = sum(errors) / float(len(errors))
logging.info('#errors %s, sum(errors) %s, avg_error %s',
len(errors), sum(errors), avg_error)
self.assertTrue(avg_error <= 35)
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
miur/miur | OLD/miur/graph/transform.py | Python | gpl-3.0 | 554 | 0.001805 | #
# SPDX-FileCopyrightText: 2017 Dmytro Kolomoiets <amerlyq@gmail.com> and c | ontributors.
#
# SPDX-License-Identifier: GPL-3.0-only
#
class NodeSuperimposeTr(object):
    """Superimpose an auxiliary graph onto *g*, mapping its root to *node_uid*."""
    def __call__(self, g, node_uid, aug):
        root = aug.get_root()
        uid_map = {}
        # Pass 1: materialize every node of *aug* inside *g*. The root's
        # payload overwrites the existing node; all others become new objects.
        for src in aug:
            if src == root:
                g[node_uid] = aug[src]
                uid_map[src] = node_uid
            else:
                uid_map[src] = g.add_object(aug[src])
        # Pass 2: replicate the edges through the translated uids.
        for src in aug:
            for dst in aug.neighbors(src):
                g.add_arrow(uid_map[src], uid_map[dst])
| |
Cisco-Talos/pyrebox | volatility/volatility/plugins/overlays/windows/vista_sp12_x86_syscalls.py | Python | gpl-2.0 | 43,170 | 0.05388 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
syscalls = [
[
'NtAcceptConnectPort', # 0x0
'NtAccessCheck', # 0x1
'NtAccessCheckAndAuditAlarm', # 0x2
'NtAccessCheckByType', # 0x3
'NtAccessCheckByTypeAndAuditAlarm', # 0x4
'NtAccessCheckByTypeResultList', # 0x5
'NtAccessCheckByTypeResultListAndAuditAlarm', # 0x6
'NtAccessCheckByTypeResultListAndAuditAlarmByHandle', # 0x7
'NtAddAtom', # 0x8
'NtAddBootEntry', # 0x9
'NtAddDriverEntry', # 0xa
'NtAdjustGroupsToken', # 0xb
'NtAdjustPrivilegesToken', # 0xc
'NtAlertResumeThread', # 0xd
'NtAlertThread', # 0xe
'NtAllocateLocallyUniqueId', # 0xf
'NtAllocateUserPhysicalPages', # 0x10
'NtAllocateUuids', # 0x11
'NtAllocateVirtualMemory', # 0x12
'NtAlpcAcceptConnectPort', # 0x13
'NtAlpcCancelMessage', # 0x14
'NtAlpcConnectPort', # 0x15
'NtAlpcCreatePort', # 0x16
'NtAlpcCreatePortSection', # 0x17
'NtAlpcCreateResourceReserve', # 0x18
'NtAlpcCreateSectionView', # 0x19
'NtAlpcCreateSecurityContext', # 0x1a
'NtAlpcDeletePortSection', # 0x1b
'NtAlpcDeleteResourceReserve', # 0x1c
'NtAlpcDeleteSectionView', # 0x1d
'NtAlpcDeleteSecurityContext', # 0x1e
'NtAlpcDisconnectPort', # 0x1f
'NtAlpcImpersonateClientOfPort', # 0x20
'NtAlpcOpenSenderProcess', # 0x21
'NtAlpcOpenSenderThread', # 0x22
'NtAlpcQueryInformation', # 0x23
'NtAlpcQueryInformationMessage', # 0x24
'NtAlpcRevokeSecurityContext', # 0x25
'NtAlpcSendWaitReceivePort', # 0x26
'NtAlpcSetInformation', # 0x27
'NtApphelpCacheControl', # 0x28
'NtAreMappedFilesTheSame', # 0x29
'NtAssignProcessToJobObject', # 0x2a
'NtCallbackReturn', # 0x2b
'NtCancelDeviceWakeupRequest', # 0x2c
'NtCancelIoFile', # 0x2d
'NtCancelTimer', # 0x2e
'NtClearEvent', # 0x2f
'NtClose', # 0x30
'NtCloseObjectAuditAlarm', # 0x31
'NtCompactKeys', # 0x32
'NtCompareTokens', # 0x33
'NtCompleteConnectPort', # 0x34
'NtCompressKey', # 0x35
'NtConnectPort', # 0x36
'NtContinue', # 0x37
'NtCreateDebugObject', # 0x38
'NtCreateDirectoryObject', # 0x39
'NtCreateEvent', # 0x3a
'NtCreateEventPair', # 0x3b
'NtCreateFile', # 0x3c
'NtCreateIoCompletion', # 0x3d
'NtCreateJobObject', # 0x3e
'NtCreateJobSet', # 0x3f
'NtCreateKey', # 0x40
'NtCreateKeyTransacted', # 0x41
'NtCreateMailslotFile', # 0x42
'NtCreateMutant', # 0x43
'NtCreateNamedPipeFile', # 0x44
'NtCreatePrivateNamespace', # 0x45
'NtCreatePagingFile', # 0x46
'NtCreatePort', # 0x47
    'NtCreateProcess', # 0x48
'NtCreateProcessEx', # 0x49
'NtCreateProfile', # 0x4a
'NtCreateSection', # 0x4b
    'NtCreateSemaphore', # 0x4c
'NtCreateSymbolicLinkObject', # 0x4d
'NtCreateThread', # 0x4e
'NtCreateTimer', # 0x4f
'NtCreateToken', # 0x50
'NtCreateTransaction', # 0x51
'NtOpenTransaction', # 0x52
'NtQueryInformationTransaction', # 0x53
'NtQueryInformationTransactionManager', # 0x54
'NtPrePrepareEnlistment', # 0x55
'NtPrepareEnlistment', # 0x56
'NtCommitEnlistment', # 0x57
'NtReadOnlyEnlistment', # 0x58
'NtRollbackComplete', # 0x59
'NtRollbackEnlistment', # 0x5a
'NtCommitTransaction', # 0x5b
'NtRollbackTransaction', # 0x5c
'NtPrePrepareComplete', # 0x5d
'NtPrepareComplete', # 0x5e
'NtCommitComplete', # 0x5f
'NtSinglePhaseReject', # 0x60
'NtSetInformationTransaction', # 0x61
'NtSetInformationTransactionManager', # 0x62
'NtSetInformationResourceManager', # 0x63
'NtCreateTransactionManager', # 0x64
'NtOpenTransactionManager', # 0x65
'NtRenameTransactionManager', # 0x66
'NtRollforwardTransactionManager', # 0x67
'NtRecoverEnlistment', # 0x68
'NtRecoverResourceManager', # 0x69
'NtRecoverTransactionManager', # 0x6a
'NtCreateResourceManager', # 0x6b
'NtOpenResourceManager', # 0x6c
'NtGetNotificationResourceManager', # 0x6d
'NtQueryInformationResourceManager', # 0x6e
'NtCreateEnlistment', # 0x6f
'NtOpenEnlistment', # 0x70
'NtSetInformationEnlistment', # 0x71
'NtQueryInformationEnlistment', # 0x72
'NtCreateWaitablePort', # 0x73
'NtDebugActiveProcess', # 0x74
'NtDebugContinue', # 0x75
'NtDelayExecution', # 0x76
'NtDeleteAtom', # 0x77
'NtDeleteBootEntry', # 0x78
'NtDeleteDriverEntry', # 0x79
'NtDeleteFile', # 0x7a
'NtDeleteKey', # 0x7b
'NtDeletePrivateNamespace', # 0x7c
'NtDeleteObjectAuditAlarm', # 0x7d
'NtDeleteValueKey', # 0x7e
'NtDeviceIoControlFile', # 0x7f
'NtDisplayString', # 0x80
'NtDuplicateObject', # 0x81
'NtDuplicateToken', # 0x82
'NtEnumerateBootEntries', # 0x83
'NtEnumerateDriverEntries', # 0x84
'NtEnumerateKey', # 0x85
'NtEnumerateSystemEnvironmentValuesEx', # 0x86
'NtEnumerateTransactionObject', # 0x87
'NtEnumerateValueKey', # 0x88
'NtExtendSection', # 0x89
'NtFilterToken', # 0x8a
'NtFindAtom', # 0x8b
'NtFlushBuffersFile', # 0x8c
'NtFlushInstructionCache', # 0x8d
'NtFlushKey', # 0x8e
'NtFlushProcessWriteBuffers', # 0x8f
'NtFlushVirtualMemory', # 0x90
'NtFlushWriteBuffer', # 0x91
'NtFreeUserPhysicalPages', # 0x92
'NtFreeVirtualMemory', # 0x93
'NtFreezeRegistry', # 0x94
'NtFreezeTransactions', # 0x95
'NtFsControlFile', # 0x96
'NtGetContextThread', # 0x97
'NtGetDevicePowerState', # 0x98
'NtGetNlsSectionPtr', # 0x99
'NtGetPlugPlayEvent', # 0x9a
'NtGetWriteWatch', # 0x9b
'NtImpersonateAnonymousToken', # 0x9c
'NtImpersonateClientOfPort', # 0x9d
'NtImpersonateThread', # 0x9e
'NtInitializeNlsFiles', # 0x9f
'NtInitializeRegistry', # 0xa0
'NtInitiatePowerAction', # 0xa1
'NtIsProcessInJob', # 0xa2
'NtIsSystemResumeAutomatic', # 0xa3
'NtListenPort', # 0xa4
'NtLoadDriver', # 0xa5
'NtLoadKey', # 0xa6
'NtLoadKey2', # 0xa7
'NtLoadKeyEx', # 0xa8
'NtLockFile', # 0xa9
'NtLockProductActivationKeys', # 0xaa
'NtLockRegistryKey', # 0xab
'NtLockVirtualMemory', # 0xac
'NtMakePermanentObject', # 0xad
'NtMakeTemporaryObject', # 0xae
'NtMapUserPhysicalPages', # 0xaf
'NtMapUserPhysicalPagesScatter', # 0xb0
'NtMapViewOfSection', # 0xb1
'NtModifyBootEntry', # 0xb2
'NtModifyDriverEntry', # 0xb3
'NtNotifyChangeDirectoryFile', # 0xb4
'NtNotifyChangeKey', # 0xb5
'NtNotifyChangeMultipleKeys', # 0xb6
'NtOpenDirectoryObject', # 0xb7
'NtOpenEvent', # 0xb8
'NtOpenEventPair', # 0xb9
'NtOpenFile', # 0xba
'NtOpenIoCompletion', # 0xbb
'NtOpenJobObject', # 0xbc
'NtOpenKey', # 0xbd
'NtOpenKeyTransacted', # 0xbe
'NtOpenMutant', # 0xbf
'NtOpenPrivateNamespace', # 0xc0
'NtOpenObjectAuditAlarm', # 0xc1
'NtOpenProcess', # 0xc2
'NtOpenProcessToken', # 0xc3
'NtOpenProcessTokenEx', # 0xc4
'NtOpenSection', # 0xc5
'NtOpenSemaphore', # 0xc6
'NtOpenSession', # 0xc7
'NtOpenSymbolicLinkObject', # 0xc8
'NtOpenThread', # 0xc9
'NtOpenThreadToken', # 0xca
'NtOpenThreadTokenEx', # 0xcb
'NtOpenTimer', # 0xcc
'NtPlugPlayControl', # 0xcd
'NtPowerInformation', # 0xce
'NtPrivilegeCheck', # 0xcf
'NtPrivilegeObjectAuditAlarm', # 0xd0
'NtPrivilegedServiceAuditAlarm', # 0xd1
'NtProtectVirtualMemory', # 0xd2
'NtPulseEvent', # 0xd3
'NtQueryAttributesFile', # 0xd4
'NtQueryBootEntryOrder', # 0xd5
'NtQueryBootOptions' |
vncastanheira/noirgame | player.py | Python | lgpl-3.0 | 3,021 | 0.032771 | # Copyright Vinicius Castanheira (vncastanheira@gmail.com) - 2012
# This program is a part of the Noir Game.
# This program is under the Gnu LGPL.
from pygame import *
from pyganim import *
from entity import Entity
from bullet import Bullet
class Player(Entity):
    """The controllable protagonist: walks, shoots, hides in shadow, dies."""
    def __init__(self, x, y, images):
        Entity.__init__(self, x, y, images)
        # Properties
        self.alive = True
        self.isHidden = True
        self.bullets = 6
        # Directions variables
        self.goLeft = self.goRight = False
        self.facingDirection = 'RIGHT'
        # An exclamation mark, showing that the player is visible
        self.exclamation = Entity(self.rect.centerx, self.rect.top + 16, ["exclamation.png"])
        # Group of bullets objects, for updating and drawing
        self.bulletGroup = sprite.Group()
        # Dead animation
        self.animationDead = PygAnimation([("player_dead.png", 0.5)])
    def events(self, event):
        """Handle one pygame event: arrows move/turn, Z fires (only while
        visible and with ammo). All input is ignored once the player is dead."""
        if self.alive:
            if event.type == KEYDOWN and event.key == K_RIGHT:
                self.goRight = True
                self.goLeft = False
                if self.facingDirection == 'LEFT':
                    self.flip()
                    self.facingDirection = 'RIGHT'
            if event.type == KEYDOWN and event.key == K_LEFT:
                self.goRight = False
                self.goLeft = True
                if self.facingDirection == 'RIGHT':
                    self.flip()
                    self.facingDirection = 'LEFT'
            if event.type == KEYDOWN and event.key == K_z and not self.isHidden and self.bullets > 0:
                shoot = mixer.Sound("shoot.ogg")
                shoot.play()
                # Spawn the bullet at whichever edge the player is facing.
                bulletDirection = 0
                if self.facingDirection == 'RIGHT':
                    bulletDirection = self.rect.right
                if self.facingDirection == 'LEFT':
                    bulletDirection = self.rect.left
                self.bulletGroup.add(Bullet(bulletDirection, self.rect.centery, self.facingDirection, ["bullet.png"]))
                self.bullets -= 1
            if event.type == KEYUP and event.key == K_RIGHT:
                self.goRight = False
            if event.type == KEYUP and event.key == K_LEFT:
                self.goLeft = False
    def update(self):
        """Move 4px/frame, clamp at the left edge, and return 'NEXTLEVEL'
        once centerx reaches 640 (presumably the screen width - confirm)."""
        movement = 4
        if self.alive:
            if self.rect.centerx - movement <=0 :
                self.rect.centerx = 4
            if self.goRight:
                self.move(movement,0)
            if self.goLeft:
                self.move(-movement,0)
            if self.rect.centerx >= 640:
                return 'NEXTLEVEL'
    def draw(self, display):
        """Blit the walking animation (plus the '!' marker when spotted),
        or the death frame when no longer alive."""
        if self.alive:
            self.animation.play()
            self.animation.blit(display, (self.rect.x,self.rect.y))
            if not self.isHidden:
                self.exclamation.animation.play()
                self.exclamation.animation.blit(display, (self.rect.centerx - 4, self.rect.top - 20))
        else:
            self.animationDead.play()
            self.animationDead.blit(display, (self.rect.x,self.rect.y))
    # Another functions
    def flip(self):
        # Mirror the walking animation horizontally when turning around.
        self.animation.flip(True, False)
    def spotlightCollision(self, spolight):
        """Update visibility from a spotlight overlap; True when spotted."""
        if sprite.collide_rect(self, spolight):
            self.isHidden = False
            return True
        else:
            self.isHidden = True
            return False
    def bulletCollision(self, bullet):
        """Kill the player when hit while visible; True if the hit landed."""
        if sprite.collide_rect(self, bullet) and not self.isHidden and self.alive:
            die = mixer.Sound("dead.ogg")
            die.play()
            self.alive = False
            return True # Collision occurred
        return False # Otherwise
|
xudongyangwork/algo | day43/xudy.py | Python | mit | 251 | 0.007968 | # -*- coding:utf-8 -*- |
def length_of_last_word(str_):
    """Return the length of the last whitespace-separated word in *str_*.

    Fix: the original ``split(" ")`` keeps empty fields, so inputs with a
    trailing space (e.g. ``"a b "``) or only spaces wrongly returned 0.
    The argument-less ``split()`` collapses runs of whitespace and drops
    empty fields; an empty or all-space string returns 0.
    """
    words = str_.split()
    return len(words[-1]) if words else 0
if __name__ == '__main__':
    # Demo invocation: prints 5 for "world".
    print(length_of_last_word("hello world"))
cul-it/Invenio | modules/webstyle/lib/webinterface_handler_wsgi.py | Python | gpl-2.0 | 27,426 | 0.004704 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""mod_python->WSGI Framework"""
import sys
import os
import re
import cgi
import inspect
from fnmatch import fnmatch
from urlparse import urlparse, urlunparse
from wsgiref.validate import validator
from wsgiref.util import FileWrapper, guess_scheme
if __name__ != "__main__":
# Chances are that we are inside mod_wsgi.
## You can't write to stdout in mod_wsgi, but some of our
## dependecies do this! (e.g. 4Suite)
sys.stdout = sys.stderr
from invenio.session import get_session
from invenio.webinterface_handler import CFG_HAS_HTTPS_SUPPORT, CFG_FULL_HTTPS
from invenio.webinterface_layout import invenio_handler
from invenio.webinterface_handler_wsgi_utils import table, FieldStorage
from invenio.webinterface_handler_config import \
HTTP_STATUS_MAP, SERVER_RETURN, OK, DONE, \
HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR
from invenio.config import CFG_WEBDIR, CFG_SITE_LANG, \
CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST, CFG_DEVEL_SITE, CFG_SITE_URL, \
CFG_SITE_SECURE_URL, CFG_WEBSTYLE_REVERSE_PROXY_IPS
from invenio.errorlib import register_exception, get_pretty_traceback
## Static files are usually handled directly by the webserver (e.g. Apache)
## However in case WSGI is required to handle static files too (such
## as when running wsgiref simple server), then this flag can be
## turned on (it is done automatically by wsgi_handler_test).
CFG_WSGI_SERVE_STATIC_FILES = False
## Magic regexp to search for usage of CFG_SITE_URL within src/href or
## any src usage of an external website
_RE_HTTPS_REPLACES = re.compile(r"\b((?:src\s*=|url\s*\()\s*[\"']?)http\://", re.I)
## Regexp to verify that the IP starts with a number (filter cases where 'unknown')
## It is faster to verify only the start (585 ns) compared with verifying
## the whole ip address - re.compile('^\d+\.\d+\.\d+\.\d+$') (1.01 µs)
_RE_IPADDRESS_START = re.compile("^\d+\.")
def _http_replace_func(match):
    ## src external_site -> CFG_SITE_SECURE_URL/sslredirect/external_site
    attr_prefix = match.group(1)
    return '%s%s/sslredirect/' % (attr_prefix, CFG_SITE_SECURE_URL)
_ESCAPED_CFG_SITE_URL = cgi.escape(CFG_SITE_URL, True)
_ESCAPED_CFG_SITE_SECURE_URL = cgi.escape(CFG_SITE_SECURE_URL, True)
def https_replace(html):
    """Rewrite http:// references in *html* so the page stays on HTTPS."""
    swapped = html.replace(_ESCAPED_CFG_SITE_URL, _ESCAPED_CFG_SITE_SECURE_URL)
    return _RE_HTTPS_REPLACES.sub(_http_replace_func, swapped)
class InputProcessed(object):
    """
    Stand-in for a consumed ``wsgi.input`` stream: every read attempt fails.
    @see: <http://www.wsgi.org/wsgi/Specifications/handling_post_forms>.
    """
    def read(self, *args):
        raise EOFError('The wsgi.input stream has already been consumed')
    # All read-like entry points share the same failing implementation.
    readline = read
    readlines = read
    __iter__ = read
class SimulatedModPythonRequest(object):
"""
mod_python like request object.
Minimum and cleaned implementation to make moving out of mod_python
easy.
@see: <http://www.modpython.org/live/current/doc-html/pyapi-mprequest.html>
"""
    def __init__(self, environ, start_response):
        """Wrap a WSGI (environ, start_response) pair in a mod_python-like API."""
        self.__environ = environ
        self.__start_response = start_response
        self.__response_sent_p = False
        self.__buffer = ''
        self.__low_level_headers = []
        self.__headers = table(self.__low_level_headers)
        self.__headers.add = self.__headers.add_header
        self.__status = "200 OK"
        self.__filename = None
        self.__disposition_type = None
        self.__bytes_sent = 0
        self.__allowed_methods = []
        self.__cleanups = []
        self.headers_out = self.__headers
        ## See: <http://www.python.org/dev/peps/pep-0333/#the-write-callable>
        self.__write = None
        self.__write_error = False
        self.__errors = environ['wsgi.errors']
        self.__headers_in = table([])
        self.__tainted = False
        self.__is_https = int(guess_scheme(self.__environ) == 'https')
        self.__replace_https = False
        self.track_writings = False
        self.__what_was_written = ""
        # Expose HTTP_* environ entries as dash-separated header names.
        for key, value in environ.iteritems():
            if key.startswith('HTTP_'):
                self.__headers_in[key[len('HTTP_'):].replace('_', '-')] = value
        # CONTENT_LENGTH / CONTENT_TYPE are not HTTP_-prefixed in WSGI.
        if environ.get('CONTENT_LENGTH'):
            self.__headers_in['content-length'] = environ['CONTENT_LENGTH']
        if environ.get('CONTENT_TYPE'):
            self.__headers_in['content-type'] = environ['CONTENT_TYPE']
    def get_wsgi_environ(self):
        return self.__environ
    def get_post_form(self):
        """Parse (at most once) and return the POSTed form as a FieldStorage."""
        self.__tainted = True
        post_form = self.__environ.get('wsgi.post_form')
        input = self.__environ['wsgi.input']
        # Reuse the cached parse only if it was built from this same stream.
        if (post_form is not None
            and post_form[0] is input):
            return post_form[2]
        # This must be done to avoid a bug in cgi.FieldStorage
        self.__environ.setdefault('QUERY_STRING', '')
        ## Video handler hack:
        uri = self.__environ['PATH_INFO']
        if uri.endswith("upload_video"):
            tmp_shared = True
        else:
            tmp_shared = False
        fs = FieldStorage(self, keep_blank_values=1, to_tmp_shared=tmp_shared)
        if fs.wsgi_input_consumed:
            # The stream was eaten during parsing: replace it with a guard
            # object so any further read raises instead of hanging.
            new_input = InputProcessed()
            post_form = (new_input, input, fs)
            self.__environ['wsgi.post_form'] = post_form
            self.__environ['wsgi.input'] = new_input
        else:
            post_form = (input, None, fs)
            self.__environ['wsgi.post_form'] = post_form
        return fs
    def get_response_sent_p(self):
        return self.__response_sent_p
    def get_low_level_headers(self):
        return self.__low_level_headers
    def get_buffer(self):
        return self.__buffer
    def write(self, string, flush=1):
        """Append *string* to the output buffer (unicode is utf-8 encoded)."""
        if isinstance(string, unicode):
            self.__buffer += string.encode('utf8')
        else:
            self.__buffer += string
        if flush:
            self.flush()
    def flush(self):
        """Send the HTTP headers (if pending) and write out the buffered body."""
        self.send_http_header()
        if self.__buffer:
            self.__bytes_sent += len(self.__buffer)
            try:
                if not self.__write_error:
                    # On HTTPS pages rewrite embedded http:// links on the way out.
                    if self.__replace_https:
                        self.__write(https_replace(self.__buffer))
                    else:
                        self.__write(self.__buffer)
                    if self.track_writings:
                        if self.__replace_https:
                            self.__what_was_written += https_replace(self.__buffer)
                        else:
                            self.__what_was_written += self.__buffer
            except IOError, err:
                if "failed to write data" in str(err) or "client connection closed" in str(err):
                    ## Let's just log this exception without alerting the admin:
                    register_exception(req=self)
                    self.__write_error = True ## This flag is there just
                    ## to not report later other errors to the admin.
                else:
                    raise
        self.__buffer = ''
    def set_content_type(self, content_type):
        self.__headers['content-type'] = content_type
        # Only HTML/RSS responses get the http:// -> https:// rewriting.
        if self.__is_https:
            if content_type.startswith("text/html") or content_type.startswith("application/rss+xml"):
                self.__replace_https = True
    def get_content_type(self):
        return self.__headers['content-type']
def send_http_header(self):
|
laborautonomo/youtube-dl | youtube_dl/extractor/howcast.py | Python | unlicense | 1,474 | 0.004749 | from __future__ import unico | de_literals
import re
from .common import InfoExtractor
class HowcastIE(InfoExtractor):
    """Extractor for howcast.com video pages."""
    _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
        'md5': '8b743df908c42f60cf6496586c7f12c3',
        'info_dict': {
            'id': '390161',
            'ext': 'mp4',
            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
            'title': 'How to Tie a Square Knot Properly',
        }
    }

    def _real_extract(self, url):
        # The numeric id is the only named group in _VALID_URL.
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)
        self.report_extraction(video_id)

        video_url = self._search_regex(
            r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
            webpage, 'video URL')
        video_description = self._html_search_regex(
            r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
            webpage, 'description', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'thumbnail': self._og_search_thumbnail(webpage),
            'title': self._og_search_title(webpage),
            'description': video_description,
        }
|
chiehtu/kissaten | manage.py | Python | mit | 311 | 0 | #!/usr/bin/env p | ython
import os
import sys
import dotenv
if __name__ == "__main__":
    # Fall back to the local settings module unless one is already set,
    # and load project/.env before Django reads any configuration.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings.local")
    dotenv.read_dotenv('project/.env')
    # Imported late on purpose: DJANGO_SETTINGS_MODULE must be in place first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| |
krishnazure/ansible | lib/ansible/playbook/playbook_include.py | Python | gpl-3.0 | 5,458 | 0.003115 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.taggable import Taggable
from ansible.errors import AnsibleParserError
class PlaybookInclude(Base, Taggable):
    """Represents a top-level ``- include: other_playbook.yml`` entry.

    Loading one of these does not produce a PlaybookInclude instance for the
    play list; it loads and returns the included Playbook object instead.
    """
    _name = FieldAttribute(isa='string')
    _include = FieldAttribute(isa='string')
    _vars = FieldAttribute(isa='dict', default=dict())
    @staticmethod
    def load(data, basedir, variable_manager=None, loader=None):
        """Entry point: build a Playbook from the include datastructure."""
        return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
    def load_data(self, ds, basedir, variable_manager=None, loader=None):
        '''
        Overrides the base load_data(), as we're actually going to return a new
        Playbook() object rather than a PlaybookInclude object
        '''
        # import here to avoid a dependency loop
        from ansible.playbook import Playbook
        # first, we use the original parent method to correctly load the object
        # via the load_data/preprocess_data system we normally use for other
        # playbook objects
        new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
        # then we use the object to load a Playbook
        pb = Playbook(loader=loader)
        file_name = new_obj.include
        # Relative include paths are resolved against the including playbook.
        if not os.path.isabs(file_name):
            file_name = os.path.join(basedir, file_name)
        pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
        # finally, update each loaded playbook entry with any variables specified
        # on the included playbook and/or any tags which may have been set
        for entry in pb._entries:
            entry.vars.update(new_obj.vars)
            entry.tags = list(set(entry.tags).union(new_obj.tags))
        return pb
    def preprocess_data(self, ds):
        '''
        Regorganizes the data for a PlaybookInclude datastructure to line
        up with what we expect the proper attributes to be
        '''
        assert isinstance(ds, dict)
        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos
        for (k,v) in ds.iteritems():
            if k == 'include':
                self._preprocess_include(ds, new_ds, k, v)
            else:
                # some basic error checking, to make sure vars are properly
                # formatted and do not conflict with k=v parameters
                # FIXME: we could merge these instead, but controlling the order
                # in which they're encountered could be difficult
                if k == 'vars':
                    if 'vars' in new_ds:
                        raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                    elif not isinstance(v, dict):
                        raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
                new_ds[k] = v
        return super(PlaybookInclude, self).preprocess_data(new_ds)
    def _preprocess_include(self, ds, new_ds, k, v):
        '''
        Splits the include line up into filename and parameters
        '''
        # The include line must include at least one item, which is the filename
        # to include. Anything after that should be regarded as a parameter to the include
        items = split_args(v)
        if len(items) == 0:
            raise AnsibleParserError("include statements must specify the file name to include", obj=ds)
        else:
            # FIXME/TODO: validate that items[0] is a file, which also
            # exists and is readable
            new_ds['include'] = items[0]
            if len(items) > 1:
                # rejoin the parameter portion of the arguments and
                # then use parse_kv() to get a dict of params back
                params = parse_kv(" ".join(items[1:]))
                if 'tags' in params:
                    new_ds['tags'] = params.pop('tags')
                if 'vars' in new_ds:
                    # FIXME: see fixme above regarding merging vars
                    raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                new_ds['vars'] = params
|
PRX/Infrastructure | bin/continuity.py | Python | mit | 2,044 | 0.000489 | # TODO WIP this is meant to find discrepencies between the stack templates that
# are deployed to CloudFormation and what is checked in, and do some other
# basic sanity checks on the stacks and their configurations
import boto3
import re
# One shared client; all checks below work off a single stack listing.
cloudformation = boto3.client("cloudformation")
stacks = cloudformation.describe_stacks()

# Stack Notifications
# Examines all stacks to ensure they have the shared CloudFormation
# notification SNS topic configured as a notification ARN
cfn_topic = (
    "arn:aws:sns:us-east-1:561178107736:infrastructure-"
    "notifications-CloudFormationNotificationSnsTopic-2OCAWQM7S7BP"
)

print("======================================================================")
print("These stacks do NOT include the notification ARN:")
for stack in stacks["Stacks"]:
    if cfn_topic not in stack["NotificationARNs"]:
        print(f"{stack['StackName']}")

# Template continuity
# Compares the template for certain stacks, as they exist in CloudFormation,
# to your local copy. If you are on master these should not have any
# differences. The first line each template should contain a relative path
# to the file in the Infrastructure repo. If that path appears to be missing,
# this will report a warning
# Compile once; the first line is expected to look like "# path/to/stack.yml".
template_path_re = re.compile(r"\# ([a-zA-Z/_\-\.]+yml)")
print("======================================================================")
for stack in stacks["Stacks"]:
    cfn_template = cloudformation.get_template(StackName=stack["StackName"])
    cfn_body = cfn_template["TemplateBody"]
    cfn_first_line = cfn_body.split("\n", 1)[0]
    match = template_path_re.match(cfn_first_line)
    if match is None:
        print(f"Missing template path: {stack['StackName']}")
        continue
    local_path = f"../{match.group(1)}"
    try:
        # Context manager closes the handle (previously leaked).
        with open(local_path, "r") as f:
            local_body = f.read()
    except FileNotFoundError:
        # Bug fix: without this `continue`, the comparison below ran with a
        # stale (or, on the first iteration, undefined) local_body.
        print(f"File error: {stack['StackName']}")
        continue
    if local_body != cfn_body:
        print(f"Template mismatch: {stack['StackName']}")
|
jessamynsmith/boards-backend | blimp_boards/accounts/constants.py | Python | agpl-3.0 | 667 | 0 | BLACKLIST_SIGNUP_DOMAINS = ['gmail.com', 'yahoo.com', 'hotmail.com']
ACCOUNT_RESERVED_KEYWORDS = [
'company', 'admin', 'api', 'signout', 'reset', '404', '500',
'docs', 'signup', 'signin', 'invitation', 'chat', 'report', 'community',
'user', 'notification', 'notifications', 'feedback', 'media', 'static',
'uploads', 'users', | 'download', 'downloads', 'reports', 'redeem',
'invitations', '_admin', 'tos', 'privacy', 'login', 'register', 'logout',
'context', 'maintenance', 'error', '__debug__', 'webhook', 'approval',
'import', 'discussions', 'inbound_web | hook', 'workspace', 'workspaces',
'referrals', 'account', 'accounts', 'tasks'
]
|
hqpr/findyour3d | findyour3d/quote/tests.py | Python | mit | 814 | 0.003686 | from django.test import TestCase, Client
from django.urls import resolve
from django.core.urlresolvers import reverse
from django.utils import timezone
from findyour3d.users.models import User
class DashboardTests(TestCase):
    def setUp(self):
        # Create an active, paid user and log them in via the test client.
        # NOTE(review): the password is set through set_password() so it is
        # hashed rather than stored as given to create().
        self.starter_user = User.objects.create(username='test', is_active=True,
                                                email='test@test.com', user_type=2,
                                                payment_active=True, plan=1,
                                                paid_at=timezone.now())
        self.starter_user.set_password('1234567a')
        self.starter_user.save()
        self.client = Client()
        self.client.login(username='test', password='1234567a')
        # NOTE(review): 'outdate' is computed but never used here; the method
        # looks truncated - confirm against the full source file.
        outdate = timezone.now().date() - timezone.timedelta(days=40)
bradleyhd/netsim | mapserver/graph/downloader.py | Python | gpl-3.0 | 659 | 0.007587 | import logging
from urllib.request import urlopen
from shutil import copyfileobj
class MapDownloader(object):
    """Fetch OSM highway data for a bounding box from the Overpass XAPI."""

    def __init__(self):
        self.__log = logging.getLogger(__name__)

    def to_file(self, top, left, bottom, right, file_path='map.osm'):
        """Stream every way tagged highway=* inside the box into *file_path*."""
        self.__log.info('Downloading map data for [%f,%f,%f,%f]', top, left, bottom, right)
        # Overpass bbox order is left,bottom,right,top (lon/lat pairs).
        bbox = '%f,%f,%f,%f' % (left, bottom, right, top)
        url = 'http://www.overpass-api.de/api/xapi?way[bbox=' + bbox + '][highway=*]'
        with urlopen(url) as response, open(file_path, 'wb') as out_file:
            copyfileobj(response, out_file)
        self.__log.info('Download complete, saved as %s', file_path)
|
xu-cheng/youtube-dl | youtube_dl/postprocessor/execafterdownload.py | Python | unlicense | 865 | 0.002312 | from __future__ import unicode_literals
import subprocess
from .common import PostProcessor
from ..utils import (
shlex_quote,
PostProcessingError,
)
class ExecAfterDownloadPP(PostProcessor):
    """Run an arbitrary shell command once a download has finished.

    The command template may contain ``{}``, which is replaced with the
    shell-quoted path of the downloaded file; if the placeholder is absent,
    the quoted path is appended to the command.
    """

    def __init__(self, downloader=None, verboseOutput=None, exec_cmd=None):
        # Fix: initialize the base class so self._downloader is set from the
        # constructor argument (it was silently dropped before, leaving
        # _downloader unset unless set_downloader() was called separately).
        super(ExecAfterDownloadPP, self).__init__(downloader)
        self.verboseOutput = verboseOutput
        self.exec_cmd = exec_cmd

    def run(self, information):
        """Execute the configured command for the file in *information*.

        Returns ``(None, information)`` (keep the file, info unchanged).
        Raises PostProcessingError if the command exits non-zero.
        """
        cmd = self.exec_cmd
        if '{}' not in cmd:
            cmd += ' {}'
        cmd = cmd.replace('{}', shlex_quote(information['filepath']))

        self._downloader.to_screen("[exec] Executing command: %s" % cmd)
        retCode = subprocess.call(cmd, shell=True)
        if retCode != 0:
            raise PostProcessingError(
                'Command returned error code %d' % retCode)

        return None, information  # by default, keep file and do nothing
|
rsj217/tornado--scaffold | tornapro/tiger/app/views/auth.py | Python | mit | 1,227 | 0.00326 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'ghost'
import tornado.web
from app import router
from app.helper import BaseAuthHandler
from app.models.auth import User
@router.Route('/auth/login')
class AuthLoginHandler(BaseAuthHandler):
    """Serve a minimal username-only login form and authenticate against it."""

    def get(self, *args, **kwargs):
        # Inline form markup; implicit string concatenation keeps it readable.
        form = ('<form method="post">'
                '<label>username:<label>'
                '<input name="username">'
                '<input type="submit" value="login">'
                '</form>')
        self.finish(form)

    def post(self, *args, **kwargs):
        user = User.findone(username=self.get_argument('username'))
        if self.set_current_user(user):
            self.finish('login successful')
        else:
            self.finish('login fail')

    def set_current_user(self, user):
        """Store the user id in a secure cookie; clear it when *user* is falsy."""
        if not user:
            self.clear_cookie("user")
            return False
        self.set_secure_cookie('user', str(user.id), httponly=True)
        return True
@router.Route('/auth/logout')
class AuthLogoutHandler(BaseAuthHandler):
    """Log the current user out by clearing the session cookie."""
    @tornado.web.authenticated
    def get(self, *args, **kwargs):
        self.clear_cookie('user')
        self.finish('logout')
phobson/bokeh | tests/conftest.py | Python | bsd-3-clause | 1,448 | 0.001381 | import pytest
from tests.plugins.constants import default_upload
from tests.plugins.upload_to_s3 import upload_file_to_s3_by_job_id
pytest_plugins = (
"tests.examples.examples_report_plugin",
"tests.integration.integration_tests_plugin",
"tests.plugins.bokeh_server",
"tests.plugins.jupyter_notebook",
"tests.plugins.phantomjs_screenshot",
"tests.plugins.image_diff",
"tests.plugins.file_server",
"tests.plugins.upload_to_s3",
)
def pytest_addoption(parser):
    """Register the command-line switches shared by these test suites."""
    parser.addoption(
        "--upload", dest="upload", action="store_true",
        default=default_upload, help="upload test artefacts to S3",
    )
    parser.addoption(
        "--log-file", dest="log_file", metavar="path", action="store",
        default='examples.log', help="where to write the complete log",
    )
def pytest_sessionfinish(session, exitstatus):
    """Upload the selenium HTML report to S3 once the session ends.

    Only the master process uploads (xdist slaves are skipped), and only
    when both the --upload flag and an HTML report path were given.
    """
    config = session.config
    should_upload = (config.option.upload
                     and config.option.htmlpath
                     and not hasattr(config, 'slaveinput'))
    if should_upload:
        upload_file_to_s3_by_job_id(config.option.htmlpath)
@pytest.yield_fixture(scope="session")
def log_file(request):
    """Session-scoped, append-mode handle to the examples log file.

    The xdist master truncates the file first so each session starts clean;
    slave processes only append, letting all workers share one log.
    """
    # Fix: the append-mode open below previously read the path from the
    # deprecated global ``pytest.config``; use ``request.config`` throughout.
    log_path = request.config.option.log_file
    is_slave = hasattr(request.config, 'slaveinput')
    if not is_slave:
        with open(log_path, 'w') as f:
            # Clean-out any existing log-file
            f.write("")
    with open(log_path, 'a') as f:
        yield f
|
Tomohiro/apex-python-boilerplate | functions/example/src/example.py | Python | mit | 271 | 0 | """Example Module name.
Module short description
"""
class Example(object):
    """Greets the entity named in a Lambda-style event payload."""

    def __init__(self, event):
        # event: mapping expected to contain at least a 'name' key.
        self.event = event

    def work(self):
        """Build and return the greeting string."""
        return 'Hello, {0}!'.format(self.event['name'])
|
mrZizik/lab_reports | webServicesESLab5/main.py | Python | apache-2.0 | 1,487 | 0.03497 | #!flask/bin/python
from flask import Flask, jsonify, request
app = Flask(__name__)
from pprint import pprint
import json
import urllib2
@app.route('/', methods=['GET'])
def hello():
    """GET handler: fetch a greeting from a remote service, fall back to a default."""
    try:
        # NOTE(review): hard-coded service address -- presumably a lab host;
        # consider making it configurable.
        greetings = urllib2.urlopen("http://188.130.155.37:8080/User").read()
    except (urllib2.HTTPError, urllib2.URLError) as e:
        # Remote unavailable: use a canned greeting instead of failing.
        greetings = "Hello, User"
    return jsonify({'message': greetings + '. Please provide POST query to this server with var1, var2 and meth=(plus, minus, multiply, divide) parameters'})
@app.route('/', methods=['POST'])
def calc():
    """POST handler: apply ``meth`` (plus/minus/multiply/divide) to var1/var2.

    The JSON body must contain 'var1', 'var2' (digit strings) and 'meth';
    anything else answers "Check parameters".
    """
    response = ""
    req = json.loads(request.data)
    if checkVars(req):
        var1 = int(req['var1'])
        var2 = int(req['var2'])
        meth = str(req['meth'])
        if meth == "plus":
            summ = var1 + var2
            response = str(var1) + " plus " + str(var2) + "=" + str(summ)
        elif meth == "minus":
            minus = var1 - var2
            response = str(var1) + " minus " + str(var2) + "=" + str(minus)
        elif meth == "multiply":
            multiply = var1 * var2
            response = str(var1) + " multiply " + str(var2) + "=" + str(multiply)
        elif meth == "divide":
            if var2 == 0:
                # Previously an unhandled ZeroDivisionError (HTTP 500).
                response = "Division by zero is not allowed"
            else:
                divide = var1 / var2
                response = str(var1) + " divide " + str(var2) + "=" + str(divide)
        else:
            response = "Don't know that method " + meth
    else:
        response = "Check parameters"
    return jsonify({'message': response})
def checkVars(req):
    """Validate the request payload: var1/var2 digit strings, meth present.

    A missing key used to raise KeyError (surfacing as HTTP 500); treat it
    as an invalid payload instead.
    """
    try:
        return req['var1'].isdigit() and req['var2'].isdigit() and req['meth'] is not None
    except KeyError:
        return False
if __name__ == '__main__':
    # Listen on all interfaces; debug=True enables the reloader -- dev only.
    app.run(host='0.0.0.0', port=5005, debug=True)
|
hwaf/hwaf | py-hwaftools/orch/features/feature_download.py | Python | bsd-3-clause | 2,298 | 0.013925 | #!/usr/bin/env python
'''
A feature to download a file.
It requires no previous steps. It provides the 'download_seturl' and
'download' steps
'''
import os
from waflib.TaskGen import feature
import waflib.Logs as msg
from orch.util import urlopen
import orch.features
# Default per-package variables for the 'download' feature; the '{...}'
# markers are interpolated by worch at configure time.
orch.features.register_defaults(
    'download',
    download_urlfile = '{package}-{version}.url',
    download_url = None,
    download_checksum = '',
    download_target = None,
)
@feature('download')
def feature_download(tgen):
    '''
    Download a file.

    Registers two steps on the task generator: 'download_seturl' writes the
    configured URL into a file, and 'download' fetches that URL into the
    target node, optionally verifying a "<hasher>:<hexdigest>" checksum.
    '''
    work_dir = tgen.make_node(tgen.worch.download_dir)
    target_filename = tgen.worch.download_target
    if not target_filename:
        # Default the local file name to the last URL path component.
        target_filename = os.path.basename(tgen.worch.download_url)
    target_node = work_dir.make_node(target_filename)
    tgen.step('download_seturl',
              rule = "echo '%s' > %s" % (tgen.worch.download_url,
                                         tgen.worch.download_urlfile),
              update_outputs = True,
              target = tgen.worch.download_urlfile)
    def dl_task(task):
        # Waf task body: read the URL file, download it, verify checksum.
        src = task.inputs[0]
        tgt = task.outputs[0]
        url = src.read().strip()
        try:
            web = urlopen(url)
            tgt.write(web.read(), 'wb')
        except Exception:
            import traceback
            traceback.print_exc()
            msg.error(tgen.worch.format("error downloading {download_url}"))
            raise
        checksum = tgen.worch.download_checksum
        if not checksum:
            return
        hasher_name, ref = checksum.split(":")
        import hashlib, os
        # FIXME: check the hasher method exists. check for typos.
        hasher = getattr(hashlib, hasher_name)()
        hasher.update(tgt.read('rb'))
        data = hasher.hexdigest()
        if data != ref:
            msg.error(tgen.worch.format("invalid checksum:\nref: %s\nnew: %s" %\
                                        (ref, data)))
            # Remove the corrupt download so a re-run fetches it again.
            try:
                os.remove(tgt.abspath())
            except IOError:
                pass
            return 1
        return
    tgen.step('download',
              rule = dl_task,
              source = tgen.worch.download_urlfile,
              target = target_node,
              cwd = work_dir.abspath())
    return
|
tiagofrepereira2012/bob.measure | bob/measure/test_error.py | Python | bsd-3-clause | 13,944 | 0.019722 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Wed 11 Dec 15:14:08 2013 CET
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""Basic tests for the error measuring system of bob
"""
import os
import numpy
import nose.tools
import bob.io.base
def F(f):
  """Return the path of test file *f* under this package's "data" directory."""
  import pkg_resources
  data_relative = os.path.join('data', f)
  return pkg_resources.resource_filename(__name__, data_relative)
def save(fname, data):
  """Saves a single array into a file in the 'data' directory."""
  # NOTE(review): the 'data' path is relative to the current working
  # directory, not to the package -- confirm that is intended.
  bob.io.base.Array(data).save(os.path.join('data', fname))
def test_basic_ratios():
  """Check farfrr/precision_recall/f_score on a linearly separable score set."""
  from . import farfrr, precision_recall, f_score
  # We test the basic functionaly on FAR and FRR calculation. The first
  # example is separable, with a separation threshold of about 3.0
  positives = bob.io.base.load(F('linsep-positives.hdf5'))
  negatives = bob.io.base.load(F('linsep-negatives.hdf5'))
  minimum = min(positives.min(), negatives.min())
  maximum = max(positives.max(), negatives.max())
  # If we take a threshold on the minimum, the FAR should be 1.0 and the FRR
  # should be 0.0. Precision should be 0.5, recall should be 1.0
  far, frr = farfrr(negatives, positives, minimum-0.1)
  nose.tools.eq_(far, 1.0)
  nose.tools.eq_(frr, 0.0)
  prec, recall = precision_recall(negatives, positives, minimum-0.1)
  nose.tools.eq_(prec, 0.5)
  nose.tools.eq_(recall, 1.0)
  # Similarly, if we take a threshold on the maximum, the FRR should be 1.0
  # while the FAR should be 0.0. Both precision and recall should be 0.0.
  far, frr = farfrr(negatives, positives, maximum+0.1)
  nose.tools.eq_(far, 0.0)
  nose.tools.eq_(frr, 1.0)
  prec, recall = precision_recall(negatives, positives, maximum+0.1)
  nose.tools.eq_(prec, 0.0)
  nose.tools.eq_(recall, 0.0)
  # If we choose the appropriate threshold, we should get 0.0 for both FAR
  # and FRR. Precision will be 1.0, recall will be 1.0
  far, frr = farfrr(negatives, positives, 3.0)
  nose.tools.eq_(far, 0.0)
  nose.tools.eq_(frr, 0.0)
  prec, recall = precision_recall(negatives, positives, 3.0)
  nose.tools.eq_(prec, 1.0)
  nose.tools.eq_(recall, 1.0)
  # Testing the values of F-score depending on different choices of the threshold
  f_score_ = f_score(negatives, positives, minimum-0.1)
  nose.tools.assert_almost_equal(f_score_, 0.66666667)
  f_score_ = f_score(negatives, positives, minimum-0.1, 2)
  nose.tools.assert_almost_equal(f_score_, 0.83333333)
  f_score_ = f_score(negatives, positives, maximum+0.1)
  nose.tools.eq_(f_score_, 0.0)
  f_score_ = f_score(negatives, positives, maximum+0.1, 2)
  nose.tools.eq_(f_score_, 0.0)
  f_score_ = f_score(negatives, positives, 3.0)
  nose.tools.eq_(f_score_, 1.0)
  f_score_ = f_score(negatives, positives, 3.0, 2)
  nose.tools.eq_(f_score_, 1.0)
def test_indexing():
  """Check the boolean masks returned by correctly_classified_{positives,negatives}."""
  from . import correctly_classified_positives, correctly_classified_negatives
  # This test verifies that the output of correctly_classified_positives() and
  # correctly_classified_negatives() makes sense.
  positives = bob.io.base.load(F('linsep-positives.hdf5'))
  negatives = bob.io.base.load(F('linsep-negatives.hdf5'))
  minimum = min(positives.min(), negatives.min())
  maximum = max(positives.max(), negatives.max())
  # If the threshold is minimum, we should have all positive samples
  # correctly classified and none of the negative samples correctly
  # classified.
  assert correctly_classified_positives(positives, minimum-0.1).all()
  assert not correctly_classified_negatives(negatives, minimum-0.1).any()
  # The inverse is true if the threshold is a bit above the maximum.
  assert not correctly_classified_positives(positives, maximum+0.1).any()
  assert correctly_classified_negatives(negatives, maximum+0.1).all()
  # If the threshold separates the sets, than all should be correctly
  # classified.
  assert correctly_classified_positives(positives, 3).all()
  assert correctly_classified_negatives(negatives, 3).all()
def test_thresholding():
  """Check eer_threshold, far/frr_threshold and min_hter_threshold behaviour."""
  from . import eer_threshold, far_threshold, frr_threshold, farfrr, correctly_classified_positives, correctly_classified_negatives, min_hter_threshold
  def count(array, value=True):
    """Counts occurrences of a certain value in an array"""
    return list(array == value).count(True)
  # This example will demonstrate and check the use of eer_threshold() to
  # calculate the threshold that minimizes the EER.
  # This test set is not separable.
  positives = bob.io.base.load(F('nonsep-positives.hdf5'))
  negatives = bob.io.base.load(F('nonsep-negatives.hdf5'))
  threshold = eer_threshold(negatives, positives)
  sorted_positives = numpy.sort(positives)
  sorted_negatives = numpy.sort(negatives)
  # Of course we have to make sure that will set the EER correctly:
  ccp = count(correctly_classified_positives(positives,threshold))
  ccn = count(correctly_classified_negatives(negatives,threshold))
  assert (ccp - ccn) <= 1
  for t in (0, 0.001, 0.1, 0.5, 0.9, 0.999, 1):
    # Lets also test the far_threshold and the frr_threshold functions
    threshold_far = far_threshold(sorted_negatives, [], t, is_sorted=True)
    threshold_frr = frr_threshold([], sorted_positives, t, is_sorted=True)
    # Check that the requested FAR and FRR values are smaller than the requested ones
    far = farfrr(negatives, positives, threshold_far)[0]
    frr = farfrr(negatives, positives, threshold_frr)[1]
    assert far + 1e-7 > t
    assert frr + 1e-7 > t
    # test that the values are at least somewhere in the range
    assert far-t <= 0.15
    assert frr-t <= 0.15
  # If the set is separable, the calculation of the threshold is a little bit
  # trickier, as you have no points in the middle of the range to compare
  # things to. This is where the currently used recursive algorithm seems to
  # do better. Let's verify
  positives = bob.io.base.load(F('linsep-positives.hdf5'))
  negatives = bob.io.base.load(F('linsep-negatives.hdf5'))
  threshold = eer_threshold(negatives, positives)
  # the result here is 3.2 (which is what is expect ;-)
  assert threshold == 3.2
  # Of course we have to make sure that will set the EER correctly:
  ccp = count(correctly_classified_positives(positives,threshold))
  ccn = count(correctly_classified_negatives(negatives,threshold))
  nose.tools.eq_(ccp, ccn)
  # The second option for the calculation of the threshold is to use the
  # minimum HTER.
  threshold2 = min_hter_threshold(negatives, positives)
  assert threshold2 == 3.2
  nose.tools.eq_(threshold, threshold2) #in this particular case
  # Of course we have to make sure that will set the EER correctly:
  ccp = count(correctly_classified_positives(positives,threshold2))
  ccn = count(correctly_classified_negatives(negatives,threshold2))
  nose.tools.eq_(ccp, ccn)
def test_plots():
from . import eer_threshold, roc, roc_for_far, precision_recall_curve, det, epc
# This test set is not separable.
positives = bob.io.base.load(F('nonsep-positives.hdf5'))
negatives = bob.io.base.load(F('nonsep-negatives.hdf5'))
threshold = eer_threshold(negatives, positives)
# This example will test the ROC plot calculation functionality.
xy = roc(negatives, positives, 100)
# uncomment the next line to save a reference value
# save('nonsep-roc.hdf5', xy)
xyref = bob.io.base.load(F('nonsep-roc.hdf5'))
assert numpy.array_equal(xy, xyref)
# This example will test the ROC for FAR plot calculation functionality.
far = [0.01, 0.1, 1]
ref = [0.48, 0.22, 0]
xy = roc_for_far(negatives, positives, far)
# uncomment the next line to save a reference value
assert numpy.array_equal(xy[0], far)
assert numpy.array_equal(xy[1], ref)
# This example will test the Precision-Recall plot calculation functionality.
xy = precision_recall_curve(negatives, positives, 100)
# uncomment the next line to save a reference value
# save('nonsep-roc.hdf5', xy)
xyref = bob.io.base.load(F('nonsep-precisionrecall.hdf5'))
assert numpy.array_equal(xy, xyref)
# This example will test the DET plot calculation functionality.
det_xyzw = det(negatives, positives, 100)
# uncomment the next line to sa |
agry/NGECore2 | scripts/static_spawns/tatooine/jabba_tp_romo_vax_bunker.py | Python | lgpl-3.0 | 4,612 | 0.014961 | import sys
# Project SWG: Jabba TP Romo Vax Bunker: Static Spawns
# (C)2014 ProjectSWG
from resources.datatables import Options
from resources.datatables import State
def addPlanetSpawns(core, planet):
    """Spawn the static NPCs for the Jabba TP Romo Vax bunker on Tatooine."""
    stcSvc = core.staticService
    objSvc = core.objectService
    # Outdoor henchmen around the bunker entrance (cell id 0 == world cell).
    outdoor_points = [
        (-5079.1, 47.9, -6970.5),
        (-5063.5, 49.2, -6998.4),
        (-5051.8, 45.7, -6989.7),
        (-5030.7, 46.5, -6972.5),
        (-5019, 48.3, -6946.9),
        (-5045.8, 42.6, -6936),
        (-5053.7, 43.6, -6961.9),
        (-5057.5, 43.9, -6961.5),
    ]
    for x, y, z in outdoor_points:
        stcSvc.spawnObject('romovax_henchman', 'tatooine', long(0), float(x), float(y), float(z), float(0), float(0), float(0), float(0), 45)
    # TODO Check all NPCs for personalized scripting, change format.
    bunker = core.objectService.getObject(long(-466404037494797872))
    if bunker is not None:
        # (cell number, x, y, z, template) for the spawns inside the bunker.
        interior_points = [
            (2, -3.8, 0.3, 2.9, 'romovax_henchman'),
            (3, 3.6, 0.3, -3.7, 'romovax_henchman'),
            (5, 29.8, -12, 25.4, 'romovax_henchman'),
            (5, 32.7, -12, 35, 'romovax_henchman'),
            (5, 22.9, -12, 30.9, 'romovax_henchman'),
            (4, 3.9, -12, 21, 'romovax_henchman'),
            (4, 3.9, -12, 38.1, 'romovax_henchman'),
            (6, 3.5, -16, 53, 'romovax_henchman'),
            (8, 58.8, -16, 61, 'romovax_henchman'),
            (8, 74.4, -16, 66.6, 'romovax_henchman'),
            (8, 68.3, -16, 79.2, 'romovax_henchman'),
            (8, 44.6, -16, 82.9, 'romovax_henchman'),
            (7, 26, -16, 79, 'romovax_henchman'),
            (7, 6.9, -16, 78.1, 'romovax_henchman'),
            (7, -5.2, -16, 77.5, 'romovax_henchman'),
            (7, -19.9, -16, 78.8, 'romovax_henchman'),
            (9, 32.4, -14, 78.7, 'romovax_henchman'),
            (9, -43.5, -14, -78.9, 'fighting_romo_vax'),
        ]
        for cell, x, y, z, template in interior_points:
            stcSvc.spawnObject(template, 'tatooine', bunker.getCellByCellNumber(cell), float(x), float(y), float(z), float(0), float(0), float(0), float(0), 45)
    return
|
wagnerandreoli/clitoolkit | clit/dev/packaging.py | Python | bsd-3-clause | 17,451 | 0.003382 | # -*- coding: utf-8 -*-
"""Packaging tools to publish projects on PyPI and GitHub."""
import os
import sys
from pathlib import Path
from shutil import rmtree
from textwrap import dedent
from typing import List, Optional, Tuple
import click
from clit import DRY_RUN_OPTION
from clit.files import shell
from clit.ui import prompt
HeaderCommand = Tuple[str, str]
class Publisher:
"""Helper to publish packages."""
TOOL_BUMPVERSION = "bumpversion"
TOOL_CONVENTIONAL_CHANGELOG = "conventional-changelog"
TOOL_POETRY = "poetry"
TOOL_GIT = "git"
TOOL_HUB = "hub"
TOOL_TWINE = "twine"
TOOL_CONVENTIONAL_GITHUB_RELEASER = "conventional-github-releaser"
NEEDED_TOOLS = {
TOOL_BUMPVERSION: "Install from https://github.com/peritus/bumpversion#installation and configure setup.cfg",
TOOL_CONVENTIONAL_CHANGELOG: (
"Install from https://github.com/conventional-changelog/conventional-changelog/tree/master"
+ "/packages/conventional-changelog-cli#quick-start"
),
TOOL_POETRY: "Install from https://github.com/sdispater/poetry#installation",
TOOL_GIT: "Install using your OS package tools",
TOOL_HUB: "Install from https://github.com/github/hub#installation",
TOOL_TWINE: "Install from https://github.com/pypa/twine#installation",
TOOL_CONVENTIONAL_GITHUB_RELEASER: (
"Install from https://github.com/conventional-changelog/releaser-tools/tree"
+ "/master/packages/conventional-github-releaser#quick-start and configure a GitHub Access token"
),
}
NEEDED_FILES = {
"package.json": (
f"Used by {TOOL_CONVENTIONAL_CHANGELOG}. See https://github.com/conventional-changelog/"
+ "conventional-changelog/blob/master/packages/conventional-changelog-cli/package.json"
)
}
# https://github.com/peritus/bumpversion
CMD_BUMP_VERSION = TOOL_BUMPVERSION + " {allow_dirty} {part}"
CMD_BUMP_VERSION_SIMPLE_CHECK = f"{CMD_BUMP_VERSION} --dry-run"
CMD_BUMP_VERSION_VERBOSE = f"{CMD_BUMP_VERSION_SIMPLE_CHECK} --verbose 2>&1"
CMD_BUMP_VERSION_VERBOSE_FILES = f"{CMD_BUMP_VERSION_VERBOSE} | grep -i -E -e '^would'"
CMD_BUMP_VERSION_GREP = f'{CMD_BUMP_VERSION_VERBOSE} | grep -i -E -e "would commit to git.+bump" -e "^new version" | grep -E -o "\'(.+)\'"'
# https://github.com/conventional-changelog/conventional-changelog/tree/master/packages/conventional-changelog-cli
CMD_CHANGELOG = f"{TOOL_CONVENTIONAL_CHANGELOG} -i CHANGELOG.md -p angular"
CMD_BUILD_SETUP_PY = "python setup.py sdist bdist_wheel --universal"
# https://poetry.eustace.io/
CMD_POETRY_BUILD = f"{TOOL_POETRY} build"
CMD_GIT_ADD_AND_COMMIT = TOOL_GIT + " add . && git commit -m'{}' --no-verify"
CMD_GIT_PUSH = f"{TOOL_GIT} push"
CMD_GIT_CHECKOUT_MASTER = f"echo {TOOL_GIT} checkout master && echo {TOOL_GIT} pull"
# https://github.com/pypa/twine
# I tried using "poetry publish -u $TWINE_USERNAME -p $TWINE_PASSWORD"; the command didn't fail,
# but nothing was uploaded
# I also tried setting $TWINE_USERNAME and $TWINE_PASSWORD on the environment,
# but then "twine upload" didn't work for some reason.
CMD_TWINE_UPLOAD = TOOL_TWINE + " upload {repo} dist/*"
# https://www.npmjs.com/package/conventional-github-releaser
CMD_GITHUB_RELEASE = TOOL_CONVENTIONAL_GITHUB_RELEASER + " -p angular -v --token {}"
CMD_MANUAL_GITHUB_RELEASE = f"echo {TOOL_HUB} browse"
CMD_GITHUB_RELEASE_ENVVAR = "CONVENTIONAL_GITHUB_RELEASER_TOKEN"
def __init__(self, dry_run: bool):
self.dry_run = dry_run
self.github_access_token: Optional[str] = None
@classmethod
def part_option(cls):
"""Add a --part option."""
return click.option(
"--part",
"-p",
default="minor",
type=click.Choice(["major", "minor", "patch"]),
help="Which part of the version number to bump",
)
@classmethod
def allow_dirty_option(cls):
"""Add a --allow-dirty option."""
return click.option(
"--allow-dirty",
"-d",
default=False,
is_flag=True,
type=bool,
help="Allow bumpversion to run on a dirty repo",
)
@classmethod
def github_access_token_option(cls):
"""Add a --github-access-token option."""
return click.option(
"--github-access-token",
"-t",
help=(
f"GitHub access token used by {cls.TOOL_CONVENTIONAL_GITHUB_RELEASER}. If not defined, will use the value"
+ f" from the ${cls.CMD_GITHUB_RELEASE_ENVVAR} environment variable"
),
)
def check_tools(self, github_access_token: str = None) -> None:
"""Check if all needed tools and files are present."""
all_ok = True
for executable, help_text in self.NEEDED_TOOLS.items():
output = shell(f"which {executable}", quiet=True, return_lines=True)
if not output:
click.secho(f"Executable not found on the $PATH: {executable}. {help_text}", fg="bright_red")
all_ok = False
for file, help_text in self.NEEDED_FILES.items():
path = Path(file)
if not path.exists():
click.secho(f"File not found: {path}. {help_text}", fg="bright_red")
all_ok = False
if github_access_token:
self.github_access_token = github_access_token
else:
error_message = "Missing access token"
if self.CMD_GITHUB_RELEASE_ENVVAR in os.environ:
variable = self.CMD_GITHUB_RELEASE_ENVVAR
else:
token_keys = {k for k in os.environ.keys() if "github_access_token".casefold() in k.casefold()}
if len(token_keys) == 1:
variable = token_keys.pop()
else:
variable = ""
error_message = f"You have multiple access tokens: {', '.join(token_keys)}"
if variable:
self.github_access_token = os.environ[variable]
click.echo(f"Using environment variable {variable} as GitHub access token")
else:
click.secho(f"{error_message}. ", fg="bright_red", nl=False)
click.echo(
f"Set the variable ${self.CMD_GITHUB_RELEASE_ENVVAR} or use"
+ " --github-access-token to define a GitHub access token"
)
all_ok = False
if self.dry_run:
return
if all_ok:
click.secho(f"All the necessary tools are installed.", fg="bright_white")
else:
click.secho("Install the tools and create the missing files.")
exit(1)
@classmethod
def _bump(cls, base_command: str, part: str, allow_dirty: bool):
"""Prepare the bump command."""
return base_command.format(allow_dirty="--allow-dirty" if allow_dirty else "", part=part)
def check_bumped_version(self, part: str, allow_dirty: bool) -> Tuple[str, str]:
"""Check the version that will be bumped."""
shell(
self._bump(self.CMD_BUMP_VERSION_SIMPLE_CHECK, part, allow_dirty),
exit_on_failure=True,
header="Check the version that will be bumped",
)
bump_cmd = self._bump(self.CMD_BUMP_VERSION_VERBOSE_FILES, part, allow_dirty)
shell(bump_cmd, dry_run=self.dry_run, header=f"Display what file | s would be changed", exit_on_failure=True)
if not self.dry_run:
chosen_lines = shell(self._bump(self.CMD_BUMP_VERSION_GREP, part, allow_dirty), return_lines=True)
new_version = chosen_lines[0].strip("'")
commit_message = chosen_lines[1].strip("'").lower()
click.echo(f"New version: {new_version}\nCommit message: {commit_message}")
prompt("Were all versions correctly displayed?")
else:
commit_message = "bump version f | rom X to Y"
new_version = "<new ve |
skorokithakis/django-fancy-cache | fancy_tests/tests/urls.py | Python | bsd-3-clause | 239 | 0.008368 | from django | .conf.urls.defaults import patterns, url
from . import views
# Old Django 1.x-style URLconf; patterns() was deprecated in 1.8 and removed
# in 1.10, so this file targets the older releases.
urlpatterns = patterns(
    '',
    url(r'^$', views.home, name='home'),
    #url(r'^2$', views.home2, name='home2'),
    #url(r'^3$', views.home3, name='home3'),
)
|
nzlosh/st2 | contrib/runners/inquirer_runner/setup.py | Python | apache-2.0 | 1,838 | 0.000544 | # -*- coding: utf-8 -*-
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of t | he License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITION | S OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os.path
from setuptools import setup
from setuptools import find_packages
from dist_utils import fetch_requirements
from dist_utils import apply_vagrant_workaround
from inquirer_runner import __version__
# Requirements are read from the runner's own requirements.txt, resolved
# relative to this setup.py.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt")
install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
apply_vagrant_workaround()
setup(
    name="stackstorm-runner-inquirer",
    version=__version__,
    description=(
        "Inquirer action runner for StackStorm event-driven automation platform"
    ),
    author="StackStorm",
    author_email="info@stackstorm.com",
    license="Apache License (2.0)",
    url="https://stackstorm.com/",
    install_requires=install_reqs,
    dependency_links=dep_links,
    test_suite="tests",
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(exclude=["setuptools", "tests"]),
    package_data={"inquirer_runner": ["runner.yaml"]},
    scripts=[],
    entry_points={
        # Registers the runner with st2common's plugin discovery.
        "st2common.runners.runner": [
            "inquirer = inquirer_runner.inquirer_runner",
        ],
    },
)
|
heibanke/python_do_something | Code/Chapter2/homework2-4_csv_ex.py | Python | apache-2.0 | 647 | 0.017002 | #!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke
import csv
import re
# Read the second row of the Beijing transit CSV and split its last column
# into a list of station names.  (Python 2: note the str.decode calls.)
csvfile = open('beijing_jt.csv','r')
reader = csv.reader(csvfile)
# reader.next() only can use in py2
next(reader)
jt_info = next(reader)
print(jt_info[1].decode('utf-8'))
csvfile.close()
# convert stations info format
station_pattern = (r'(?P<number>[0-9]+)\s(?P<name>\D+)')
station_list = []
stations = re.findall(station_pattern,jt_info[-1].decode('utf-8'))
for tmp in stations:
    # tmp is (number, name); strip trailing whitespace from the name.
    print(tmp[0],tmp[1].strip())
    station_list.append(tmp[1].strip())
result={}
result[jt_info[1]]=station_list
print(result)
LagrangianPoint/Apache-Rewrite-Maps-Python | under2space.py | Python | mit | 754 | 0.015915 | #!/usr/bin/python -u
"""
This script replaces underscores with spaces (%20)
Project: https://github.com/LagrangianPoint/Apa | che-Rewrite-Maps-Python/
http://httpd.apache.org/docs/current/rewrite/rewritemap.html
http://fragmentsofcode.wordpress.com/2009/02/04/python-script-for-apache-rewritemap/
http://codeblow | .com/questions/apache2-rewritemap-python-when-coming-back-null-apache-dangles/
HINTS FOR DEBUGGING:
RewriteEngine On
RewriteLogLevel 9
RewriteLog /var/log/apache2/rewrite.log
"""
import sys
# Apache RewriteMap protocol: read one key per line from stdin, write one
# answer per line to stdout, flushing after every reply (Python 2 script).
while sys.stdin:
    try:
        strLine = sys.stdin.readline().strip() ## It is very important to use strip!
        strLine = strLine.replace('_', ' ')
        print strLine
        sys.stdout.flush()
    except:
        # NOTE(review): bare except -- the protocol requires answering NULL
        # on any failure, but narrowing this to Exception would be safer.
        print 'NULL'
        sys.stdout.flush()
|
sdl-static/ircbot-collection | beanbot.py | Python | gpl-2.0 | 7,221 | 0.014402 | #!/usr/bin/env python
#
# Simple IRC Bot to announce messages
#
# Code originally based on example bot and irc-bot class from
# Joel Rosdahl <joel@rosdahl.net>, author of included python-irclib.
#
"""An IRC bot to announce messages on a channel.
This is an example bot that uses the SingleServerIRCBot class from
ircbot.py. The bot enters a channel and relays messages fed to it
via some means (currently UDP).
"""
import sys, string, random, time, os, fcntl
from ircbot import SingleServerIRCBot
import irclib
from irclib import nm_to_n, nm_to_h, irc_lower, parse_channel_modes
from botcommon import OutputManager
from threading import Thread
# Derive the repository URL from the svn $URL$ keyword expansion, keeping
# only the directory part (drop the file name).
svn_url = \
  "$URL$"
svn_url = svn_url[svn_url.find(' ')+1:svn_url.rfind('/')+1]
class Bot(SingleServerIRCBot):
def __init__(self, channel, nickname, nickpass, ircaddr, udpaddr,
debug=False):
SingleServerIRCBot.__init__(self, [ircaddr], nickname, nickname, 5)
self.channel = channel
# self.nickname is the nickname we _want_. The nickname we actually
# have at any particular time is c.get_nickname().
self.nickname = nickname
self.nickpass = nickpass
self.debug = debug
self.queue = OutputManager(self.connection, .9)
self.queue.start()
self.inputthread = UDPInput(self, udpaddr)
self.inputthread.start()
try:
self.start()
except KeyboardInterrupt:
self.connection.quit("Ctrl-C at console")
print "Quit IRC."
except Exception, e:
self.connection.quit("%s: %s" % (e.__class__.__name__, e.args))
raise
_uninteresting_events = {
'all_raw_messages': None,
'yourhost': None,
'created': None,
'myinfo': None,
'featurelist': None,
'luserclient': None,
'luserop': None,
'luserchannels': None,
'luserme': None,
'n_local': None,
'n_global': None,
'luserconns': None,
'motdstart': None,
'motd': None,
'endofmotd': None,
'topic': None,
'topicinfo': None,
'ping': None,
}
def _dispatcher(self, c, e):
if self.debug:
eventtype = e.eventtype()
if eventtype not in self._uninteresting_events:
source = e.source()
if source is not None:
source = nm_to_n(source)
else:
source = ''
print "E: %s (%s->%s) %s" % (eventtype, source, e.target(),
e.arguments())
SingleServerIRCBot._dispatcher(self, c, e)
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_join(self, c, e):
nick = nm_to_n(e.source())
if nick == c.get_nickname():
chan = e.target()
self.connection.mode(self.channel, '')
def on_channelmodeis(self, c, e):
c._handle_event(
irclib.Event("mode", e.source(), e.arguments()[0], [e.arguments()[1]]))
def on_quit(self, c, e):
source = nm_to_n(e.source())
if source == self.nickname:
# Our desired nick just quit - take the nick back
c.nick(self.nickname)
def on_welcome(self, c, e):
c.join(self.channel)
if self.nickpass and c.get_nickname() != self.nickname:
# Reclaim our desired nickname
c.privmsg('nickserv', 'ghost %s %s' % (self.nickname, self.nickpass))
def on_privnotice(self, c, e):
source = e.source()
if source and irc_lower(nm_to_n(source)) == 'nickserv':
if e.arguments()[0].find('IDENTIFY') >= 0:
# Received request to identify
if self.nickpass and self.nickname == c.get_nickname():
self.queue.send('identify %s' % self.nickpass, 'nickserv')
def on_privmsg(self, c, e):
self.do_command(e, e.arguments()[0])
def on_pubmsg(self, c, e):
a = string.split(e.arguments()[0], ":", 1)
if len(a) > 1 and irc_lower(a[0]) == irc_lower(c.get_nickname()):
self.do_command(e, string.strip(a[1]))
def say_public(self, text):
"Print TEXT into public channel, for all to see."
self.queue.send(text, self.channel)
def say_private(self, nick, text):
"Send private message of TEXT to NICK."
self.queue.send(text,nick)
def reply(self, e, text):
"Send TEXT to public channel or as private msg, in reply to event E."
if e.eventtype() == "pubmsg":
self.say_public("%s: %s" % (nm_to_n(e.source()), text))
else:
self.say_private(nm_to_n(e.source()), text)
def cmd_help(self, args, e):
cmds = [i[4:] for i in dir(self) if i.startswith('cmd_')]
self.reply(e, "Valid commands: '%s'" % "', '".join(cmds))
#def cmd_renick(self, args, e):
# if len(args) != 1:
# self.reply(e, "Usage: renick <nick>")
# return
# self.connection.nick(args[0])
def cmd_about(self, args, e):
self.r | eply(e, "I am a bot written in Python "
"using the python-irclib library")
self.reply(e, "My source code is available at %s" % svn_url)
def | do_command(self, e, cmd):
"""This is the function called whenever someone sends a public or
private message addressed to the bot. (e.g. "bot: blah"). Parse
the CMD, execute it, then reply either to public channel or via
/msg, based on how the command was received. E is the original
event, and FROM_PRIVATE is the nick that sent the message."""
cmds = cmd.strip().split(" ")
try:
cmd_handler = getattr(self, "cmd_" + cmds[0])
except AttributeError:
cmd_handler = None
if cmd_handler:
cmd_handler(cmds[1:], e)
return
self.reply(e, "I don't understand '%s'."%(cmd))
# Base name used for the default config file and the config section.
botname = 'beanbot'
def usage(exitcode=1):
    # Print the CLI synopsis and terminate with the given exit status.
    print "Usage: %s.py [-d] [<config-file>]" % botname
    sys.exit(exitcode)
def parse_host_port(hostport, default_port=None):
    # Split "host[:port]" into a (host, port) tuple.  A malformed port, or a
    # missing port with no default, prints an error and exits the process.
    lis = hostport.split(":", 1)
    host = lis[0]
    if len(lis) == 2:
        try:
            port = int(lis[1])
        except ValueError:
            print "Error: Erroneous port."
            sys.exit(1)
    else:
        if default_port is None:
            print "Error: Port required in %s." % hostport
            sys.exit(1)
        port = default_port
    return host, port
def main():
    # Parse -d/--debug and an optional config-file argument, read the bot's
    # settings from the config, then construct (and thereby run) the Bot.
    import getopt
    try:
        opts, args = getopt.gnu_getopt(sys.argv, 'd', ('debug',))
    except getopt.GetoptError:
        usage()
    debug = False
    for opt, val in opts:
        if opt in ('-d', '--debug'):
            debug = True
    if len(args) not in (1, 2):
        usage()
    if len(args) > 1:
        configfile = args[1]
    else:
        configfile = '%s.conf' % botname
    import ConfigParser
    c = ConfigParser.ConfigParser()
    c.read(configfile)
    cfgsect = botname
    ircaddr = parse_host_port(c.get(cfgsect, 'host'), 6667)
    channel = c.get(cfgsect, 'channel')
    nickname = c.get(cfgsect, 'nickname')
    try:
        nickpass = c.get(cfgsect, 'nickpass')
    except ConfigParser.NoOptionError:
        # nickserv password is optional.
        nickpass = None
    udpaddr = parse_host_port(c.get(cfgsect, 'udp-addr'))
    Bot(channel, nickname, nickpass, ircaddr, udpaddr, debug)
Bot(channel, nickname, nickpass, ircaddr, udpaddr, debug)
class UDPInput(Thread):
    """Daemon thread that relays datagrams received on ADDR to the
    bot's public channel."""

    def __init__(self, bot, addr):
        Thread.__init__(self)
        # daemon=True so this listener never blocks interpreter shutdown.
        # (Thread.setDaemon() is deprecated and was removed in Python 3.13;
        # assigning the `daemon` attribute is the equivalent supported form.)
        self.daemon = True
        self.bot = bot
        from socket import socket, AF_INET, SOCK_DGRAM
        self.socket = socket(AF_INET, SOCK_DGRAM)
        self.socket.bind(addr)

    def run(self):
        # Forward each datagram payload verbatim to the channel, forever.
        while True:
            data, addr = self.socket.recvfrom(1024)
            self.bot.say_public(data)
if __name__ == "__main__":
    # NOTE(review): the KeyboardInterrupt handler below was left commented
    # out by the author; re-enabling it would give a clean exit message on
    # Ctrl-C during startup instead of a traceback.
    #try:
    main()
    #except KeyboardInterrupt:
    #    print "Caught Ctrl-C during initialization."
|
Straor/Prog | Python/prog13.py | Python | mit | 255 | 0.031373 | # -*- coding: utf-8 -*-
# programme qui demande un nombre et affiche les 10 triples successifs
# Ask the user for a number, then print its 10 successive triples
# (n*3, n*9, ..., n*3**10) — one per line, same as the counter loop.
chaine = input("donne un nombre : ")
nombre = int(chaine)
triple = nombre
for _ in range(10):
    triple *= 3
    print(triple)
| |
peterhinch/micropython-tft-gui | tft/demos/dialog.py | Python | mit | 2,864 | 0.044693 | # dialog.py Test/demo of modal dialog box for Pybboard TFT GUI
# Adapted for (and requires) uasyncio V3
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2016-2020 Peter Hinch
from tft.driver.constants import *
from tft.driver.tft_local import setup
from tft.driver.ugui import Screen, Aperture
from tft.widgets.dialog import DialogBox
from tft.widgets.buttons import Button
from tft.widgets.label import Label
from tft.fonts import font14
from tft.fonts import font10
# STANDARD BUTTONS
def quitbutton(x, y):
    """Place a red 'Quit' button at (x, y) that shuts the GUI down."""
    def quit_cb(button):
        Screen.shutdown()
    Button((x, y), height=30, font=font14, callback=quit_cb, fgcolor=RED,
           text='Quit', shape=RECTANGLE, width=80)
def fwdbutton(x, y, cls_screen, *, text='Next', args=None, kwargs=None):
    """Place a red button at (x, y) that switches to screen CLS_SCREEN.

    args/kwargs are forwarded to Screen.change when the button fires.
    They default to None rather than the original mutable []/{}
    literals (a shared-state hazard: one default object is reused by
    every call) and are replaced with fresh empty containers here.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    def fwd(button):
        Screen.change(cls_screen, args = args, kwargs = kwargs)
    Button((x, y), height = 30, font = font14, callback = fwd, fgcolor = RED,
           text = text, shape = RECTANGLE, width = 80)
# Demo of creating a dialog manually
class UserDialogBox(Aperture):
    """Hand-built modal dialog demonstrating manual Aperture construction.

    Presents 'Cat' and 'Dog' choice buttons plus a small close box; the
    chosen label becomes the dialog's return value.
    """

    def __init__(self):
        height = 150
        width = 220
        super().__init__((20, 20), height, width, bgcolor=DARKGREEN)
        btn_y = self.height - 50
        Button(self.locn(20, btn_y), height=30, width=80, font=font14,
               fontcolor=BLACK, fgcolor=RED, text='Cat', shape=RECTANGLE,
               callback=self.back, args=('Cat',))
        Button(self.locn(120, btn_y), height=30, width=80, font=font14,
               fontcolor=BLACK, fgcolor=GREEN, text='Dog', shape=RECTANGLE,
               callback=self.back, args=('Dog',))
        # Close box in the top-right corner.
        Button(self.locn(width - 26, 1), height=25, width=25, font=font10,
               fgcolor=RED, text='X', shape=RECTANGLE,
               callback=self.back, args=('Close',))

    def back(self, button, text):
        """Record TEXT as the dialog result and return to the caller."""
        Aperture.value(text)
        Screen.back()
class BaseScreen(Screen):
    # Root screen: a result label plus buttons opening both dialog styles
    # (the hand-built UserDialogBox and the generated DialogBox).
    def __init__(self):
        super().__init__()
        Label((0, 0), font = font14, value = 'Dialog box demonstration.')
        Label((0, 100), font = font10, value = 'User written and gdialog generated')
        # Shows the value returned by whichever dialog was last closed.
        self.lbl_result = Label((10, 50), font = font10, fontcolor = WHITE, width = 70, border = 2,
                                fgcolor = RED, bgcolor = DARKGREEN)
        # User written dialog
        fwdbutton(195, 242, UserDialogBox, text = 'User')
        # Dialog built using gdialog.py DialogBox
        dialog_elements = (('Yes', GREEN), ('No', RED), ('Foo', YELLOW))
        fwdbutton(0, 242, DialogBox, text = 'Gen', args = (font14,),
                  kwargs = {'elements' : dialog_elements, 'label' : 'Test dialog'})
        quitbutton(390, 242)

    def on_open(self):
        # Refresh the label each time control returns from a dialog.
        self.lbl_result.value(Aperture.value())
def test():
    """Initialise the display hardware and start the GUI on BaseScreen."""
    setup()
    Screen.change(BaseScreen)

# Demo runs immediately on import, as is conventional for these scripts.
test()
|
JulianVolodia/Politikon | accounts/migrations/0020_userprofile_last_transaction.py | Python | gpl-2.0 | 428 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds UserProfile.last_transaction,
    # a nullable timestamp (stays NULL until first set by application code).

    dependencies = [
        ('accounts', '0019_auto_20160518_0048'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='last_transaction',
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
|
genialis/resolwe-bio | resolwe_bio/tests/workflows/test_mirna.py | Python | apache-2.0 | 2,406 | 0.001247 | from resolwe.flow.models import Data
from resolwe.test import tag_process, with_resolwe_host
from resolwe_bio.utils.test import KBBioProcessTestCase
class MicroRNATestCase(KBBioProcessTestCase):
    """End-to-end test of the workflow-mirna pipeline on a small fake genome."""

    @with_resolwe_host
    @tag_process("workflow-mirna")
    def test_mirna_workflow(self):
        # Prepare data for aligning the reads with bowtie2 and annotation file for featureCounts.
        with self.preparation_stage():
            inputs = {
                "src": "genome_rsem.fa.gz",
                "species": "Homo sapiens",
                "build": "fake_genome_RSEM",
            }
            ref_seq = self.run_process("upload-fasta-nucl", inputs)
            bowtie2_index = self.run_process("bowtie2-index", {"ref_seq": ref_seq.id})
            single_reads = self.prepare_reads(["reads rsem.fq.gz"])
            annotation = self.prepare_annotation(
                "annotation_rsem.gtf.gz",
                species="Homo sapiens",
                build="fake_genome_RSEM",
            )

        # Workflow inputs: cutadapt-style preprocessing, bowtie2 alignment
        # and featureCounts quantification (CPM-normalized).
        inputs = {
            "preprocessing": {
                "reads": single_reads.pk,
                "adapters": {"down_primers_seq": ["TAATGAACAATGCAAGTTTGA"]},
                "filtering": {"minlen": 15, "maxlen": 35, "error_rate": 0.2},
            },
            "alignment": {
                "genome": bowtie2_index.pk,
                "alignment_options": {
                    "mode": "--local",
                    "speed": "--very-sensitive",
                    "L": 8,
                    "rep_mode": "k",
                    "k_reports": 5,
                },
            },
            "quant_options": {
                "annotation": annotation.pk,
                "id_attribute": "gene_id",
                "feature_class": "exon",
                "normalization_type": "CPM",
                "count_multi_mapping_reads": True,
                "allow_multi_overlap": True,
            },
            "assay_type": "non_specific",
        }

        # Run process and assert.
        self.run_process("workflow-mirna", inputs)

        # Inspect the featureCounts step the workflow spawned last.
        workflow = Data.objects.filter(process__slug="feature_counts").last()

        # check featureCount summary
        self.assertFile(
            workflow, "rc", "mirna_featurecounts_rc.tab.gz", compression="gzip"
        )
        self.assertFile(
            workflow, "exp", "mirna_featurecounts_cpm.tab.gz", compression="gzip"
        )
|
udayinfy/openerp-7.0 | move_reports/stock_move_report/stock_move_report.py | Python | agpl-3.0 | 23,230 | 0.008437 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: LIN Yu <lin.yu@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
import openerp.addons.decimal_precision as dp
import time
from datetime import datetime
import pytz
import os,glob
import csv,xlwt
from xlsxwriter.workbook import Workbook
import shutil
import base64
from tools.translate import _
import logging
_logger = logging.getLogger(__name__)
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class stock_move_report_wizard(osv.osv_memory):
    # Transient wizard: collects a date window and a move type, rebuilds
    # the stock.move.report rows for that window, then opens the tree view.
    _name = 'stock.move.report.wizard'
    _description = 'Stock Move Report Wizard'

    _columns = {
        'start_date': fields.datetime('Start Date'),
        'end_date': fields.datetime('End Date'),
        'type': fields.selection([('in','In'),('out','Out'),('internal','Internal'),('scrap','Scrap'),('consumption','Consumption'),('production','production'),('all','all')],string='Type',required=True),
    }
    _defaults = {
        # Default window runs 16:00 -> 15:59:59, a day boundary shifted
        # from UTC — presumably to match a local business day (TODO confirm
        # the intended timezone offset).
        'start_date': lambda *a: time.strftime('%Y-%m-%d 16:00:00'),
        'end_date': lambda *a: time.strftime('%Y-%m-%d 15:59:59'),
        'type': 'in',
    }

    def generate_report(self, cr, uid, ids, context=None):
        # Copy the wizard selection into the context, delegate row creation
        # to stock.move.report, then return an act_window on its tree view.
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        context['start_date'] = data.start_date
        context['end_date'] = data.end_date
        context['type'] = data.type
        pi_obj = self.pool.get('stock.move.report')
        pi_obj.generate_report(cr, uid, context)
        mod_obj = self.pool.get('ir.model.data')
        res = mod_obj.get_object_reference(cr, uid, 'move_reports', 'view_move_report_tree')
        # NOTE(review): the trailing comma makes res_id a 1-tuple, so
        # 'view_id' below receives a tuple — likely unintended; verify
        # against how the client consumes view_id.
        res_id = res and res[1] or False,
        return {
            'name': _('Stock Move Report'),
            'view_type': 'form',
            'view_mode': 'tree',
            'view_id': res_id,
            'res_model': 'stock.move.report',
            'context': "{}",
            'type': 'ir.actions.act_window',
            'target': 'current',
            'res_id': False,
        }

# OpenERP 7 convention: instantiating the class registers it with the ORM pool.
stock_move_report_wizard()
class stock_move_report(osv.osv):
_name = 'stock.move.report'
_description = 'Stock Move Report'
_rec_name = "move_id"
_order = 'date desc'
_create_sql = """
INSERT INTO stock_move_report
(
create_uid,
write_uid,
create_date,
write_date,
move_id,
date,
date_expected,
origin,
picking_id,
picking_name,
type,
pick_return,
partner_ref,
partner_id,
partner_name,
stock_type_id,
stock_type_name,
category_id,
category_name,
product_sku,
product_id,
product_name,
move_qty,
product_qty,
uom_id,
uom_name,
product_uom_name,
uom_factor,
product_price,
price_unit,
cost_total,
po_price,
amount_total,
loc_name,
loc_dest_name,
return_reason
)
SELECT %d, %d, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
m.id as move_id, m.date, m.date_expected, m.origin,
p.id as picking_id, p.name as picking_name,
p.type as type, p.return as pick_return,
rp.ref as partner_ref, rp.id as partner_id, rp.name as partner_name,
st.id as stock_type_id , st.name as stock_type_name,
c.id as category_id, cp.name || ' / ' || c.name as category_name,
m.product_code as product_sku, pp.id as product_id, pt.name as product_name,
m.product_qty as move_qty, m.product_qty * pu.factor / u.factor as product_qty,
u.id as uom_id, u.name as uom_name, pu.name as product_uom_name, pu.factor / u.factor as uom_facotr,
m.price_unit / pu.factor * u.factor as product_price, m.price_unit as price_unit, round(m.product_qty * pu.factor / u.factor*m.price_unit, 4) as cost_total,
m.po_price as po_price, m.amount_total as amount_total,
sl.complete_name as location_name,
sld.complete_name as location_dest_name,
srr.code as return_reason
from stock_move m
left join stock_picking p on p.id = m.picking_id
left join product_product pp on pp.id = m.product_id
left join product_template pt on pt.id = pp.product_tmpl_id
left join product_category c on c.id = pt.categ_id
left join product_category cp on cp.id = c.parent_id
left join product_stock_type st on st.id = pt.stock_type_id
left join product_uom u on u.id = m.product_uom
left join product_uom pu on pu.id = pt.uom_id
left join res_partner rp on rp.id = m.partner_id
left join stock_location sl on sl.id = m.location_id
left join stock_location sld on sld.id = m.location_dest_id
left join stock_return_reason srr on srr.id = m.return_reason_id
where %s
order by m.id
"""#uid,uid,domain
_reverse_sql = """
INSERT INTO stock_move_report
(
create_uid,
write_uid,
create_date,
write_date,
move_id,
date,
date_expected,
origin,
picking_id,
picking_name,
type,
pick_return,
partner_ref,
partner_id,
partner_name,
stock_type_id,
stock_type_name,
category_id,
category_name,
product_sku,
product_id,
product_name,
move_qty,
product_qty,
uom_id,
uom_name,
product_uom_name,
uom_factor,
|
norayr/unisubs | utils/management/commands/get_memcached.py | Python | agpl-3.0 | 1,043 | 0.006711 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.cache import cache
class Command(BaseCommand):
    # Smoke-test command: fetches the sentinel 'test-cache' key to verify
    # that Django can reach the configured memcached backend.
    help = u'Get a test value from memcached'

    def handle(self, *args, **kwargs):
        # Returns whatever is cached under 'test-cache' (None on a miss).
        return cache.get('test-cache')
|
jazzband/django-axes | axes/management/commands/axes_reset_logs.py | Python | mit | 643 | 0 | from django.core.management.base import BaseCommand
from axes.handlers.proxy import AxesProxyHandler
class Command(BaseCommand):
    """Management command that prunes old access-log records."""

    help = "Reset access log records older than given days."

    def add_arguments(self, parser):
        parser.add_argument(
            "--age",
            type=int,
            default=30,
            help="Maximum age for records to keep in days",
        )

    def handle(self, *args, **options):
        removed = AxesProxyHandler.reset_logs(age_days=options["age"])
        message = f"{removed} logs removed." if removed else "No logs found."
        self.stdout.write(message)
|
dolejarz/engsci_capstone_transport | python/temporal_analysis/subway_trips.py | Python | mit | 1,027 | 0.008763 | import pandas as pd
import matplotlib.pyplot as plt
df_subway = pd.read_csv('/Users/dolejarz/Documents/Engineering Science/4th Year/CIV455/github/engsci_capstone_transport/gis/subway_buffer/trips_in_buffer.csv')
df_all = pd.read_csv('/Users/dolejarz/Documents/Engineering Science/4th Year/CIV455/github/engsci_capstone_transport/csv/Trips_Oct_31_2017.csv')
df_subway['tx'] = pd.to_datetime(df_subway['Date'])
df_all['tx'] = pd.to_datetime(df_all['Date'])
df_subway['start_hour'] = df_subway['tx'].dt.hour
df_all['start_hour'] = df_all['tx'].dt.hour
pt_subway = pd.pivot_table(df_subway,index='start_hour',aggfunc='count')
pt_all = pd.pi | vot_table(df_all,index='start_hour',aggfunc='count')
#series of hourly distribution of trips as a percent of daily total
subway_percent = pt_subway['tx']/float(pt_subway['tx'].sum())
all_percent = pt_all['tx']/float(pt_all['tx'].sum())
pl | t.figure(figsize=(12,8), dpi=300)
plt.plot(range(24), subway_percent)
plt.plot(range(24), all_percent,color='r')
plt.savefig('subway_vs_all.png')
|
barometz/shirk | plugs/Auth/auth.py | Python | mit | 3,986 | 0.00276 | # Copyright (c) 2012 Dominic van Berkel
# See LICENSE for details.
from plugs impo | rt plugbase
from util import Event
class AuthPlug(plugbase.Plug):
    """Auth plug. Handles auth stuffs."""
    name = 'Auth'
    # manual_auths is a dict of source:target that are created after !auth requests so they can be
    # responded to appropriately.
    # NOTE(review): this is a class-level mutable, so it is shared by every
    # AuthPlug instance — confirm the bot only ever creates one.
    manual_auths = dict()

    def load(self, startingup=True):
        """Force reloading the userlist in case the plug is reloaded"""
        if not startingup:
            # iteritems() is Python 2 only; this codebase targets Python 2.
            for nick, user in self.users.users_by_nick.iteritems():
                self.handle_usercreated(user)

    @plugbase.event
    def handle_usercreated(self, user):
        """A user has joined a channel, so let's give them perms."""
        user.power = 0
        user.auth_method = ''
        found = False
        # Hostmask auth grants power immediately, no WHOIS round-trip needed.
        if user.hostmask in self.hosts_auth:
            found = True
            self.powerup(user, self.hosts_auth[user.hostmask], 'hostmask', user.hostmask)
        # Known nick prefixes trigger a WHOIS; power is granted later when
        # the 330 ("logged in as") reply arrives.
        for nick in self.known_nicks:
            if user.nickname.lower().startswith(nick):
                found = True
                self.core.sendLine('WHOIS %s' % (user.nickname,))
                break
        if not found and user.nickname in self.manual_auths:
            # !auth attempt from unknown user
            self.log.info('Failed authentication attempt by %s - nickname not found in auth config.' % (user.nickname,))
            self.respond(user.nickname, self.manual_auths[user.nickname],
                         "%s is not in the auth file. This incident will be reported." % user.nickname)
            del self.manual_auths[user.nickname]

    @plugbase.event
    def handle_userrenamed(self, user, oldnick):
        """A user has changed their nickname, let's recheck auth"""
        for nick in self.known_nicks:
            if user.nickname.lower().startswith(nick):
                self.core.sendLine('WHOIS %s' % (user.nickname,))
                break

    def powerup(self, user, power, auth_method, auth_match):
        """Set user's power, log and act on `self.manual_auths` if necessary.

        :param user: The User instance that is being powered up
        :param power: The power (int) the user should have
        :param auth_method: The method user to authenticate
        :param auth_match: The matched value (e.g. the hostmask or NS account)

        """
        user.power = power
        user.auth_method = auth_method
        # If this powerup was triggered by an explicit !auth, acknowledge it.
        if user.nickname in self.manual_auths:
            self.respond(user.nickname, self.manual_auths[user.nickname], "Successfully authenticated %s"
                         % user.nickname)
            del self.manual_auths[user.nickname]
        self.log.info('Power of %s set to %d based on %s: %s'
                      % (user.nickname, user.power, auth_method, auth_match))

    @plugbase.raw('330')
    def handle_loggedinas(self, command, prefix, params):
        """Act on Freenode's 'Logged in as:' response in the WHOIS reply."""
        nickname = params[1]
        account = params[2]
        if account in self.users_auth:
            user = self.users.by_nick(nickname)
            self.powerup(user, self.users_auth[account], 'NickServ', account)

    @plugbase.command()
    def cmd_auth(self, source, target, argv):
        """!auth handler to trigger authentication when that didn't happen right at join."""
        user = self.users.by_nick(source)
        if user is not None:
            # Remember where to send the result, then redo the join-time checks.
            self.manual_auths[source] = target
            self.handle_usercreated(user)

    @plugbase.command()
    def cmd_whoami(self, source, target, argv):
        """Tell the user what their power is and why."""
        user = self.users.by_nick(source)
        if user is None or user.power == 0:
            self.respond(source, target, '%s: You are powerless.' % source)
        else:
            self.respond(source, target, '%s: You are authenticated (%s) and have power %d'
                         % (source, user.auth_method, user.power))
roadmapper/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py | Python | gpl-3.0 | 8,136 | 0.002827 | #!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Plugin metadata consumed by ansible-doc and the module index.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet_info
short_description: Gather information about ec2 VPC subnets in AWS
description:
- Gather information about ec2 VPC subnets in AWS
- This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change.
version_added: "2.1"
author: "Rob White (@wimnat)"
requirements:
- boto3
- botocore
options:
subnet_ids:
description:
- A list of subnet IDs to gather information for.
version_added: "2.5"
aliases: ['subnet_id']
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
type: dict
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all VPC subnets
- ec2_vpc_subnet_info:
# Gather information about a particular VPC subnet using ID
- ec2_vpc_subnet_info:
subnet_ids: subnet-00112233
# Gather information about any VPC subnet with a tag key Name and value Example
- ec2_vpc_subnet_info:
filters:
"tag:Name": Example
# Gather information about any VPC subnet within VPC with ID vpc-abcdef00
- ec2_vpc_subnet_info:
filters:
vpc-id: vpc-abcdef00
# Gather information about a set of VPC subnets, publicA, publicB and publicC within a
# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
# subnet_ids as a list.
- ec2_vpc_subnet_info:
filters:
vpc-id: vpc-abcdef00
"tag:Name": "{{ item }}"
loop:
- publicA
- publicB
- publicC
register: subnet_info
- set_fact:
subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}"
'''
RETURN = '''
subnets:
description: Returns an array of complex objects as described below.
returned: success
type: complex
contains:
subnet_id:
description: The ID of the Subnet.
returned: always
type: str
id:
description: The ID of the Subnet (for backwards compatibility).
returned: always
type: str
vpc_id:
description: The ID of the VPC .
returned: always
type: str
state:
description: The state of the subnet.
returned: always
type: str
tags:
description: A dict of tags associated with the Subnet.
returned: always
type: dict
map_public_ip_on_launch:
description: True/False depending on attribute setting for public IP mapping.
returned: always
type: bool
default_for_az:
description: True if this is the default subnet for AZ.
returned: always
type: bool
cidr_block:
description: The IPv4 CIDR block assigned to the subnet.
returned: always
type: str
available_ip_address_count:
description: Count of available IPs in subnet.
returned: always
type: str
availability_zone:
description: The availability zone where the subnet exists.
returned: always
type: str
assign_ipv6_address_on_creation:
description: True/False depending on attribute setting for IPv6 address assignment.
returned: always
type: bool
ipv6_cidr_block_association_set:
description: An array of IPv6 cidr block association set information.
returned: always
      type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
ipv6_cidr_block:
description: The IPv6 CIDR block that is associated with the subnet.
returned: always
type: str
ipv6_cidr_block_state:
          description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
boto3_conn,
ec2_argument_spec,
get_aws_connection_info,
AWSRetry,
HAS_BOTO3,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
ansible_dict_to_boto3_filter_list
)
from ansible.module_utils._text import to_native
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.exponential_backoff()
def describe_subnets_with_backoff(connection, subnet_ids, filters):
    """
    Describe Subnets with AWSRetry backoff throttling support.

    connection : boto3 client connection object
    subnet_ids : list of subnet ids for which to gather information
    filters : additional filters to apply to request

    Returns the raw boto3 DescribeSubnets response dict; the decorator
    retries with exponential backoff when AWS throttles the call.
    """
    return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
def describe_subnets(connection, module):
    """
    Describe Subnets.

    module : AnsibleModule object
    connection : boto3 client connection object
    """
    params = module.params
    filters = ansible_dict_to_boto3_filter_list(params.get('filters'))
    # A missing id list is treated the same as an explicitly empty one.
    subnet_ids = params.get('subnet_ids') or []

    try:
        response = describe_subnets_with_backoff(connection, subnet_ids, filters)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))

    subnet_info = []
    for subnet in response['Subnets']:
        # keep 'id' alongside 'subnet_id' for backwards compatibility
        subnet['id'] = subnet['SubnetId']
        entry = camel_dict_to_snake_dict(subnet)
        # convert the boto3 tag list to an ansible dict
        entry['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
        subnet_info.append(entry)

    module.exit_json(subnets=subnet_info)
def main():
    # Entry point: validate arguments, connect to EC2 and emit subnet facts.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        subnet_ids=dict(type='list', default=[], aliases=['subnet_id']),
        filters=dict(type='dict', default={})
    ))

    # Read-only module, so check mode is safe to support.
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    if module._name == 'ec2_vpc_subnet_facts':
        module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        try:
            connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
        except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    else:
        module.fail_json(msg="Region must be specified")

    describe_subnets(connection, module)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
Carlosaarodrigues/orpsoc | orpsoc/system.py | Python | gpl-3.0 | 1,541 | 0.005191 | from orpsoc.orpsocconfigparser import OrpsocConfigParser
from orpsoc.config import Config
import os
import logging
logger = logging.getLogger(__name__)
class System:
    """In-memory view of a .system file: its name plus an optional
    backend configuration section."""

    def __init__(self, system_file):
        """Parse SYSTEM_FILE and cache its backend section, if present."""
        logger.debug('__init__() *Entered*' +
                     '\n system_file=' + str(system_file))
        self.backend_name = None
        # Always define self.backend so info() can test it safely; the
        # original only assigned it when the backend section existed,
        # which made info() crash with AttributeError otherwise.
        self.backend = None
        self.config = OrpsocConfigParser(system_file)
        self.name = os.path.basename(system_file).split('.')[0]
        if self.config.has_option('main', 'backend'):
            self.backend_name = self.config.get('main', 'backend')
        if self.backend_name and self.config.has_section(self.backend_name):
            self.backend = dict(self.config.items(self.backend_name))
        logger.debug('__init__() -Done-')

    def info(self):
        """Pretty-print the system name and backend details to stdout."""
        logger.debug('info() *Entered*')
        print(" \nSYSTEM INFO")
        print("Name: " + self.name)
        show_list = lambda s: "\n ".join(s.split('\n'))
        # Guard on self.backend too: a named but missing backend section
        # previously crashed here.
        if self.backend_name and self.backend is not None:
            print("Backend name: " + self.backend_name)
            print(" family: " + self.backend['family'])
            print(" device: " + self.backend['device'])
            print("\n tcl_files: " + show_list(self.backend['tcl_files']))
            print("\n sdc_files: " + show_list(self.backend['sdc_files']))
        logger.debug('info() -Done-')
blaketmiller/mediator | mediator/torrent.py | Python | gpl-2.0 | 8,786 | 0.000341 | #!/usr/bin/env python
from __future__ import absolute_import
import os
from shutil import rmtree
from mediator import logger
from mediator.config import settings
from mediator.media import Media
class UpdateError(Exception):
    """Raised when torrent metadata or its library links cannot be updated."""
    pass
class LinkError(Exception):
    """Raised when hard links cannot be created in or removed from the library."""
    pass
class Torrent(object):
    def __init__(self, attributes):
        """Interacts with filesystem based on data collected from Media object

        Args:
            attributes (dict): expects a Media dictionary

        Attributes:
            name (str): name of torrent tracked
            type (str): media type (episode, season, movie, ... )
            metadata (dict): dictionary containing attributes and files
            path (str): full path to torrent file or directory
            links list[(str, str)]: 2-tuple (/path/to/file, CorrectName_(2016))
        """
        # attributes is expected to hold exactly one top-level key: the
        # torrent name.  keys()[0] only works on Python 2, where keys()
        # returns a list.
        self.name = attributes.keys()[0]
        self.type = attributes[self.name]['type']
        # Copy so later update()s do not mutate the caller's dict.
        self.metadata = attributes[self.name].copy()
        self.path = self._set_path()
        self.links = self._format_media()
    def __iter__(self):
        """Represent class as a dictionary

        Returns:
            iterator over the (key, value) pairs of {self.name: self.metadata}
            (dict.iteritems is Python 2 only)
        """
        return {self.name: self.metadata}.iteritems()
def __str__(self):
"""Represent class as a string
Returns:
attrs (str): attributes in metadata of torrent
"""
attrs = ""
for k, v in self.metadata.items():
if k != "files":
attrs += str(v) + "\n"
return attrs
def _set_path(self):
"""Construct the path to link to based on media type
Returns:
path (str): /path/to/self.type/media_name
"""
if self.type == "season" or self.type == "episode":
path = "{lib}/broadcasts/{series}/Season {season}".format(
lib=settings['media-library'],
series=self.metadata['series'],
season=self.metadata['season'])
elif self.type == "movie":
path = "{lib}/movies/{title}".format(
lib=settings['media-library'],
title=self.metadata['title'])
return path
    def update(self, diff):
        """Update torrent with new metadata attributes

        Args:
            diff (dict): expects a dictionary subset of torrent's existing keys

        Returns:
            raise UpdateError if diff keys are not a subset of existing keys
        """
        if not set(diff).issubset(set(self.metadata)):
            raise UpdateError("Submitted keys not found in metadata")

        # update metadata keys with new values
        for k, v in diff.items():
            print("{}: {} -> {}".format(k, self.metadata[k], v))
            self.metadata[k] = v

        # seasons require more metadata updates for nested keys
        if self.metadata['type'] == "season":
            for e in self.metadata['episodes']:
                print(" - episode: {}".format(e['episode']))
                # update same metadata keys of child as done on parent
                for k, v in diff.items():
                    print(" {}: {} -> {}".format(k, e[k], v))
                    e[k] = v
                # check if metadata changes means name of episode changed
                name = Media.get_episode_name(series=self.metadata['series'],
                                              season=self.metadata['season'],
                                              episode=e['episode'])
                # update episode name if necessary
                if name != e['episodename']:
                    print(" episodename: {} -> {}".format(e['episodename'],
                                                          name))
                    e['episodename'] = name
        # check if metadata changes to episode means name of episode changed
        elif self.metadata['type'] == "episode":
            name = Media.get_episode_name(series=self.metadata['series'],
                                          season=self.metadata['season'],
                                          episode=self.metadata['episode'])
            # update episode name if necessary
            if name != self.metadata['episodename']:
                print("episodename: {} -> {}".format(self.metadata['episodename'],
                                                     name))
                self.metadata['episodename'] = name
def link_media(self):
"""Iterate over all files found and link them with proper styling
to the media directory
Returns:
True if successful, raise LinkError otherwise
"""
try:
os.makedirs(self.path)
except Exception as e:
logger.warning(e)
for link in self.links:
try:
logger.debug("Linking {}".format(link[1]))
os.link(link[0], "{}/{}".format(self.path, link[1]))
except Exception as e:
logger.exception("{}: {}".format(e, link))
raise LinkError("Unable to link torrent to library")
return True
def unlink_media(self):
"""Iterate over all files found and unlink them from media directory
Returns:
True if successful, raise LinkError otherwise
"""
for link in self.links:
try:
logger.debug("Removing link at {}".format(link[1]))
os.remove("{}/{}".format(self.path, link[1]))
except Exception as e:
logger.error("{}: {}".format(e, link))
raise LinkError("Unable to remove content from media library")
return True
    def relink_media(self):
        """Removes links from media directory, gathers links and style again,
        and then relinks media

        Returns:
            True if successfull, raise UpdateError otherwise
        """
        logger.info("Relinking Torrent object")
        # remove links under old names
        try:
            self.unlink_media()
        except LinkError:
            raise UpdateError("Unable to update links for media")

        # essentially reinitializing the object.
        # perhaps there's something more here...
        self.links = []
        self.path = self._set_path()
        self.links = self._format_media()

        # recreate links with new names
        try:
            self.link_media()
        except LinkError:
            raise UpdateError("Unable to update links for media")
        # NOTE(review): "Torrrent" typo is preserved here because it is a
        # runtime log string; fix it in a behavior-changing pass if desired.
        logger.info("Torrrent object media files relinked")
        return True
def _format_media(self):
"""Construct the parseable media filename
Returns:
links: 2-tuple like (/full/path/to/file, name_of_symlink)
"""
movie_style = "{title}_({year})"
season_style = "{series}.S{season:02d}"
episode_style = "{series}.S{season:02d}E{episode:02d}"
links = []
# form the style of the media
if self.type == "movie":
style = movie_style.format(
title=self.metadata['title'].replace(" ", "_"),
year=self.metadata['year'])
elif self.type == "episode":
style = episode_style.format(
series=self.metadata['series'].replace(" ", "_"),
season=self.metadata['season'],
episode=self.metadata['episode'])
if self.metadata['episodename']:
style += "_-_{name}".format(
name=self.metadata['episodename'].replace(" ", "_"))
elif self.type == "season":
style = season_style.format(
series=self.metadata['series'].replace(" ", "_"),
season=self.metadata['season'])
# take the naming style and put it together with torrent's media files
if self.type == "season":
for episode in self.metadata['episodes']:
for f in episode['files']:
style = episode_style.format(
series=episode['series'].replace(" ", "_"),
|
PetrDlouhy/django-comments-moderation | comments_moderation/settings.py | Python | agpl-3.0 | 358 | 0.002793 | import | os
from importlib import import_module
def import_module_attr(path):
    """Resolve a dotted path such as ``'pkg.mod.attr'`` to an object.

    Everything before the final dot is imported as a module; the part
    after it is returned as an attribute of that module.
    """
    module_path, attr_name = path.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, attr_name)
# Settings provider for this app: honours COMMENTS_MODERATION_SETTINGS_MODULE
# from the environment when set, otherwise falls back to Django's own
# ``django.conf.settings`` object.
settings = import_module_attr(
    os.getenv('COMMENTS_MODERATION_SETTINGS_MODULE', 'django.conf.settings')
)
# Moderation behaviour for new comments; 'approve' is the default when the
# settings object does not define COMMENTS_MODERATION_MODE.
MODERATION_MODE = getattr(settings, 'COMMENTS_MODERATION_MODE', 'approve')
helpyellowsn0w/Kurisu | addons/kickban.py | Python | apache-2.0 | 14,000 | 0.002659 | import datetime
import discord
import json
import re
import time
from discord.ext import commands
from sys import argv
class KickBan:
"""
Kicking and banning users.
"""
    def __init__(self, bot):
        # Keep a reference to the running bot so the commands below can use
        # its server/channel handles and messaging helpers.
        self.bot = bot
        print('Addon "{}" loaded'.format(self.__class__.__name__))
    @commands.has_permissions(manage_nicknames=True)
    @commands.command(pass_context=True, name="kick")
    async def kick_member(self, ctx, user, *, reason=""):
        """Kicks a user from the server. Staff only."""
        # ``user`` only shapes the command signature shown in help; the
        # actual target is taken from the message's first mention.
        try:
            try:
                member = ctx.message.mentions[0]
            except IndexError:
                await self.bot.say("Please mention a user.")
                return
            # DM the target before kicking, while the bot can still reach them.
            msg = "You were kicked from {}.".format(self.bot.server.name)
            if reason != "":
                msg += " The given reason is: " + reason
            msg += "\n\nYou are able to rejoin the server, but please read the rules in #welcome-and-rules before participating again."
            try:
                await self.bot.send_message(member, msg)
            except discord.errors.Forbidden:
                pass # don't fail in case user has DMs disabled for this server, or blocked the bot
            # NOTE(review): appending "uk:<id>" before the kick presumably lets
            # a member-remove event handler tell a moderator kick apart from a
            # voluntary leave — confirm against the event handler.
            self.bot.actions.append("uk:"+member.id)
            await self.bot.kick(member)
            await self.bot.say("{} is now gone. 👌".format(self.bot.escape_name(member)))
            # Log the action to both the server-log and mod-log channels.
            msg = "👢 **Kick**: {} kicked {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
            if reason != "":
                msg += "\n✏️ __Reason__: " + reason
            await self.bot.send_message(self.bot.serverlogs_channel, msg)
            await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.kick <user> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
        except discord.errors.Forbidden:
            # One of the API calls above was denied to the bot.
            await self.bot.say("💢 I don't have permission to do this.")
    @commands.has_permissions(ban_members=True)
    @commands.command(pass_context=True, name="ban")
    async def ban_member(self, ctx, user, *, reason=""):
        """Bans a user from the server. OP+ only."""
        # ``user`` only shapes the command signature shown in help; the
        # actual target is taken from the message's first mention.
        try:
            try:
                member = ctx.message.mentions[0]
            except IndexError:
                await self.bot.say("Please mention a user.")
                return
            # DM the target before banning, while the bot can still reach them.
            msg = "You were banned from {}.".format(self.bot.server.name)
            if reason != "":
                msg += " The given reason is: " + reason
            msg += "\n\nThis ban does not expire."
            try:
                await self.bot.send_message(member, msg)
            except discord.errors.Forbidden:
                pass # don't fail in case user has DMs disabled for this server, or blocked the bot
            # NOTE(review): appending "ub:<id>" before the ban presumably lets
            # a member-remove event handler tell a moderator ban apart from a
            # voluntary leave — confirm against the event handler.
            self.bot.actions.append("ub:"+member.id)
            # Second argument is the number of days of messages to delete.
            await self.bot.ban(member, 1)
            await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
            # Log the action to both the server-log and mod-log channels.
            msg = "⛔ **Ban**: {} banned {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
            if reason != "":
                msg += "\n✏️ __Reason__: " + reason
            await self.bot.send_message(self.bot.serverlogs_channel, msg)
            await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.ban <user> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
        except discord.errors.Forbidden:
            # One of the API calls above was denied to the bot.
            await self.bot.say("💢 I don't have permission to do this.")
    @commands.has_permissions(ban_members=True)
    @commands.command(pass_context=True, name="silentban", hidden=True)
    async def silentban_member(self, ctx, user, *, reason=""):
        """Bans a user from the server, without a notification. OP+ only."""
        # Same as ``ban`` but deliberately skips the DM to the target.
        try:
            try:
                member = ctx.message.mentions[0]
            except IndexError:
                await self.bot.say("Please mention a user.")
                return
            # NOTE(review): appending "ub:<id>" before the ban presumably lets
            # a member-remove event handler tell a moderator ban apart from a
            # voluntary leave — confirm against the event handler.
            self.bot.actions.append("ub:"+member.id)
            # Second argument is the number of days of messages to delete.
            await self.bot.ban(member, 1)
            await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
            # Log the action to both the server-log and mod-log channels.
            msg = "⛔ **Silent ban**: {} banned {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
            if reason != "":
                msg += "\n✏️ __Reason__: " + reason
            await self.bot.send_message(self.bot.serverlogs_channel, msg)
            await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.silentban <user> [reason]`." if reason == "" else ""))
        except discord.errors.Forbidden:
            # One of the API calls above was denied to the bot.
            await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="timeban")
async def timeban_member(self, ctx, user, length, *, reason=""):
"""Bans a user for a limited period of time. OP+ only.\n\nLength format: #d#h#m#s"""
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
issuer = ctx.message.author
# thanks Luc#5653
units = {
"d": 86400,
"h": 3600,
"m": 60,
"s": 1
}
seconds = 0
match = re.findall("([0-9]+[smhd])", length) # Thanks to 3dshax server's former bot
if match is None:
return None
for item in match:
seconds += int(item[:-1]) * units[item[-1]]
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unban_time = timestamp + delta
unban_time_string = unban_time.strftime("%Y-%m-%d %H:%M:%S")
with open("data/timebans.json", "r") as f:
timebans = json.load(f)
timebans[member.id] = unban_time_string
self.bot.timebans[member.id] = [member, unban_time, False] # last variable is "notified", for <=30 minute notifications
with open("data/timebans.json", "w") as f:
json.dump(timebans, f)
msg = "You were banned from {}.".format(self.bot.server.name)
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nThis ban expires {} {}.".format(unban_time_string, time.tzname[0])
try:
await self.bot.send_message(member, msg)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
self.bot.actions.append("ub:"+member.id)
await self.bot.ban(member, 1)
await self.bot.say("{} is now b& until {} {}. 👍".format(self.bot.escape_name(member), unban_time_string, time.tzname[0]))
msg = "⛔ **Time ban**: {} banned {} until {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, unban_time_string, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.timeban <user> <length> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="softban")
async def softban_member(self, ctx, user, *, reason):
"""Soft-ban a user. OP+ only.\n\nThis "bans" the user without actually doing a ban on Discord. The bot will instead kick the user every time they join. Discord bans are ac |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/nccl/__init__.py | Python | bsd-2-clause | 1,328 | 0 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for th | e specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for using NVIDIA nccl collective ops.
@@all_max
@@all_min
@@all_prod
@@all_sum
@@broadcast
"""
from __future__ import absolute_im | port
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_max
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_min
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_prod
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_sum
from tensorflow.contrib.nccl.python.ops.nccl_ops import broadcast
from tensorflow.python.util.all_util import remove_undocumented
# Strip every attribute not named in the @@-directives of the module
# docstring above from this module's public namespace.
remove_undocumented(__name__)
|
seanfisk/buzzword-bingo-server | djangorestframework/tests/parsers.py | Python | bsd-3-clause | 7,768 | 0.004119 | # """
# ..
# >>> from djangorestframework.parsers import FormParser
# >>> from djangorestframework.compat import RequestFactory
# >>> from djangorestframework.views import View
# >>> from StringIO import StringIO
# >>> from urllib import urlencode
# >>> req = RequestFactory().get('/')
# >>> some_view = View()
# >>> some_view.request = req # Make as if this request had been dispatched
#
# FormParser
# ============
#
# Data flatening
# ----------------
#
# Here is some example data, which would eventually be sent along with a post request :
#
# >>> inpt = urlencode([
# ... ('key1', 'bla1'),
# ... ('key2', 'blo1'), ('key2', 'blo2'),
# ... ])
#
# Default behaviour for :class:`parsers.FormParser`, is to return a single value for each parameter :
#
# >>> (data, files) = FormParser(some_view).parse(StringIO(inpt))
# >>> data == {'key1': 'bla1', 'key2': 'blo1'}
# True
#
# However, you can customize this behaviour by subclassing :class:`parsers.FormParser`, and overriding :meth:`parsers.FormParser.is_a_list` :
#
# >>> class MyFormParser(FormParser):
# ...
# ... def is_a_list(self, key, val_list):
# ... return len(val_list) > 1
#
# This new parser only flattens the lists of parameters that contain a single value.
#
# >>> (data, files) = MyFormParser(some_view).parse(StringIO(inpt))
# >>> data == {'key1': 'bla1', 'key2': ['blo1', 'blo2']}
# True
#
# .. note:: The same functionality is available for :class:`parsers.MultiPartParser`.
#
# Submitting an empty list
# --------------------------
#
# When submitting an empty select multiple, like this one ::
#
# <select multiple="multiple" name="key2"></select>
#
# The browsers usually strip the parameter completely. A hack to avoid this, and therefore being able to submit an empty select multiple, is to submit a value that tells the server that the list is empty ::
#
# <select multiple="multiple" name="key2"><option value="_empty"></select>
#
# :class:`parsers.FormParser` provides the server-side implementation for this hack. Considering the following posted data :
#
# >>> inpt = urlencode([
# ... ('key1', 'blo1'), ('key1', '_empty'),
# ... ('key2', '_empty'),
# ... ])
#
# :class:`parsers.FormParser` strips the values ``_empty`` from all the lists.
#
# >>> (data, files) = MyFormParser(some_view).parse(StringIO(inpt))
# >>> data == {'key1': 'blo1'}
# True
#
# Oh ... but wait a second, the parameter ``key2`` isn't even supposed to be a list, so the parser just stripped it.
#
# >>> class MyFormParser(FormParser):
# ...
# ... def is_a_list(self, key, val_list):
# ... return key == 'key2'
# ...
# >>> (data, files) = MyFormParser(some_view).parse(StringIO(inpt))
# >>> data == {'key1': 'blo1', 'key2': []}
# True
#
# Better like that. Note that you can configure something else than ``_empty`` for the empty value by setting :attr:`parsers.FormParser.EMPTY_VALUE`.
# """
# import httplib, mimetypes
# from tempfile import TemporaryFile
# from django.test import TestCase
# from djangorestframework.compat import RequestFactory
# from djangorestframework.parsers import MultiPartParser
# from djangorestframework.views import View
# from StringIO import StringIO
#
# def encode_multipart_formdata(fields, files):
# """For testing multipart parser.
# fields is a sequence of (name, value) elements for regular form fields.
# files is a sequence of (name, filename, value) elements for data to be uploaded as files
# Return (content_type, body)."""
# BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
# CRLF = '\r\n'
# L = []
# for (key, value) in fields:
# L.append('--' + BOUNDARY)
# L.append('Content-Disposition: form-data; name="%s"' % key)
# L.append('')
# L.append(value)
# for (key, filename, value) in files:
# L.append('--' + BOUNDARY)
# L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
# L.append('Content-Type: %s' % get_content_type(filename))
# L.append('')
# L.append(value)
# L.append('--' + BOUNDARY + '--')
# L.append(' | ')
# body = CRLF.join(L)
# content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
# return content_type, body
#
# def get_content_type(filename):
# return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
#
#class TestMultiPartParser(TestCase):
# def setUp(self):
# self.req = RequestFactory()
# self.content | _type, self.body = encode_multipart_formdata([('key1', 'val1'), ('key1', 'val2')],
# [('file1', 'pic.jpg', 'blablabla'), ('file1', 't.txt', 'blobloblo')])
#
# def test_multipartparser(self):
# """Ensure that MultiPartParser can parse multipart/form-data that contains a mix of several files and parameters."""
# post_req = RequestFactory().post('/', self.body, content_type=self.content_type)
# view = View()
# view.request = post_req
# (data, files) = MultiPartParser(view).parse(StringIO(self.body))
# self.assertEqual(data['key1'], 'val1')
# self.assertEqual(files['file1'].read(), 'blablabla')
from StringIO import StringIO
from cgi import parse_qs
from django import forms
from django.test import TestCase
from djangorestframework.parsers import FormParser
from djangorestframework.parsers import XMLParser
import datetime
class Form(forms.Form):
    # Minimal form used by TestFormParser: ``field1`` caps the length at 3
    # so the parsed payload "field1=abc&field2=defghijk" validates.
    field1 = forms.CharField(max_length=3)
    field2 = forms.CharField()
class TestFormParser(TestCase):
    """Checks that FormParser turns urlencoded input into form-valid data."""

    def setUp(self):
        # Raw urlencoded payload matching the fields declared on ``Form``.
        self.string = "field1=abc&field2=defghijk"

    def test_parse(self):
        """ Make sure the `QueryDict` works OK """
        data, files = FormParser(None).parse(StringIO(self.string))
        self.assertEqual(Form(data).is_valid(), True)
class TestXMLParser(TestCase):
    # Exercises XMLParser against a flat document and a nested one, checking
    # the type coercion applied during parsing (int, datetime, empty -> None).
    def setUp(self):
        # Flat document: one element per field, including an empty element
        # and a datetime string, to verify per-value coercion.
        self._input = StringIO(
            '<?xml version="1.0" encoding="utf-8"?>'
            '<root>'
            '<field_a>121.0</field_a>'
            '<field_b>dasd</field_b>'
            '<field_c></field_c>'
            '<field_d>2011-12-25 12:45:00</field_d>'
            '</root>'
        )
        # Expected parse of the flat document above.
        self._data = {
            'field_a': 121,
            'field_b': 'dasd',
            'field_c': None,
            'field_d': datetime.datetime(2011, 12, 25, 12, 45, 00)
        }
        # Nested document: repeated <list-item> elements should collapse
        # into a Python list of dicts.
        self._complex_data_input = StringIO(
            '<?xml version="1.0" encoding="utf-8"?>'
            '<root>'
            '<creation_date>2011-12-25 12:45:00</creation_date>'
            '<sub_data_list>'
            '<list-item><sub_id>1</sub_id><sub_name>first</sub_name></list-item>'
            '<list-item><sub_id>2</sub_id><sub_name>second</sub_name></list-item>'
            '</sub_data_list>'
            '<name>name</name>'
            '</root>'
        )
        # Expected parse of the nested document above.
        self._complex_data = {
            "creation_date": datetime.datetime(2011, 12, 25, 12, 45, 00),
            "name": "name",
            "sub_data_list": [
                {
                    "sub_id": 1,
                    "sub_name": "first"
                },
                {
                    "sub_id": 2,
                    "sub_name": "second"
                }
            ]
        }
    def test_parse(self):
        # Flat document round-trips to the expected dict.
        parser = XMLParser(None)
        (data, files) = parser.parse(self._input)
        self.assertEqual(data, self._data)
    def test_complex_data_parse(self):
        # Nested lists of items are reconstructed as lists of dicts.
        parser = XMLParser(None)
        (data, files) = parser.parse(self._complex_data_input)
        self.assertEqual(data, self._complex_data)
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/GLUT/fonts.py | Python | mit | 946 | 0.0074 | """Load font "constants" (actually void *s) from the GLUT DLL"""
from OpenGL import platform
import logging
log = logging.getLogger( 'OpenGL.GLUT.fonts' )
# Resolve each GLUT font constant to a module-level name. Fonts that cannot
# be resolved are published as None so importers can test for availability.
for name in [
    'GLUT_STROKE_ROMAN',
    'GLUT_STROKE_MONO_ROMAN',
    'GLUT_BITMAP_9_BY_15',
    'GLUT_BITMAP_8_BY_13',
    'GLUT_BITMAP_TIMES_ROMAN_10',
    'GLUT_BITMAP_TIMES_ROMAN_24',
    'GLUT_BITMAP_HELVETICA_10',
    'GLUT_BITMAP_HELVETICA_12',
    'GLUT_BITMAP_HELVETICA_18',
]:
    try:
        # Win32 just has pointers to values 1,2,3,etc
        # GLX has pointers to font structures...
        p = platform.getGLUTFontPointer( name )
    except (ValueError,AttributeError), err:
        # Only warn when GLUT itself loaded; with no GLUT at all the
        # failure is expected and not worth logging.
        if platform.GLUT:
            log.warn( '''Unable to load font: %s''', name )
        globals()[name] = None
    else:
        globals()[name] = p
# Drop the loop temporaries so they do not leak into the module namespace;
# each may be unbound if the loop body never ran or never failed.
try:
    del p
except NameError, err:
    pass
try:
    del platform, name
except NameError, err:
    pass
|
chrswt/vicarious-microservice | tests/translation.py | Python | mit | 2,992 | 0.001003 | import unittest
import requests
class TranslationTests(unittest.TestCase):
    # Integration tests for the pig-latin translation endpoint. Each test
    # POSTs raw text to a locally running service and inspects the response,
    # so a server must be listening on self.url for these to pass.
    def setUp(self):
        # Base URL of the translation microservice under test.
        self.url = 'http://127.0.0.1/api/translate'
    def test_given_words(self):
        """Should pass for the basic test cases provided"""
        test_words = ['pig', 'banana', 'trash', 'happy', 'duck', 'glove',
                      'eat', 'omelet', 'are']
        expected_words = ['igpay', 'ananabay', 'ashtray', 'appyhay', 'uckday',
                          'oveglay', 'eatyay', 'omeletyay', 'areyay']
        responses = [requests.post(self.url, x).text for x in test_words]
        self.assertEqual(responses, expected_words,
                         'Should pass for the basic test cases provided')
    def test_capitalization(self):
        """Should preserve capitalization in words"""
        test_words = ['Capitalized', 'Words', 'Should', 'Work']
        expected_words = ['Apitalizedcay', 'Ordsway', 'Ouldshay', 'Orkway']
        responses = [requests.post(self.url, x).text for x in test_words]
        self.assertEqual(responses, expected_words,
                         'Words should preserve their capitalization')
    def test_sentences(self):
        """Should translate sentences with preserved punctuation"""
        test_sentence = ('Long sentences should retain their capitalization, '
                         'as well as punctuation - hopefully!!')
        expected_result = ('Onglay entencessay ouldshay etainray eirthay '
                           'apitalizationcay, asyay ellway asyay unctuationpay'
                           ' - opefullyhay!!')
        response = requests.post(self.url, test_sentence).text
        self.assertEqual(response, expected_result,
                         'Should translate sentences accurately')
    def test_edge_cases(self):
        """Should be able to handle words with no vowels"""
        # Vowel-less words get the plain 'ay'/'yay' suffix treatment.
        test_word = 'sky'
        expected_result = 'skyay'
        response = requests.post(self.url, test_word).text
        self.assertEqual(response, expected_result,
                         'Should be able to translate words without vowels')
    def test_error_cases(self):
        """Should return errors for invalid input"""
        # Empty request body is rejected as Not Acceptable.
        self.assertEqual(requests.post(self.url, '').status_code, 406,
                         'Should return HTTP/406 for empty strings')
    def test_long_paragraphs(self):
        """Should translate long paragraphs with new lines intact"""
        # Show the full diff on failure; the fixtures are multi-line.
        self.maxDiff = None
        expected_result = ''
        test_paragraph = ''
        # Fixture files hold the input paragraph and its expected translation.
        with open('tests/lorem_ipsum.txt') as input_paragraph:
            test_paragraph = input_paragraph.read()
        with open('tests/lorem_ipsum_translated.txt') as expected:
            expected_result = expected.read()
        response = requests.post(self.url, test_paragraph).text
        self.assertEqual(response, expected_result,
                         'Should translate long paragraphs accurately')
if __name__ == '__main__':
unittest.main() |
midnightradio/gensim | docs/src/gallery/core/run_core_concepts.py | Python | gpl-3.0 | 15,054 | 0.002192 | r"""
Core Concepts
=============
This tutorial introduces Documents, Corpora, Vectors and Models: the basic concepts and terms needed to understand and use gensim.
"""
import pprint
###############################################################################
# The core concepts of ``gensim`` are:
#
# 1. :ref:`core_concepts_document`: some text.
# 2. :ref:`core_concepts_corpus`: a collection of documents.
# 3. :ref:`core_concepts_vector`: a mathematically convenient representation of a document.
# 4. :ref:`core_concepts_model`: an algorithm for transforming vectors from one representation to another.
#
# Let's examine each of these in slightly more detail.
#
# .. _core_concepts_document:
#
# Document
# --------
#
# In Gensim, a *document* is an object of the `text sequence type <https://docs.python.org/3.7/library/stdtypes.html#text-sequence-type-str>`_ (commonly known as ``str`` in Python 3).
# A document could be anything from a short 140 character tweet, a single
# paragraph (i.e., journal article abstract), a news article, or a book.
#
document = "Human machine interface for lab abc computer applications"
###############################################################################
# .. _core_concepts_corpus:
#
# Corpus
# ------
#
# A *corpus* is a collection of :ref:`core_concepts_document` objects.
# Corpora serve two roles in Gensim:
#
# 1. Input for training a :ref:`core_concepts_model`.
# During training, the models use this *training corpus* to look for common
# themes and topics, initializing their internal model parameters.
#
# Gensim focuses on *unsupervised* models so that no human intervention,
# such as costly annotations or tagging documents by hand, is required.
#
# 2. Documents to organize.
# After training, a topic model can be used to extract topics from new
# documents (documents not seen in the training corpus).
#
# Such corpora can be indexed for
# :ref:`sphx_glr_auto_examples_core_run_similarity_queries.py`,
# queried by semantic similarity, clustered etc.
#
# Here is an example corpus.
# It consists of 9 documents, where each document is a string consisting of a single sentence.
#
text_corpus = [
"Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey",
]
###############################################################################
#
# .. Important::
# The above example loads the entire corpus into memory.
# In practice, corpora may be very large, so loading them into memory may be impossible.
# Gensim intelligently handles such corpora by *streaming* them one document at a time.
# See :ref:`corpus_streaming_tutorial` for details.
#
# This is a particularly small example of a corpus for illustration purposes.
# Another example could be a list of all the plays written by Shakespeare, list
# of all wikipedia articles, or all tweets by a particular person of interest.
#
# After collecting our corpus, there are typically a number of preprocessing
# steps we want to undertake. We'll keep it simple and just remove some
# commonly used English words (such as 'the') and words that occur only once in
# the corpus. In the process of doing so, we'll tokenize our data.
# Tokenization breaks up the documents into words (in this case using space as
# a delimiter).
#
# .. Important::
# There are better ways to perform preprocessing than just lower-casing and
# splitting by space. Effective preprocessing is beyond the scope of this
# tutorial: if you're interested, check out the
# :py:func:`gensim.utils.simple_preprocess` function.
#
# Create a set of frequent words
stoplist = set('for a of the and to in'.split(' '))
# Lowercase each document, split it by white space and filter out stopwords
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in text_corpus]
# Count word frequencies
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
# Only keep words that appear more than once
processed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]
pprint.pprint(processed_corpus)
###############################################################################
# Before proceeding, we want to associate each word in the corpus with a unique
# integer ID. We can do this using the :py:class:`gensim.corpora.Dictionary`
# class. This dictionary defines the vocabulary of all words that our
# processing knows about.
#
from gensim import corpora
dictionary = corpora.Dictionary(processed_corpus)
print(dictionary)
###############################################################################
# Because our corpus is small, there are only 12 different tokens in this
# :py:class:`gensim.corpora.Dictionary`. For larger corpora, dictionaries that
# contain hundreds of thousands of tokens are quite common.
#
###############################################################################
# .. _core_concepts_vector:
#
# Vector
# ------
#
# To infer the latent structure in our corpus we need a way to represent
# documents that we can manipulate mathematically. One approach is to represent
# each document as a vector of *features*.
# For example, a single feature may be thought of as a question-answer pair:
#
# 1. How many times does the word *splonge* appear in the document? Zero.
# 2. How many paragraphs does the document consist of? Two.
# 3. How many fonts does the document use? Five.
#
# The question is usually represented only by its integer id (such as `1`, `2` and `3`).
# The representation of this document then becomes a series of pairs like ``(1, 0.0), (2, 2.0), (3, 5.0)``.
# This is known as a *dense vector*, because it contains an explicit answer to each of the above questions.
#
# If we know all the questions in advance, we may leave them implicit
# and simply represent the document as ``(0, 2, 5)``.
# This sequence of answers is the **vector** for our document (in this case a 3-dimensional dense vector).
# For practical purposes, only questions to which the answer is (or
# can be converted to) a *single floating point number* are allowed in Gensim.
#
# In practice, vectors often consist of many zero values.
# To save memory, Gensim omits all vector elements with value 0.0.
# The above example thus becomes ``(2, 2.0), (3, 5.0)``.
# This is known as a *sparse vector* or *bag-of-words vector*.
# The values of all missing features in this sparse representation can be unambiguously resolved to zero, ``0.0``.
#
# Assuming the questions are the same, we can compare the vectors of two different documents to each other.
# For example, assume we are given two vectors ``(0.0, 2.0, 5.0)`` and ``(0.1, 1.9, 4.9)``.
# Because the vectors are very similar to each other, we can conclude that the documents corresponding to those vectors are similar, too.
# Of course, the correctness of that conclusion depends on how well we picked the questions in the first place.
#
# Another approach to represent a document as a vector is the *bag-of-words
# model*.
# Under the bag-of-words model each document is represented by a vector
# containing the frequency counts of each word in the dictionary.
# For example, assume we have a dictionary containing the words
# ``['coffee', 'milk', 'sugar', 'spoon']``.
# A document consisting of the string ``"coffee milk coffee"`` would then
# be represented by the vector ``[2, 1, 0, 0]`` where the entries of the vecto | r
# are (in order) the occurrences of "coffee", "milk", "sugar" and "spoon" in
# the document. The length of the vector is the number of e | ntries in the
# dictionary. One of the main properties of the bag-of-words model is that it
# completely ignores the order |
quietguy675/website | mysite/settings.py | Python | bsd-3-clause | 3,208 | 0.001247 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — acceptable only for
# local development; load it from the environment for any real deployment.
SECRET_KEY = '6e6hiq9-#_(c%cs^h5n9s)ary_dvl4d^--xu%7#-#8wn8hzqat'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Only loopback requests are served; extend this list when deploying.
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'projects.apps.ProjectsConfig',
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = | [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.1 | 1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
undeath/joinmarket-clientserver | jmbitcoin/test/test_bip21.py | Python | gpl-3.0 | 6,062 | 0.00165 | import jmbitcoin as btc
import pytest
def test_bip21_decode():
    """Valid BIP21 URIs must parse into their components; malformed ones
    must raise ValueError."""
    # BUG FIX: the original put many calls inside one ``with pytest.raises``
    # block. A raises-context exits at the first exception, so only the first
    # statement was ever executed/checked — every later invalid-URI case was
    # dead code. Give each invalid URI its own context instead.
    invalid_uris = [
        '',
        'nfdjksnfjkdsnfjkds',
        '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W',
        '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=20.3',
        'bitcoin:',
        'bitcoin:?amount=20.3',
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=',
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=XYZ',
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100\'000',
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100,000',
        # More than the 21-million-coin supply is not a valid amount.
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100000000',
    ]
    for uri in invalid_uris:
        with pytest.raises(ValueError):
            btc.decode_bip21_uri(uri)
    # The URI scheme is case-insensitive.
    assert(btc.decode_bip21_uri('bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
        )['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    assert(btc.decode_bip21_uri('BITCOIN:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
        )['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    assert(btc.decode_bip21_uri('BitCoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
        )['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    # Optional label/message parameters; amounts decode to satoshis.
    parsed = btc.decode_bip21_uri(
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?label=Luke-Jr')
    assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    assert(parsed['label'] == 'Luke-Jr')
    parsed = btc.decode_bip21_uri(
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=20.3&label=Luke-Jr')
    assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    assert(parsed['amount'] == 2030000000)
    assert(parsed['label'] == 'Luke-Jr')
    parsed = btc.decode_bip21_uri(
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=50&label=Luke-Jr&message=Donation%20for%20project%20xyz')
    assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    assert(parsed['amount'] == 5000000000)
    assert(parsed['label'] == 'Luke-Jr')
    assert(parsed['message'] == 'Donation for project xyz')
    # Unknown req-* parameters must be rejected per BIP21...
    with pytest.raises(ValueError):
        btc.decode_bip21_uri(
            'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?req-somethingyoudontunderstand=50&req-somethingelseyoudontget=999')
    # ...but unknown non-required parameters are passed through verbatim.
    parsed = btc.decode_bip21_uri(
        'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?somethingyoudontunderstand=50&somethingelseyoudontget=999')
    assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
    assert(parsed['somethingyoudontunderstand'] == '50')
    assert(parsed['somethingelseyoudontget'] == '999')
def test_bip21_encode():
    """BIP21 URI encoding tests: address-only URIs, single and multiple
    query parameters, req-* parameters, and rejection of malformed amounts.

    BUG FIX: the original wrapped *all* of the invalid-amount calls in a
    single ``with pytest.raises(ValueError)`` block.  Execution of that block
    stops at the first call that raises, so every call after the first was
    never executed.  Each call now gets its own ``raises`` context.
    """
    addr = '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
    assert btc.encode_bip21_uri(addr, {}) == 'bitcoin:' + addr
    assert (btc.encode_bip21_uri(addr, {'label': 'Luke-Jr'}) ==
            'bitcoin:' + addr + '?label=Luke-Jr')
    # Both dictionary and list of tuples should work.
    assert (btc.encode_bip21_uri(addr, [('label', 'Luke-Jr')]) ==
            'bitcoin:' + addr + '?label=Luke-Jr')
    # Use the list-of-tuples form for multi-parameter tests, as dicts don't
    # have guaranteed ordering.
    assert (btc.encode_bip21_uri(addr, [
        ('amount', 20.3),
        ('label', 'Luke-Jr')
    ]) == 'bitcoin:' + addr + '?amount=20.3&label=Luke-Jr')
    assert (btc.encode_bip21_uri(addr, [
        ('amount', 50),
        ('label', 'Luke-Jr'),
        ('message', 'Donation for project xyz')
    ]) == 'bitcoin:' + addr +
        '?amount=50&label=Luke-Jr&message=Donation%20for%20project%20xyz')
    # Encoding (unlike decoding) must accept unknown req-* parameters.
    assert (btc.encode_bip21_uri(addr, [
        ('req-somethingyoudontunderstand', 50),
        ('req-somethingelseyoudontget', 999)
    ]) == 'bitcoin:' + addr +
        '?req-somethingyoudontunderstand=50&req-somethingelseyoudontget=999')
    assert (btc.encode_bip21_uri(addr, [
        ('somethingyoudontunderstand', 50),
        ('somethingelseyoudontget', 999)
    ]) == 'bitcoin:' + addr +
        '?somethingyoudontunderstand=50&somethingelseyoudontget=999')
    # Invalid amounts must raise ValueError; test both dict and list forms.
    bad_amounts = ['', 'XYZ', "100'000", '100,000', '100000000']
    for bad_amount in bad_amounts:
        with pytest.raises(ValueError):
            btc.encode_bip21_uri(addr, {'amount': bad_amount})
        with pytest.raises(ValueError):
            btc.encode_bip21_uri(addr, [('amount', bad_amount)])
|
Earthstar/double-d | streetfarer/views.py | Python | mit | 7,086 | 0.004375 | import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.context_processors import csrf
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response
from django.template import Template, Context, RequestContext
from django.template.loader import get_template
from pathgenerator.models import UserForm
def home(request):
    """Render the landing page with the place-type tag list and a login form."""
    tags = ["art_gallery", "bicycle_store", "cafe", "book_store",
            "aquarium", "park", "pet_store", "campground", "zoo",
            "cemetery", "funeral_home", "liquor_store", "hospital",
            "beauty_salon", "clothing_store", "florist", "hair_care",
            "jewelry_store", "shoe_store", "shopping_mall", "spa", "department_store",
            "accounting", "atm", "bank", "courthouse", "finance",
            "insurance_agency", "lawyer", "parking", "post_office", "storage",
            "bar", "casino", "night_club", "amusement_park"]
    context = {"tag_list": tags, "login_form": UserForm()}
    # The CSRF token must be in the context for the login form to POST.
    context.update(csrf(request))
    return render_to_response('home.html', context)
def add_user(request):
    """User-creation form view.

    The form itself checks whether the user already exists; on a valid POST
    the user is created and logged in straight away.
    """
    if request.method == "POST":
        signup_form = UserForm(request.POST)
        if signup_form.is_valid():
            User.objects.create_user(**signup_form.cleaned_data)
            # authenticate() must be called before login().
            user = authenticate(username=request.POST['username'],
                                password=request.POST['password'])
            # The user was just created, so no existence checks are needed.
            login(request, user)
    else:
        signup_form = UserForm()
    return render(request, 'adduser.html', {'form': signup_form})
# login_required automatically redirects anonymous users to the login page;
# the 'next' query parameter can be used to send them back here afterwards.
@login_required
def secret(request):
    """Render a page that is only visible to authenticated users."""
    template = get_template('secret.html')
    return HttpResponse(template.render(Context({'user': request.user})))
def logout_page(request):
    """Log the current user out, then redirect to the main page."""
    logout(request)
    return HttpResponseRedirect('/')
def harmony(request):
    """Render the harmony demo template with a sample title."""
    rendered = get_template('harmony.html').render(Context({'title': "Django Title"}))
    return HttpResponse(rendered)
def mockup(request):
    """Render the static mockup template (no context data)."""
    rendered = get_template('mockup.html').render(Context({}))
    return HttpResponse(rendered)
def signup_ajax(request):
    """Create a user via ajax and log them in; responds with a JSON status."""
    payload = json.dumps(_signup_ajax(request))
    return HttpResponse(content=payload, content_type="application/json")
def _signup_ajax(request):
    """Helper for signup_ajax: validate the POSTed fields, create the user,
    and log them in.

    Returns a dict of the form ``{"status": ..., "message": ...}``.

    BUG FIX: the original field check read
    ``if not "username" in request.POST and "password" in ... :``
    which, due to operator precedence, negated only the first membership test
    and therefore never reliably detected missing fields.
    """
    if request.method != "POST":
        return {
            "status": "error",
            "message": "Wrong request method"
        }
    required_fields = ("username", "password", "passwordCheck", "email")
    if not all(field in request.POST for field in required_fields):
        return {
            "status": "error",
            "message": "Missing field"
        }
    # Check if username exists.
    username = request.POST["username"]
    if User.objects.filter(username=username).exists():
        return {
            "status": "error",
            "message": "Username already exists"
        }
    # Lol server-side password match checking.
    password = request.POST["password"]
    if password != request.POST["passwordCheck"]:
        return {
            "status": "error",
            "message": "Password does not match"
        }
    email = request.POST["email"]
    User.objects.create_user(username=username, email=email, password=password)
    # authenticate() must be called before login().
    user = authenticate(username=username, password=password)
    login(request, user)
    return {
        "status": "success",
        "message": "User created and logged in"
    }
def login_ajax(request):
    """Ajax login endpoint (does not use UserForm).

    Responds with json of the form::

        {status: <"error", "success">, message: <description of status>}
    """
    payload = json.dumps(_login_ajax(request))
    return HttpResponse(content=payload, content_type="application/json")
def _login_ajax(request):
    """Helper for login_ajax.

    Returns a dict of the form ``{"status": ..., "message": ...}``.

    Fixes over the original:
    * the field check used ``not "username" in request.POST and "password"
      in request.POST`` which negates only the first membership test;
    * removed Python-2 ``print user`` debug statements.
    """
    if request.method != "POST":
        return {
            "status": "error",
            "message": "Wrong request method"
        }
    if not ("username" in request.POST and "password" in request.POST):
        return {
            "status": "error",
            "message": "Invalid form input"
        }
    username = request.POST["username"]
    password = request.POST["password"]
    # Verify that the user exists before attempting authentication.
    if User.objects.filter(username=username).count() == 0:
        return {
            "status": "error",
            "message": "Username doesn't exist"
        }
    user = authenticate(username=username, password=password)
    if user is None:
        return {
            "status": "error",
            "message": "Wrong password for username"
        }
    # is_active is a boolean attribute, not a method.
    if not user.is_active:
        return {
            "status": "error",
            "message": "User is not active"
        }
    login(request, user)
    return {
        "status": "success",
        "message": "Login successful"
    }
def is_logged_in(request):
    """Report whether the current session is authenticated, as JSON."""
    answer = "true" if request.user.is_authenticated() else "false"
    return HttpResponse(content=json.dumps({"message": answer}),
                        content_type="application/json")
|
Hexadorsimal/pynes | nes/processors/cpu/instructions/addressing_modes/immediate.py | Python | mit | 461 | 0 | from .addressing_mode import AddressingMode
class ImmediateAddressingMode(AddressingMode):
    """Addressing mode whose operand is the instruction's own parameter byte.

    Immediate operands are read directly from the instruction stream, so no
    effective address exists and writing a result is impossible.
    """

    @property
    def instruction_size(self):
        # Opcode byte plus one immediate operand byte.
        return 2

    def calculate_address(self, processor, parameter):
        # Immediate mode has no effective address.
        raise RuntimeError('This should never be called')

    def read_source(self, processor, parameter):
        # The parameter *is* the immediate value.
        return parameter

    def write_result(self, processor, parameter, value):
        # Immediate operands are read-only.
        raise RuntimeError('This should never be called')
|
cybojenix/projects_reward_system | cg4/wsgi.py | Python | apache-2.0 | 381 | 0.002625 | """
WSGI config for cg4 project.
It exposes the WSGI callable as a module-level variabl | e named ``application``.
For mor | e information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# The settings module must be configured before Django is initialised.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cg4.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by application servers
# (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
|
marmyshev/item_title | openlp/plugins/songs/forms/songmaintenanceform.py | Python | gpl-2.0 | 24,170 | 0.002483 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
import os
from PyQt4 import QtGui, QtCore
from sqlalchemy.sql import and_
from openlp.core.lib import Registry, UiStrings, translate
from openlp.core.lib.ui import critical_error_message_box
from openlp.plugins.songs.forms.authorsform import AuthorsForm
from openlp.plugins.songs.forms.topicsform import TopicsForm
from openlp.plugins.songs.forms.songbookform import SongBookForm
from openlp.plugins.songs.lib.db import Author, Book, Topic, Song
from .songmaintenancedialog import Ui_SongMaintenanceDialog
log = logging.getLogger(__name__)
class SongMaintenanceForm(QtGui.QDialog, Ui_SongMaintenanceDialog):
"""
Class documentation goes here.
"""
def __init__(self, manager, parent=None):
"""
Constructor
"""
super(SongMaintenanceForm, self).__init__(parent)
self.setupUi(self)
self.manager = manager
self.author_form = Aut | horsForm(self)
self.topic_form = TopicsForm(self)
self.song_book_form = SongBookForm(self)
# Disable all edit and delete buttons, as there is no row selected.
self.delete_author_button.setEnabled(False)
self.edit_author_button.setEnabled(False)
self.delete_topic_button.setEnabled(False)
self.edit_topic_button.setEnabled(False)
self.delete_book_button.setEnabled(False)
self.edit_book_button.setEnabled(False)
# Signa | ls
self.add_author_button.clicked.connect(self.on_add_author_button_clicked)
self.add_topic_button.clicked.connect(self.on_add_topic_button_clicked)
self.add_book_button.clicked.connect(self.on_add_book_button_clicked)
self.edit_author_button.clicked.connect(self.on_edit_author_button_clicked)
self.edit_topic_button.clicked.connect(self.on_edit_topic_button_clicked)
self.edit_book_button.clicked.connect(self.on_edit_book_button_clicked)
self.delete_author_button.clicked.connect(self.on_delete_author_button_clicked)
self.delete_topic_button.clicked.connect(self.on_delete_topic_button_clicked)
self.delete_book_button.clicked.connect(self.on_delete_book_button_clicked)
self.authors_list_widget.currentRowChanged.connect(self.on_authors_list_row_changed)
self.topics_list_widget.currentRowChanged.connect(self.on_topics_list_row_changed)
self.song_books_list_widget.currentRowChanged.connect(self.on_song_books_list_row_changed)
def exec_(self, from_song_edit=False):
"""
Show the dialog.
``from_song_edit``
Indicates if the maintenance dialog has been opened from song edit
or from the media manager. Defaults to **False**.
"""
self.from_song_edit = from_song_edit
self.type_list_widget.setCurrentRow(0)
self.reset_authors()
self.reset_topics()
self.reset_song_books()
self.type_list_widget.setFocus()
return QtGui.QDialog.exec_(self)
def _get_current_item_id(self, list_widget):
"""
Get the ID of the currently selected item.
``list_widget``
The list widget to examine.
"""
item = list_widget.currentItem()
if item:
item_id = (item.data(QtCore.Qt.UserRole))
return item_id
else:
return -1
def _delete_item(self, item_class, list_widget, reset_func, dlg_title, del_text, err_text):
"""
Delete an item.
"""
item_id = self._get_current_item_id(list_widget)
if item_id != -1:
item = self.manager.get_object(item_class, item_id)
if item and not item.songs:
if critical_error_message_box(dlg_title, del_text, self, True) == QtGui.QMessageBox.Yes:
self.manager.delete_object(item_class, item.id)
reset_func()
else:
critical_error_message_box(dlg_title, err_text)
else:
critical_error_message_box(dlg_title, UiStrings().NISs)
def reset_authors(self):
"""
Reloads the Authors list.
"""
self.authors_list_widget.clear()
authors = self.manager.get_all_objects(Author, order_by_ref=Author.display_name)
for author in authors:
if author.display_name:
author_name = QtGui.QListWidgetItem(author.display_name)
else:
author_name = QtGui.QListWidgetItem(' '.join([author.first_name, author.last_name]))
author_name.setData(QtCore.Qt.UserRole, author.id)
self.authors_list_widget.addItem(author_name)
def reset_topics(self):
"""
Reloads the Topics list.
"""
self.topics_list_widget.clear()
topics = self.manager.get_all_objects(Topic, order_by_ref=Topic.name)
for topic in topics:
topic_name = QtGui.QListWidgetItem(topic.name)
topic_name.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_widget.addItem(topic_name)
def reset_song_books(self):
"""
Reloads the Books list.
"""
self.song_books_list_widget.clear()
books = self.manager.get_all_objects(Book, order_by_ref=Book.name)
for book in books:
book_name = QtGui.QListWidgetItem('%s (%s)' % (book.name, book.publisher))
book_name.setData(QtCore.Qt.UserRole, book.id)
self.song_books_list_widget.addItem(book_name)
def check_author_exists(self, new_author, edit=False):
"""
Returns *False* if the given Author already exists, otherwise *True*.
"""
authors = self.manager.get_all_objects(
Author,
and_(
Author.first_name == new_author.first_name,
Author.last_name == new_author.last_name,
Author.display_name == new_author.display_name
)
)
return self.__check_object_exists(authors, new_author, edit)
def check_topic_exists(self, new_topic, edit=False):
"""
|
KuKuKai/104asiajava | 509.py | Python | mit | 712 | 0.050562 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 19:32:49 2018
@author: JinJheng
"""
x,y=map(int,input().split(','))
m,n=map(int,input().split(','))
if n > y:
greater = n
else:
greater = y
while(True):
if((greater % n == 0) and (greater % y == 0)):
q = greater
break
greater += 1
o=q/y*x
r=q/n*m
p=int(o+r)
def compute():
if p>q:
small=q
else:
small=p
for i in range(1,small+1):
if (p%i==0)and(q%i==0):
ans=i
print(x,end='')
print('/',end='')
print(y,'+', | m,end='')
print('/',end='')
pr | int(n,'=',int(p/ans),end='')
print('/',end='')
print(int(q/ans))
compute() |
kcartier/tensorflow-toe-in-the-water | tensorflow/python/kernel_tests/linalg_grad_test.py | Python | apache-2.0 | 3,516 | 0.012514 | """Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
class MatrixInverseGradientTest(tf.test.TestCase):
  # Test methods are generated and attached dynamically below via setattr().
  pass  # Filled in below
def _GetMatrixInverseGradientTest(dtype_, shape_):
  """Build a test method checking gradients of (batch_)matrix_inverse.

  Repairs two source lines that were garbled by a copy/paste artifact
  (the ``high=100.0`` keyword argument and the batch branch assignment).

  Args:
    dtype_: numpy dtype for the random test matrix.
    shape_: full input shape; the trailing two dimensions hold the matrices,
      any leading dimensions are batch dimensions.

  Returns:
    A test function suitable for attaching to a tf.test.TestCase subclass.
  """
  def Test(self):
    with self.test_session():
      np.random.seed(1)
      m = np.random.uniform(low=1.0,
                            high=100.0,
                            size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = tf.constant(m)
      epsilon = np.finfo(dtype_).eps
      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      delta = epsilon**(1.0 / 3.0)
      tol = 1e-3
      if len(shape_) == 2:
        ainv = tf.matrix_inverse(a)
      else:
        ainv = tf.batch_matrix_inverse(a)
      theoretical, numerical = gc.ComputeGradient(a,
                                                  shape_,
                                                  ainv,
                                                  shape_,
                                                  delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
  return Test
class MatrixDeterminantGradientTest(tf.test.TestCase):
  # Test methods are generated and attached dynamically below via setattr().
  pass  # Filled in below
def _GetMatrixDeterminantGradientTest(dtype_, shape_):
  """Return a test method checking gradients of (batch_)matrix_determinant."""
  def Test(self):
    with self.test_session():
      np.random.seed(1)
      matrix = np.random.uniform(
          low=1.0, high=100.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = tf.constant(matrix)
      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # Tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build.
      tol = 1e-3
      if len(shape_) == 2:
        c = tf.matrix_determinant(a)
      else:
        c = tf.batch_matrix_determinant(a)
      out_shape = shape_[:-2]  # last two dimensions hold matrices
      theoretical, numerical = gc.ComputeGradient(a, shape_, c, out_shape,
                                                  delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
  return Test
if __name__ == '__main__':
  # TODO(rmlarsen,irving): Reenable float32 once tolerances are fixed
  # The test used to loop over (np.float, np.double), both of which are float64.
  for dtype in (np.float64,):
    for size in 2, 3, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      # [(3, 2)] * (size < 10) appends the rank-4 batch shape only when the
      # boolean is True (i.e. multiplies the list by 0 or 1).
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        shape = extra + (size, size)
        name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        setattr(MatrixInverseGradientTest, 'testMatrixInverseGradient_' + name,
                _GetMatrixInverseGradientTest(dtype, shape))
  for dtype in (np.float64,):
    for size in 2, 5, 10:
      # increase this list to check batch version
      for extra in [()]:
        shape = extra+(size, size)
        name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        setattr(MatrixDeterminantGradientTest,
                'testMatrixDeterminantGradient_' + name,
                _GetMatrixDeterminantGradientTest(dtype, shape))
  tf.test.main()
|
google-research/federated | distributed_dp/distributed_discrete_gaussian_query_test.py | Python | apache-2.0 | 6,913 | 0.009258 | # Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DistributedDiscreteGaussianQuery."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.dp_query import test_utils
from distributed_dp import discrete_gaussian_utils
from distributed_dp import distributed_discrete_gaussian_query
ddg_sum_query = distributed_discrete_gaussian_query.DistributedDiscreteGaussianSumQuery
def silence_tf_error_messages(func):
  """Decorator that temporarily raises the TF logging threshold to FATAL.

  Fixes over the original version:
  * the wrapped function's return value is propagated instead of dropped;
  * the previous verbosity is restored in a ``finally`` block, so it is
    reset even when ``func`` raises (e.g. a failing assertion);
  * ``functools.wraps`` preserves the wrapped function's metadata, which
    test frameworks rely on for naming/discovery.
  """
  import functools

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    cur_verbosity = tf.compat.v1.logging.get_verbosity()
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)
    try:
      return func(*args, **kwargs)
    finally:
      tf.compat.v1.logging.set_verbosity(cur_verbosity)  # Reset verbosity.
  return wrapper
class DistributedDiscreteGaussianQueryTest(tf.test.TestCase,
                                           parameterized.TestCase):
  """Tests for DistributedDiscreteGaussianSumQuery.

  Two lines garbled by a copy/paste artifact in the source have been
  reconstructed: the ``assertRaises(TypeError)`` context in
  ``test_sum_raise_on_float_inputs`` and the ``run_query`` call in
  ``test_sum_raise_on_l2_norm_excess``.
  """

  def test_sum_no_noise(self):
    with self.cached_session() as sess:
      record1 = tf.constant([2, 0], dtype=tf.int32)
      record2 = tf.constant([-1, 1], dtype=tf.int32)
      query = ddg_sum_query(l2_norm_bound=10, local_scale=0.0)
      query_result, _ = test_utils.run_query(query, [record1, record2])
      result = sess.run(query_result)
      expected = [1, 1]
      self.assertAllEqual(result, expected)

  @parameterized.product(sample_size=[1, 3])
  def test_sum_multiple_shapes(self, sample_size):
    with self.cached_session() as sess:
      t1 = tf.constant([2, 0], dtype=tf.int32)
      t2 = tf.constant([-1, 1, 3], dtype=tf.int32)
      t3 = tf.constant([-2], dtype=tf.int32)
      record = [t1, t2, t3]
      sample = [record] * sample_size
      query = ddg_sum_query(l2_norm_bound=10, local_scale=0.0)
      query_result, _ = test_utils.run_query(query, sample)
      expected = [sample_size * t1, sample_size * t2, sample_size * t3]
      result, expected = sess.run([query_result, expected])
      # Use `assertAllClose` for nested structures equality (with tolerance=0).
      self.assertAllClose(result, expected, atol=0)

  @parameterized.product(sample_size=[1, 3])
  def test_sum_nested_record_structure(self, sample_size):
    with self.cached_session() as sess:
      t1 = tf.constant([1, 0], dtype=tf.int32)
      t2 = tf.constant([1, 1, 1], dtype=tf.int32)
      t3 = tf.constant([1], dtype=tf.int32)
      t4 = tf.constant([[1, 1], [1, 1]], dtype=tf.int32)
      record = [t1, dict(a=t2, b=[t3, (t4, t1)])]
      sample = [record] * sample_size
      query = ddg_sum_query(l2_norm_bound=10, local_scale=0.0)
      query_result, _ = test_utils.run_query(query, sample)
      result = sess.run(query_result)
      s = sample_size
      expected = [t1 * s, dict(a=t2 * s, b=[t3 * s, (t4 * s, t1 * s)])]
      # Use `assertAllClose` for nested structures equality (with tolerance=0)
      self.assertAllClose(result, expected, atol=0)

  def test_sum_raise_on_float_inputs(self):
    with self.cached_session() as sess:
      record1 = tf.constant([2, 0], dtype=tf.float32)
      record2 = tf.constant([-1, 1], dtype=tf.float32)
      query = ddg_sum_query(l2_norm_bound=10, local_scale=0.0)
      # The query only accepts integer records.
      with self.assertRaises(TypeError):
        query_result, _ = test_utils.run_query(query, [record1, record2])
        sess.run(query_result)

  @parameterized.product(l2_norm_bound=[0, 3, 10, 14.1])
  @silence_tf_error_messages
  def test_sum_raise_on_l2_norm_excess(self, l2_norm_bound):
    with self.cached_session() as sess:
      record = tf.constant([10, 10], dtype=tf.int32)
      query = ddg_sum_query(l2_norm_bound=l2_norm_bound, local_scale=0.0)
      with self.assertRaises(tf.errors.InvalidArgumentError):
        query_result, _ = test_utils.run_query(query, [record])
        sess.run(query_result)

  def test_sum_float_norm_not_rounded(self):
    """Test that the float L2 norm bound doesn't get rounded/casted to integers."""
    with self.cached_session() as sess:
      # A casted/rounded norm bound would be insufficient.
      l2_norm_bound = 14.2
      record = tf.constant([10, 10], dtype=tf.int32)
      query = ddg_sum_query(l2_norm_bound=l2_norm_bound, local_scale=0.0)
      query_result, _ = test_utils.run_query(query, [record])
      result = sess.run(query_result)
      expected = [10, 10]
      self.assertAllEqual(result, expected)

  @parameterized.named_parameters([('2_local_scale_1_record', 2, 1),
                                   ('10_local_scale_4_records', 10, 4),
                                   ('1000_local_scale_1_record', 1000, 1),
                                   ('1000_local_scale_25_records', 1000, 25)])
  def test_sum_local_noise_shares(self, local_scale, num_records):
    """Test the noise level of the sum of discrete Gaussians applied locally.

    The sum of discrete Gaussians is not a discrete Gaussian, but it will be
    extremely close for sigma >= 2. We will thus compare the aggregated noise
    to a central discrete Gaussian noise with appropriately scaled stddev with
    some reasonable tolerance.

    Args:
      local_scale: The stddev of the local discrete Gaussian noise.
      num_records: The number of records to be aggregated.
    """
    # Aggregated local noises.
    num_trials = 1000
    record = tf.zeros([num_trials], dtype=tf.int32)
    sample = [record] * num_records
    query = ddg_sum_query(l2_norm_bound=10.0, local_scale=local_scale)
    query_result, _ = test_utils.run_query(query, sample)
    # Central discrete Gaussian noise.
    central_scale = np.sqrt(num_records) * local_scale
    central_noise = discrete_gaussian_utils.sample_discrete_gaussian(
        scale=tf.cast(tf.round(central_scale), record.dtype),
        shape=tf.shape(record),
        dtype=record.dtype)
    agg_noise, central_noise = self.evaluate([query_result, central_noise])
    mean_stddev = central_scale * np.sqrt(num_trials) / num_trials
    atol = 3.5 * mean_stddev
    # Use the atol for mean as a rough default atol for stddev/percentile.
    self.assertAllClose(np.mean(agg_noise), np.mean(central_noise), atol=atol)
    self.assertAllClose(np.std(agg_noise), np.std(central_noise), atol=atol)
    self.assertAllClose(
        np.percentile(agg_noise, [25, 50, 75]),
        np.percentile(central_noise, [25, 50, 75]),
        atol=atol)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  tf.test.main()
|
mancoast/CPythonPyc_test | cpython/251_test_strptime.py | Python | gpl-3.0 | 23,903 | 0.003598 | """PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import test_support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
    """Test _strptime._getlang"""

    def test_basic(self):
        # _getlang() must simply mirror the current LC_TIME locale setting.
        expected = locale.getlocale(locale.LC_TIME)
        self.assertEqual(_strptime._getlang(), expected)
class LocaleTime_Tests(unittest.TestCase):
    """Tests for _strptime.LocaleTime.

    All values are lower-cased when stored in LocaleTime, so make sure to
    compare values after running ``lower`` on them.

    Fixes: two assertion-message lines garbled by a copy/paste artifact were
    reconstructed, and the ``failUnless``/``failUnlessEqual`` aliases
    (removed in Python 3.12) were replaced with ``assertTrue``/``assertEqual``.
    """

    def setUp(self):
        """Create time tuple based on current time."""
        self.time_tuple = time.localtime()
        self.LT_ins = _strptime.LocaleTime()

    def compare_against_time(self, testing, directive, tuple_position,
                             error_msg):
        """Helper method that tests testing against directive based on the
        tuple_position of time_tuple. Uses error_msg as error message.
        """
        strftime_output = time.strftime(directive, self.time_tuple).lower()
        comparison = testing[self.time_tuple[tuple_position]]
        self.assertTrue(strftime_output in testing, "%s: not found in tuple" %
                        error_msg)
        self.assertTrue(comparison == strftime_output,
                        "%s: position within tuple incorrect; %s != %s" %
                        (error_msg, comparison, strftime_output))

    def test_weekday(self):
        # Make sure that full and abbreviated weekday names are correct in
        # both string and position with tuple
        self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
                                  "Testing of full weekday name failed")
        self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
                                  "Testing of abbreviated weekday name failed")

    def test_month(self):
        # Test full and abbreviated month names; both string and position
        # within the tuple
        self.compare_against_time(self.LT_ins.f_month, '%B', 1,
                                  "Testing against full month name failed")
        self.compare_against_time(self.LT_ins.a_month, '%b', 1,
                                  "Testing against abbreviated month name failed")

    def test_am_pm(self):
        # Make sure AM/PM representation done properly
        strftime_output = time.strftime("%p", self.time_tuple).lower()
        self.assertTrue(strftime_output in self.LT_ins.am_pm,
                        "AM/PM representation not in tuple")
        if self.time_tuple[3] < 12: position = 0
        else: position = 1
        self.assertTrue(strftime_output == self.LT_ins.am_pm[position],
                        "AM/PM representation in the wrong position within the tuple")

    def test_timezone(self):
        # Make sure timezone is correct
        timezone = time.strftime("%Z", self.time_tuple).lower()
        if timezone:
            self.assertTrue(timezone in self.LT_ins.timezone[0] or
                            timezone in self.LT_ins.timezone[1],
                            "timezone %s not found in %s" %
                            (timezone, self.LT_ins.timezone))

    def test_date_time(self):
        # Check that LC_date_time, LC_date, and LC_time are correct
        # the magic date is used so as to not have issues with %c when day of
        # the month is a single digit and has a leading space. This is not an
        # issue since strptime still parses it correctly. The problem is
        # testing these directives for correctness by comparing strftime
        # output.
        magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
        strftime_output = time.strftime("%c", magic_date)
        self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date_time,
                                                         magic_date),
                        "LC_date_time incorrect")
        strftime_output = time.strftime("%x", magic_date)
        self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date,
                                                         magic_date),
                        "LC_date incorrect")
        strftime_output = time.strftime("%X", magic_date)
        self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_time,
                                                         magic_date),
                        "LC_time incorrect")
        LT = _strptime.LocaleTime()
        LT.am_pm = ('', '')
        self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
                                    "empty strings")

    def test_lang(self):
        # Make sure lang is set to what _getlang() returns
        # Assuming locale has not changed between now and when self.LT_ins was created
        self.assertEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
    def test_pattern(self):
        # Test TimeRE.pattern
        # %a/%A expand to alternation groups built from the locale's weekday
        # names; %d expands to a two-digit day-of-month pattern.
        pattern_string = self.time_re.pattern(r"%a %A %d")
        self.failUnless(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
                        "did not find abbreviated weekday in pattern string '%s'" %
                        pattern_string)
        self.failUnless(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
                        "did not find full weekday in pattern string '%s'" %
                        pattern_string)
        self.failUnless(pattern_string.find(self.time_re['d']) != -1,
                        "did not find 'd' directive pattern string '%s'" %
                        pattern_string)
    def test_pattern_escaping(self):
        # Make sure any characters in the format string that might be taken as
        # regex syntax is escaped.
        pattern_string = self.time_re.pattern("\d+")
        # The backslash and the '+' must both come through escaped (\\d\+).
        self.failUnless(r"\\d\+" in pattern_string,
                        "%s does not have re characters escaped properly" %
                        pattern_string)
    def test_compile(self):
        # Check that compiled regex is correct
        found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
        self.failUnless(found and found.group('A') == self.locale_time.f_weekday[6],
                        "re object for '%A' failed")
        compiled = self.time_re.compile(r"%a %b")
        found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
                                          self.locale_time.a_month[4]))
        self.failUnless(found,
                        "Match failed with '%s' regex and '%s' string" %
                        (compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
                                                      self.locale_time.a_month[4])))
        self.failUnless(found.group('a') == self.locale_time.a_weekday[4] and
                        found.group('b') == self.locale_time.a_month[4],
                        "re object couldn't find the abbreviated weekday month in "
                        "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
                        (found.string, found.re.pattern, found.group('a'),
                         found.group('b')))
        # Every supported directive must compile and match its own strftime
        # output for the current time.
        for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
                          'U','w','W','x','X','y','Y','Z','%'):
            compiled = self.time_re.compile("%" + directive)
            found = compiled.match(time.strftime("%" + directive))
            self.failUnless(found, "Matching failed on '%s' using '%s' regex" %
                            (time.strftime("%" + directive),
                             compiled.pattern))
def test_blankpattern(self):
# Make sure when tuple or something has no values no regex is generated.
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
self.failUnless(_strptime |
svebk/DeepSentiBank_memex | scripts/tests/deprecated/read_image_from_hbase.py | Python | bsd-2-clause | 875 | 0.008 | import happybase
fro | m StringIO import StringIO
from PIL import Image
def decode_image_PIL(binary_data):
    """Wrap *binary_data* in an in-memory file object and decode it with PIL.

    :param binary_data: raw encoded image bytes (e.g. JPEG data).
    :return: a ``PIL.Image.Image`` lazily decoded from the buffer.
    """
    buffer = StringIO(binary_data)
    return Image.open(buffer)
if __name__=="__main__":
    # Fetch a fixed set of rows from the HBase 'image_cache' table and dump
    # each image's binary column to a local JPEG named after the row key.
    tab_image = 'image_cache'
    col_image = dict()
    # Maps table name -> fully qualified column ("family:qualifier") holding
    # the raw image bytes.
    col_image['image_cache'] = 'image:binary'
    conn = happybase.Connection(host='10.1.94.57')  # NOTE(review): hard-coded HBase host
    image_rows = dict()
    # Row keys look like SHA1 hex digests -- presumably image content hashes;
    # TODO confirm against the ingestion pipeline.
    image_rows['image_cache'] = ['0000007031E3FA80C97940017253BEAB542EA334', '000001EC5DD154E58B72326EFC26A41A4C8E9586',
                                 '0000081A1D6D1A2023DAE07547C242ED3106E7FE']
    table = conn.table(tab_image)
    # table.rows() yields (row_key, {column: value}) pairs for the requested keys.
    for row in table.rows(image_rows[tab_image]):
        binary_data = row[1][col_image[tab_image]]
        img = decode_image_PIL(binary_data)
        print("Saving image to: {}".format(row[0]+'.jpeg'))
        img.save(row[0]+'.jpeg',"JPEG")
|
kcrandall/Kaggle_Mercedes_Manufacturing | spark/experiements/jingning/experiment1.py | Python | mit | 13,439 | 0.011087 | # imports
import pandas as pd
import numpy as np
import time
import os
from tabulate import tabulate
import sys
from operator import add
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.sql import functions as F #https://stackoverflow.com/questions/39504950/python-pyspark-get-sum-of-a-pyspark-dataframe-column-values
from get_type_lists import get_type_lists
from target_encoder import target_encoder
from feature_combiner import feature_combiner
from logging_lib.LoggingController import LoggingController
#Define your s3 bucket to load and store data
S3_BUCKET = 'emr-related-files'
#Create a custom logger to log statistics and plots
logger = LoggingController()
logger.s3_bucket = S3_BUCKET
# Build (or reuse) the SparkSession; the commented-out options are leftovers
# from local-mode experiments.
#.config('spark.executor.cores','6') \
spark = SparkSession.builder \
    .appName("App") \
    .getOrCreate()
# .master("local[*]") \
# .config('spark.cores.max','16')
#.master("local") \
#    .config("spark.some.config.option", "some-value") \
spark.sparkContext.setLogLevel('WARN') #Get rid of all the junk in output
# Target column and row-id column of the data set; DROPS is not used in the
# visible portion of this script.
Y = 'y'
ID_VAR = 'ID'
DROPS = [ID_VAR]
#From an XGBoost model
# NOTE the top 6 are categorical, might want to look into this.
MOST_IMPORTANT_VARS_ORDERD = ['X5','X0','X8','X3','X1','X2','X314','X47','X118',\
'X315','X29','X127','X236','X115','X383','X152','X151','X351','X327','X77','X104',\
'X267','X95','X142']
#Load data from s3
train = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('s3n://emr-related-files/train.csv')
test = spark.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('s3n://emr-related-files/test.csv')
#this needs to be done for h2o glm.predict() bug (which needs same number of columns)
test = test.withColumn(Y,test[ID_VAR])
#Work around for splitting wide data, you need to split on only an ID varaibles
#Then join back with a train varaible (bug in spark as of 2.1 with randomSplit())
(train1,valid1) = train.select(ID_VAR).randomSplit([0.7,0.3], seed=123)
valid = valid1.join(train, ID_VAR,'inner')
train = train1.join(train,ID_VAR,'inner')
# print('TRAIN DATA')
# train.show(2)
# print('VALID DATA')
# valid.show(2)
#workdaround for h2o predict
test1 = test.select(ID_VAR,Y)
test2 = test.drop(Y)
test = test1.join(test2,ID_VAR,'inner')
# Detect numeric vs. categorical columns; `cats` drives the target encoding.
original_nums, cats = get_type_lists(frame=train,rejects=[ID_VAR,Y],frame_type='spark')
print("Encoding numberic variables...")
training_df_list, test_df_list,valid_df_list = list(),list(),list()
# Target-encode every categorical column against Y, collecting one encoded
# frame per data split so they can all be joined back afterwards.
for i, var in enumerate(cats):
    total = len(cats)
    print('Encoding: ' + var + ' (' + str(i+1) + '/' + str(total) + ') ...')
    logger.log_string('Encoding: ' + var + ' (' + str(i+1) + '/' + str(total) + ') ...')
    tr_enc,v_enc, ts_enc = target_encoder(train, test, var, Y,valid_frame=valid,frame_type='spark',id_col=ID_VAR)
    training_df_list.append(tr_enc)
    test_df_list.append(ts_enc)
    valid_df_list.append(v_enc)
#join all the new variables
for i, df in enumerate(training_df_list):
    train = train.join(training_df_list[i],ID_VAR,'inner')
    valid = valid.join(valid_df_list[i],ID_VAR,'inner')
    test = test.join(test_df_list[i],ID_VAR,'inner')
# print('TRAIN DATA')
# train.show(2)
# print('VALID DATA')
# valid.show(2)
# print('TEST DATA')
# test.show(2)
print('Done encoding.')
encoded_nums, cats = get_type_lists(frame=train,rejects=[ID_VAR,Y],frame_type='spark')
#Remplace cats with encoded cats from MOST_IMPORTANT_VARS_ORDERD
for i, v in enumerate(MOST_IMPORTANT_VARS_ORDERD):
    if v in cats:
        MOST_IMPORTANT_VARS_ORDERD[i] = v + '_Tencode'
print('Combining features....')
# feature_combiner presumably builds interaction features from the listed
# variables -- see feature_combiner.py for the exact behavior.
(train, valid, test) = feature_combiner(train, test, MOST_IMPORTANT_VARS_ORDERD, valid_frame = valid, frame_type='spark')
print('Done combining features.')
encoded_combined_nums, cats = get_type_lists(frame=train,rejects=[ID_VAR,Y],frame_type='spark')
################################################################################
#            DONE WITH PREPROCESSING - START TRAINING                          #
################################################################################
# H2O / Sparkling Water is imported here, after preprocessing, so the heavy
# runtime is only started once the Spark frames are ready.
import h2o
h2o.show_progress() # turn on progress bars
from h2o.estimators.glm import H2OGeneralizedLinearEstimator # import GLM models
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.grid.grid_search import H2OGridSearch # grid search
import matplotlib
matplotlib.use('Agg') #Need this if running matplot on a server w/o display
from pysparkling import *
# Attach an H2O cluster to the running SparkContext.
conf = H2OConf(spark=spark)
conf.nthreads = -1
hc = H2OContext.getOrCreate(spark,conf)
print('Making h2o frames...')
# Materialize the Spark DataFrames as named H2OFrames.
trainHF = hc.as_h2o_frame(train, "trainTable")
validHF = hc.as_h2o_frame(valid, "validTable")
testHF = hc.as_h2o_frame(test, "testTable")
print('Done making h2o frames.')
logger.log_string("Train Summary:")
logger.log_string("Rows:{}".format(trainHF.nrow))
logger.log_string("Cols:{}".format(trainHF.ncol))
# print(trainHF.summary(return_data=True))
# logger.log_string(tabulate(trainHF.summary(return_data=True),tablefmt="grid"))
# logger.log_string(trainHF._ex._cache._tabulate('grid',False))
# 50/50 splits, presumably reserved for base/stacked ensembling -- TODO
# confirm against the training code further down.
base_train, stack_train = trainHF.split_frame([0.5], seed=12345)
base_valid, stack_valid = validHF.split_frame([0.5], seed=12345)
# def upload_submission(sub,predict_column='predict'):
#     # create time stamp
#     import re
#     import time
#     time_stamp = re.sub('[: ]', '_', time.asctime())
#
#     # save file for submission
#     # sub.columns = [ID_VAR, Y]
#     sub_fname = 'Submission_'+str(time_stamp) + '.csv'
#     # h2o.download_csv(sub, 's3n://'+S3_BUCKET+'/kaggle_submissions/Mercedes/' +sub_fname)
#
#     spark_sub_frame = hc.as_spark_frame(sub)
#
#     spark_sub_frame.select(ID_VAR,predict_column).coalesce(1).write.option("header","true").csv('s3n://'+S3_BUCKET+'/Kaggle_Submissions/Mercedes/' +sub_fname)
def glm_grid(X, y, train, valid, should_submit = False):
    """ Wrapper function for penalized GLM with alpha and lambda search.

    :param X: List of inputs.
    :param y: Name of target variable.
    :param train: Name of training H2OFrame.
    :param valid: Name of validation H2OFrame.
    :param should_submit: Unused in live code; only referenced by the
        commented-out Kaggle submission block below.
    :return: Best H2Omodel from H2OGeneralizedLinearEstimator
    """
    alpha_opts = [0.01, 0.25, 0.5, 0.99] # always keep some L2
    # NOTE(review): `family` is never used -- the estimator below hard-codes
    # family="gaussian".
    family = ["gaussian", "binomial", "quasibinomial", "multinomial", "poisson", "gamma", "tweedie"]
    hyper_parameters = {"alpha":alpha_opts
                        }
    # initialize grid search
    grid = H2OGridSearch(
        H2OGeneralizedLinearEstimator(
            family="gaussian",
            lambda_search=True,
            seed=12345),
        hyper_params=hyper_parameters)
    # train grid
    grid.train(y=y,
               x=X,
               training_frame=train,
               validation_frame=valid)
    # show grid search results
    print(grid.show())
    best = grid.get_grid()[0]
    print(best)
    # if should_submit:
    #     sub_frame = testHF[ID_VAR].cbind(best.predict(testHF))
    #     print(sub_frame.col_names)
    #     print('Submission frame preview:')
    #     print(sub_frame[0:10, [ID_VAR, 'predict']])
    #     upload_submission(sub_frame,'predict')
    # plot top frame values
    print('yhat_frame')
    yhat_frame = valid.cbind(best.predict(valid))
    print(yhat_frame[0:10, [y, 'predict']])
    # plot sorted predictions: convert to pandas, order by prediction, and
    # send the resulting ranked-predictions plot to the S3-backed logger.
    yhat_frame_df = yhat_frame[[y, 'predict']].as_data_frame()
    yhat_frame_df.sort_values(by='predict', inplace=True)
    yhat_frame_df.reset_index(inplace=True, drop=True)
    plt = yhat_frame_df.plot(title='Ranked Predictions Plot')
    logger.log_string('Ranked Predictions Plot')
    logger.log_matplotlib_plot(plt)
    # select best model
    return best
def neural_net_grid(X, y, train, valid):
# define random grid search parameters
hyper_parameters = {'hidden': [[170, 320], [80, 190], [320, 160, 80], [100], [50, 50, 50, 50]],
'l1':[s/1e4 for s in range(0, 1000, 100)],
|
chrislit/abydos | abydos/distance/_henderson_heron.py | Python | gpl-3.0 | 2,964 | 0 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._henderson_heron.
Henderson-Heron dissimilarity
"""
from math import factorial
from typing import Any
from ._token_distance import _TokenDistance
__all__ = ['HendersonHeron']
class HendersonHeron(_TokenDistance):
    r"""Henderson-Heron dissimilarity.

    For two sets X and Y drawn from a population N, Henderson-Heron
    dissimilarity :cite:`Henderson:1977` is the hypergeometric probability

    .. math:

        sim_{Henderson-Heron}(X, Y) = \frac{|X|! |Y|! (|N| - |X|)!
        (|N|- |Y|)!}{|N|! |X \cap Y|! (|X| - |X \cap Y|)!
        (|Y| - |Y \cap X|)! (|N| - |X| - |Y| + |X \cap Y|)!}

    .. versionadded:: 0.4.1
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize HendersonHeron instance.

        Parameters
        ----------
        **kwargs
            Arbitrary keyword arguments


        .. versionadded:: 0.4.1

        """
        super().__init__(**kwargs)

    def dist(self, src: str, tar: str) -> float:
        """Return the Henderson-Heron dissimilarity of two strings.

        Parameters
        ----------
        src : str
            Source string for comparison
        tar : str
            Target string for comparison

        Returns
        -------
        float
            Henderson-Heron dissimilarity

        Examples
        --------
        >>> cmp = HendersonHeron()
        >>> cmp.dist('cat', 'hat')
        0.00011668873858680838
        >>> cmp.dist('Niall', 'Neil')
        0.00048123075776606097
        >>> cmp.dist('aluminum', 'Catalan')
        0.08534181060514882
        >>> cmp.dist('ATCG', 'TAGC')
        0.9684367974410505


        .. versionadded:: 0.4.1

        """
        self._tokenize(src, tar)

        # Cardinalities of the intersection, each token set, and the population.
        shared = self._intersection_card()
        src_size = self._src_card()
        tar_size = self._tar_card()
        population = self._population_unique_card()

        # Exact integer products keep the computation lossless until the
        # single floating-point division at the end.
        numerator = (
            factorial(src_size)
            * factorial(tar_size)
            * factorial(population - src_size)
            * factorial(population - tar_size)
        )
        denominator = (
            factorial(population)
            * factorial(shared)
            * factorial(src_size - shared)
            * factorial(tar_size - shared)
            * factorial(population - tar_size - src_size + shared)
        )
        return numerator / denominator
if __name__ == '__main__':
    # Execute the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
RevansChen/online-judge | Codefights/arcade/python-arcade/level-2/12.Fix-Message/Python/test.py | Python | mit | 819 | 0 | # Python3
from solution1 import fixMessage as f
# Each entry is (input message, expected fixed message) for fixMessage:
# the expected output capitalizes the first letter and lower-cases the rest
# while leaving punctuation/emoticons intact.
qa = [
    ("you'll NEVER believe what that 'FrIeNd' of mine did!!1",
     "You'll never believe what that 'friend' of mine did!!1"),
    ('i',
     'I'),
    ('We are so doomed.',
     'We are so doomed.'),
    ("LOL you've GOT to hear this one XDD",
     "Lol you've got to hear this one xdd"),
    ("ok, here's the TRUTH: I have AbSoLuTeLy NOTHING to do with it!",
     "Ok, here's the truth: i have absolutely nothing to do with it!"),
    (':)',
     ':)')
]
# Run every case through the solution and print a pass/fail report.
for *q, a in qa:
    for i, e in enumerate(q):
        print('input{0}: {1}'.format(i + 1, e))
    ans = f(*q)
    if ans != a:
        print(' [failed]')
        print(' output:', ans)
        print(' expected:', a)
    else:
        print(' [ok]')
        print(' output:', ans)
    print()
|
geopython/QGIS | tests/src/python/test_qgslayoutmap.py | Python | gpl-2.0 | 16,013 | 0.000999 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemMap.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 Nyall Dawson'
__date__ = '20/10/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QFileInfo, QRectF, QDir
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.core import (QgsLayoutItemMap,
QgsRectangle,
QgsRasterLayer,
QgsVectorLayer,
QgsLayout,
QgsMapSettings,
QgsProject,
QgsMultiBandColorRenderer,
QgsCoordinateReferenceSystem,
QgsTextFormat,
QgsFontUtils,
QgsPalLayerSettings,
QgsNullSymbolRenderer,
QgsPoint,
QgsFeature,
QgsVectorLayerSimpleLabeling,
QgsLabelingEngineSettings,
QgsLayoutMeasurement,
QgsUnitTypes,
QgsLayoutObject,
QgsProperty,
QgsReadWriteContext)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from qgslayoutchecker import QgsLayoutChecker
from test_qgslayoutitem import LayoutItemTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutMap(unittest.TestCase, LayoutItemTestCase):
    @classmethod
    def setUpClass(cls):
        # Tell the shared LayoutItemTestCase machinery which item class to test.
        cls.item_class = QgsLayoutItemMap
    def setUp(self):
        # Each test appends its checker output to this HTML report fragment;
        # tearDown flushes it to disk.
        self.report = "<h1>Python QgsLayoutItemMap Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
    def __init__(self, methodName):
        """Build the fixture layers and a layout map for this test instance.

        NOTE(review): unittest constructs one TestCase instance per test
        method, so despite the original "run once" wording this executes for
        every test.
        """
        unittest.TestCase.__init__(self, methodName)
        # RGB raster fixture rendered through a multi-band color renderer.
        myPath = os.path.join(TEST_DATA_DIR, 'rgb256x256.png')
        rasterFileInfo = QFileInfo(myPath)
        self.raster_layer = QgsRasterLayer(rasterFileInfo.filePath(),
                                           rasterFileInfo.completeBaseName())
        rasterRenderer = QgsMultiBandColorRenderer(
            self.raster_layer.dataProvider(), 1, 2, 3)
        self.raster_layer.setRenderer(rasterRenderer)
        # Point-vector fixture.
        myPath = os.path.join(TEST_DATA_DIR, 'points.shp')
        vector_file_info = QFileInfo(myPath)
        self.vector_layer = QgsVectorLayer(vector_file_info.filePath(),
                                           vector_file_info.completeBaseName(), 'ogr')
        assert self.vector_layer.isValid()
        # pipe = mRasterLayer.pipe()
        # assert pipe.set(rasterRenderer), 'Cannot set pipe renderer'
        QgsProject.instance().addMapLayers([self.raster_layer, self.vector_layer])
        # create layout with layout map
        self.layout = QgsLayout(QgsProject.instance())
        self.layout.initializeDefaults()
        self.map = QgsLayoutItemMap(self.layout)
        self.map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
        self.map.setFrameEnabled(True)
        self.map.setLayers([self.raster_layer])
        self.layout.addLayoutItem(self.map)
    def testMapCrs(self):
        """New maps inherit the project CRS; setCrs() overrides it and an
        invalid CRS clears the override back to the project CRS."""
        # create layout with layout map
        map_settings = QgsMapSettings()
        map_settings.setLayers([self.vector_layer])
        layout = QgsLayout(QgsProject.instance())
        layout.initializeDefaults()
        # check that new maps inherit project CRS
        QgsProject.instance().setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
        map = QgsLayoutItemMap(layout)
        map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
        map.setFrameEnabled(True)
        rectangle = QgsRectangle(-13838977, 2369660, -8672298, 6250909)
        map.setExtent(rectangle)
        map.setLayers([self.vector_layer])
        layout.addLayoutItem(map)
        self.assertEqual(map.crs().authid(), 'EPSG:4326')
        self.assertFalse(map.presetCrs().isValid())
        # overwrite CRS
        map.setCrs(QgsCoordinateReferenceSystem('EPSG:3857'))
        self.assertEqual(map.crs().authid(), 'EPSG:3857')
        self.assertEqual(map.presetCrs().authid(), 'EPSG:3857')
        # Render comparison against the stored composermap_crs3857 control image.
        checker = QgsLayoutChecker('composermap_crs3857', layout)
        checker.setControlPathPrefix("composer_map")
        result, message = checker.testLayout()
        self.report += checker.report()
        self.assertTrue(result, message)
        # overwrite CRS
        map.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
        self.assertEqual(map.presetCrs().authid(), 'EPSG:4326')
        self.assertEqual(map.crs().authid(), 'EPSG:4326')
        rectangle = QgsRectangle(-124, 17, -78, 52)
        map.zoomToExtent(rectangle)
        checker = QgsLayoutChecker('composermap_crs4326', layout)
        checker.setControlPathPrefix("composer_map")
        result, message = checker.testLayout()
        self.report += checker.report()
        self.assertTrue(result, message)
        # change back to project CRS
        map.setCrs(QgsCoordinateReferenceSystem())
        self.assertEqual(map.crs().authid(), 'EPSG:4326')
        self.assertFalse(map.presetCrs().isValid())
    def testContainsAdvancedEffects(self):
        """containsAdvancedEffects() must reflect layer blend modes."""
        map_settings = QgsMapSettings()
        map_settings.setLayers([self.vector_layer])
        layout = QgsLayout(QgsProject.instance())
        map = QgsLayoutItemMap(layout)
        self.assertFalse(map.containsAdvancedEffects())
        self.vector_layer.setBlendMode(QPainter.CompositionMode_Darken)
        # Capture the result before restoring the shared fixture layer's
        # blend mode so state cannot leak into other tests on failure.
        result = map.containsAdvancedEffects()
        self.vector_layer.setBlendMode(QPainter.CompositionMode_SourceOver)
        self.assertTrue(result)
    def testRasterization(self):
        """requiresRasterization() should only trigger when advanced effects
        combine with a disabled or semi-transparent background."""
        map_settings = QgsMapSettings()
        map_settings.setLayers([self.vector_layer])
        layout = QgsLayout(QgsProject.instance())
        map = QgsLayoutItemMap(layout)
        self.assertFalse(map.requiresRasterization())
        self.vector_layer.setBlendMode(QPainter.CompositionMode_Darken)
        # A blend mode alone counts as an advanced effect but does not force
        # rasterization while the opaque background is still enabled.
        self.assertFalse(map.requiresRasterization())
        self.assertTrue(map.containsAdvancedEffects())
        map.setBackgroundEnabled(False)
        self.assertTrue(map.requiresRasterization())
        map.setBackgroundEnabled(True)
        map.setBackgroundColor(QColor(1, 1, 1, 1))
        self.assertTrue(map.requiresRasterization())
        # Restore the shared fixture layer's blend mode.
        self.vector_layer.setBlendMode(QPainter.CompositionMode_SourceOver)
def testLabelMargin(self):
"""
Test rendering m | ap item with a label margin set
"""
format = QgsTextFormat()
format.setFont(QgsFontUtils.getStandardTestFont("Bold"))
format.setSize(20)
format.setNamedStyle("Bold")
format.setColor(QColor(0, 0, 0))
settings = QgsPalLayerSettings()
settings.setFormat(format)
settings.fieldName = "'X'"
settings.isExpression = True
settings.placement = QgsPalLayerSettings.OverPoint
vl = QgsVe | ctorLayer("Point?crs=epsg:4326&field=id:integer", "vl", "memory")
vl.setRenderer(QgsNullSymbolRenderer())
f = QgsFeature(vl.fields(), 1)
for x in range(15):
for y in range(15):
f.setGeometry(QgsPoint(x, y))
vl.dataProvider().addFeature(f)
vl.setLabeling(QgsVectorLayerSimpleLabeling(settings))
vl.setLabelsEnabled(True)
p = QgsProject()
engine_settings = QgsLabelingEngineSettings()
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, False)
engine_settings.setFlag(QgsLabelingEngineSettings.DrawLabelRectOnly, True)
p.setLabe |
cliqz/socorro | socorro/unittest/external/fs/test_fs_new_crash_source.py | Python | mpl-2.0 | 1,839 | 0.003263 | # This Source Code Form is subject to the te | rms of the Mozilla Public
# License, v. 2 | .0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import eq_, ok_
from socorro.external.fs.fs_new_crash_source import (
FSNewCrashSource
)
from socorro.lib.util import DotDict
from socorro.unittest.testbase import TestCase
#==============================================================================
class FakeCrashStore(object):
    """Minimal stand-in for a crash-storage backend used by these tests."""

    def __init__(self, config, quit_check):
        # Record the collaborators exactly as handed in so tests can assert
        # on them later.
        self.config = config
        self.quit_check = quit_check

    def new_crashes(self):
        """Yield ten fake crash ids, '0' through '9'."""
        for crash_id in map(str, range(10)):
            yield crash_id

    def ack_crash(self, crash_id):
        """Acknowledge a crash by echoing its id back."""
        return crash_id
#==============================================================================
class TestConnection(TestCase):
    """Tests for the FSNewCrashSource class."""

    def _setup_config(self):
        """Build a minimal configuration wired to the fake crash store."""
        cfg = DotDict()
        cfg.crashstorage_class = FakeCrashStore
        return cfg

    def test_constructor(self):
        """The constructor must instantiate the configured crash store."""
        cfg = self._setup_config()
        source = FSNewCrashSource(cfg, "ignored_processor_name")
        ok_(isinstance(source.crash_store, FakeCrashStore))
        ok_(source.crash_store.config is cfg)

    def test__iter__(self):
        """Calling the source must yield (args, kwargs) pairs for ids 0..9."""
        source = FSNewCrashSource(self._setup_config(), "ignored_processor_name")
        for i, (args, kwargs) in enumerate(source()):
            eq_(str(i), args[0])
            eq_(kwargs, {})
        eq_(i, 9)
|
Torkvamedo/smx | project-chat/chatApp/management/commands/start_tornado.py | Python | unlicense | 701 | 0.008559 | from tornado.options import options, d | efine
import django.core.handlers.wsgi
import tornado.httpserver, tornado.ioloop
import tornado.web, tornado.wsgi
import chatApp.sockets
import sockjs.tornado
# CLI/config option for the port the combined server listens on.
define('port', type=int, default=8888)
# Wrap the Django WSGI application so Tornado can serve it.
wsgi_app = tornado.wsgi.WSGIContainer(
    django.core.handlers.wsgi.WSGIHandler())
# Route /sockjs to the SockJS websocket handler; every other URL falls back
# to the Django app.
tornado_app = tornado.web.Application(
    sockjs.tornado.SockJSRouter(chatApp.sockets.SocketHandler, '/sockjs')
    .urls + [('.*', tornado.web.FallbackHandler,dict(fallback=wsgi_app)),])
server = tornado.httpserver.HTTPServer(tornado_app)
server.listen(options.port)
print("[*] Listening at 0.0.0.0:%i" % (options.port,))
# NOTE(review): starting the IOLoop blocks at import time; this module is
# meant to be executed as a management command, never imported.
tornado.ioloop.IOLoop.instance().start()
UoMCS/Perentie | view/placeholder.py | Python | gpl-3.0 | 1,235 | 0.048583 | #!/usr/bin/env python
"""
A GTK+ widget which can be used to fill space where a widget is unavailable, for
example if a system doesn't have a memory, this can be shown in its place in a
memory window.
"""
import gtk, gobject
class Placeholder(gtk.Alignment):
  def __init__(self, title, body = None, stock_icon_id = gtk.STOCK_DIALOG_INFO):
    """
    Draws an icon, large title and some body text centered in the widget.

    title: heading text, rendered bold and x-large.
    body: optional Pango markup shown under the title; None means empty.
    stock_icon_id: GTK stock icon rendered above the title.
    """
    # Center the whole placeholder within whatever space it is given.
    gtk.Alignment.__init__(self, 0.5, 0.5)
    self.title = title
    self.body = body or ""
    self.stock_icon_id = stock_icon_id
    # Vertical stack: icon, title, body.
    self.vbox = gtk.VBox(spacing = 15)
    self.vbox.set_border_width(15)
    self.add(self.vbox)
    self.icon = gtk.Image()
    self.icon.set_from_stock(self.stock_icon_id, gtk.ICON_SIZE_DIALOG)
    self.vbox.pack_start(self.icon, fill = True, expand = False)
    # Title and body are set via markup so callers may embed Pango tags.
    self.title_label = gtk.Label()
    self.title_label.set_markup("<span weight = 'bold' size='x-large'>%s</span>"%self.title)
    self.vbox.pack_start(self.title_label, fill = True, expand = False)
    self.body_label = gtk.Label()
    self.body_label.set_markup(self.body)
    self.body_label.set_line_wrap(True)
    self.vbox.pack_start(self.body_label, fill = True, expand = False)
    self.vbox.show_all()
|
OGKevin/ComBunqWebApp | apiwrapper/endpoints/request_inquiry.py | Python | mit | 2,898 | 0 | from apiwrapper.endpoints.endpoint import Endpoint
from apiwrapper.endpoints.monetary_account import MonetaryAccount
class RequestInquiry(Endpoint):
    """Read-only endpoints for request inquiries, inquiry batches and
    request responses on a monetary account.
    """

    __endpoint_request_inquiry = "request-inquiry"
    __endpoint_request_inquiry_batch = "request-inquiry-batch"
    __endpoint_request_response = "request-response"
    __endpoint_request_chat = "chat"

    @classmethod
    def _get_base_endpoint(cls, user_id, account_id):
        """Return the endpoint prefix for one monetary account."""
        return "%s/%d" % (
            MonetaryAccount._get_base_endpoint(user_id),
            account_id,
        )

    # Request Inquiry Logic
    def get_all_inquiries_for_account(self, user_id, account_id):
        """List every request inquiry on the account."""
        return self._make_get_request("%s/%s" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_inquiry,
        ))

    def get_request_inquiry_by_id(self, user_id, account_id, request_id):
        """Fetch one request inquiry by id."""
        return self._make_get_request("%s/%s/%d" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_inquiry,
            request_id,
        ))

    def get_chat_for_request_inquiry(self, user_id, account_id, request_id):
        """Fetch the chat conversation attached to a request inquiry."""
        return self._make_get_request("%s/%s/%d/%s" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_inquiry,
            request_id,
            self.__endpoint_request_chat,
        ))

    # Request Inquiry Batch Logic
    def get_all_batch_inquiries_for_account(self, user_id, account_id):
        """List every request-inquiry batch on the account."""
        return self._make_get_request("%s/%s" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_inquiry_batch,
        ))

    def get_batch_inquiry_by_id(self, user_id, account_id, inquiry_id):
        """Fetch one request-inquiry batch by id."""
        return self._make_get_request("%s/%s/%d" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_inquiry_batch,
            inquiry_id,
        ))

    # Request Response Logic
    def get_all_request_responses_for_account(self, user_id, account_id):
        """List every request response on the account."""
        return self._make_get_request("%s/%s" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_response,
        ))

    def get_request_response_by_id(self, user_id, account_id, response_id):
        """Fetch one request response by id."""
        return self._make_get_request("%s/%s/%d" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_response,
            response_id,
        ))

    def get_chat_for_request_response(self, user_id, account_id, request_id):
        """Fetch the chat conversation attached to a request response."""
        return self._make_get_request("%s/%s/%d/%s" % (
            self._get_base_endpoint(user_id, account_id),
            self.__endpoint_request_response,
            request_id,
            self.__endpoint_request_chat,
        ))
|
twilio/twilio-python | tests/integration/voice/v1/test_byoc_trunk.py | Python | mit | 8,787 | 0.003073 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ByocTrunkTestCase(IntegrationTestCase):
    def test_create_request(self):
        """Creating a BYOC trunk must POST to the ByocTrunks endpoint."""
        # Mock a 500 so only the outbound request shape is verified.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks.create()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://voice.twilio.com/v1/ByocTrunks',
        ))
    def test_create_response(self):
        """A 201 payload must deserialize into a non-None trunk instance."""
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "voice_url": "https://byoc.example.com/twilio/app",
                "voice_method": "POST",
                "voice_fallback_method": "POST",
                "voice_fallback_url": "https://byoc.example.com/twilio/fallback",
                "status_callback_method": "POST",
                "status_callback_url": "https://byoc.example.com/twilio/status_callback",
                "cnam_lookup_enabled": false,
                "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2020-03-18T23:31:36Z",
                "date_updated": "2020-03-18T23:31:36Z",
                "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.voice.v1.byoc_trunks.create()
        self.assertIsNotNone(actual)
    def test_fetch_request(self):
        """Fetching a trunk must GET the sid-qualified ByocTrunks URL."""
        # Mock a 500 so only the outbound request shape is verified.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://voice.twilio.com/v1/ByocTrunks/BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    def test_fetch_response(self):
        """A 200 fetch payload must deserialize into a non-None instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "voice_url": "https://byoc.example.com/twilio/app",
                "voice_method": "POST",
                "voice_fallback_method": "POST",
                "voice_fallback_url": "https://byoc.example.com/twilio/fallback",
                "status_callback_method": "POST",
                "status_callback_url": "https://byoc.example.com/twilio/status_callback",
                "cnam_lookup_enabled": false,
                "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2020-03-18T23:31:36Z",
                "date_updated": "2020-03-18T23:31:37Z",
                "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)
    def test_list_request(self):
        """Listing trunks must GET the collection ByocTrunks URL."""
        # Mock a 500 so only the outbound request shape is verified.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://voice.twilio.com/v1/ByocTrunks',
        ))
    def test_read_full_response(self):
        """A one-item paged payload must deserialize into a non-None list."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "byoc_trunks"
                },
                "byoc_trunks": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "friendly_name": "friendly_name",
                        "voice_url": "https://byoc.example.com/twilio/app",
                        "voice_method": "POST",
                        "voice_fallback_method": "POST",
                        "voice_fallback_url": "https://byoc.example.com/twilio/fallback",
                        "status_callback_method": "POST",
                        "status_callback_url": "https://byoc.example.com/twilio/status_callback",
                        "cnam_lookup_enabled": false,
                        "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "date_created": "2020-03-18T23:31:36Z",
                        "date_updated": "2020-03-18T23:31:37Z",
                        "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    }
                ]
            }
            '''
        ))
        actual = self.client.voice.v1.byoc_trunks.list()
        self.assertIsNotNone(actual)
    def test_read_empty_response(self):
        """An empty paged payload must still deserialize without error."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://voice.twilio.com/v1/ByocTrunks?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "byoc_trunks"
                },
                "byoc_trunks": []
            }
            '''
        ))
        actual = self.client.voice.v1.byoc_trunks.list()
        self.assertIsNotNone(actual)
    def test_update_request(self):
        """Updating a trunk must POST to the sid-qualified ByocTrunks URL."""
        # Mock a 500 so only the outbound request shape is verified.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://voice.twilio.com/v1/ByocTrunks/BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    def test_update_response(self):
        """A 200 update payload must deserialize into a non-None instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "update_name",
                "voice_url": "https://byoc.example.com/twilio_updated/app",
                "voice_method": "GET",
                "voice_fallback_method": "GET",
                "voice_fallback_url": "https://byoc.example.com/twilio_updated/fallback",
                "status_callback_method": "GET",
                "status_callback_url": "https://byoc.example.com/twilio_updated/status_callback",
                "cnam_lookup_enabled": true,
                "connection_policy_sid": "NYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab",
                "from_domain_sid": "SDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab",
                "date_created": "2020-03-18T23:31:36Z",
                "date_updated": "2020-03-18T23:31:37Z",
                "url": "https://voice.twilio.com/v1/ByocTrunks/BYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.voice.v1.byoc_trunks("BYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)
def test_delete_request(self):
|
instana/python-sensor | bin/create_general_release.py | Python | mit | 1,486 | 0.001346 | #!/usr/bin/env python
# (c) Copyright IBM Corp. 2021
# (c) Copyright Insta | na Inc. 2020
# Script to make a new python-sensor release on Github
# Requires the Github CLI to be installed and configured: https://github.com/cli/cli
import os
import sys
import distutils.spawn
from subprocess import check_output
if len(sys.argv) | != 2:
raise ValueError('Please specify the version to release. e.g. "1.27.1"')
if sys.argv[1] in ['-h', '--help']:
filename = os.path.basename(__file__)
print("Usage: %s <version number>" % filename)
print("Exampe: %s 1.27.1" % filename)
print("")
print("This will create a release on Github such as:")
print("https://github.com/instana/python-sensor/releases/tag/v1.27.1")
# Check requirements first
for cmd in ["gh"]:
if distutils.spawn.find_executable(cmd) is None:
print("Can't find required tool: %s" % cmd)
sys.exit(1)
version = sys.argv[1]
semantic_version = 'v' + version
title = version
body = """
This release includes the following fixes & improvements:
*
Available on PyPI:
https://pypi.python.org/pypi/instana/%s
""" % version
response = check_output(["gh", "release", "create", semantic_version,
"-d", # draft
"-R", "instana/python-sensor",
"-t", semantic_version,
"-n", body])
print("If there weren't any failures, the draft release is available at:")
print(response.strip().decode())
|
raychorn/openstack-dashboard-sample | horizon/openstack_dashboard/dashboards/dashboardname/panel1/tabs.py | Python | gpl-3.0 | 877 | 0.00114 | # vim: | tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may |
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
# @author: Sergey Sudakovich, Cisco Systems, Inc.
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
class IndexTabs(tabs.TabGroup):
slug = "indextabs"
tabs = (,)
|
Damangir/Registry-Management | src/stream_reader/__init__.py | Python | gpl-2.0 | 40 | 0.025 | from long_format impor | t LongFormatReader | |
pythad/nider | examples/draw_on_texture_example/script.py | Python | mit | 1,593 | 0.000628 | from nider.core import Font
from nider.core import Outline
from nider.models import Header
from nider.models import Paragraph
from nider.models import Linkback
from nider.models import Content
from nider.models import TwitterPost
# TODO: change this fontpath to the fontpath on your machine
roboto_font_folder = '/home/ovd/.local/share/fonts/Roboto/'
outline = Outline(2, '#121212')
header = Header(text='Your super interesting title!',
font=Font(roboto_font_folder + 'Roboto-Bold.ttf', 30),
text_width=40,
align='left',
color='#ededed'
)
para = Paragraph(text='Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in repre | henderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.',
font=Font(roboto_font_folder + 'Roboto-Medium.ttf', 29),
text_width=65,
align='left',
color='#ededed'
)
linkback = Linkback(text='foo.com | @username',
| font=Font(roboto_font_folder + 'Roboto-Bold.ttf', 24),
color='#ededed'
)
content = Content(para, header, linkback)
img = TwitterPost(content,
fullpath='result.png'
)
# TODO: change this texture path to the texture path on your machine
img.draw_on_texture('texture.png')
|
minlexx/pyevemon | esi_client/models/get_universe_regions_region_id_not_found.py | Python | gpl-3.0 | 3,034 | 0.00033 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetUniverseRegionsRegionIdNotFound(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, error=None):
"""
GetUniverseRegionsRegionIdNotFound - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'error': 'str'
}
self.attribute_map = {
'error': 'error'
}
self._error = error
@property
def error(self):
"""
Gets the error of this GetUniverseRegionsRegionIdNotFound.
Not found message
:return: The error of this GetUniverseRegionsRegionIdNotFound.
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""
Sets the error of this GetUniverseRegionsRegionIdNotFound.
Not found message
:param error: The error of this GetUniverseRegionsRegionIdNotFound.
:type: str
"""
self._error = error
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
| result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetUniverseRegionsRegionIdNotFound):
return False
re | turn self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
sivas2811/mocha_739 | mocha/mocha/settings.py | Python | unlicense | 2,537 | 0.002365 | """
Django settings for mocha project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4z_f-7=0d-f^v | 5)60toxze=i7z9&%ia4rjrcyqv@(xbwvia-rm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
TEMPLATE_DIRS = (
"templates",
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contr | ib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mocha_models',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mocha.urls'
WSGI_APPLICATION = 'mocha.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mocha', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'mocha', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.