| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
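A row's `prefix`, `middle`, and `suffix` cells are one source file split at two cut points, so concatenating them recovers the original span. A minimal sketch of reassembly and of a fill-in-the-middle style prompt, assuming rows are available as dicts keyed by the column names above (the sentinel tokens are placeholders, not part of this dataset):

```python
def reassemble(row):
    # prefix + middle + suffix is the original source span
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    # reorder the cells so a model predicts the middle given both contexts
    return "{}{}{}{}{}{}".format(pre, row["prefix"], suf, row["suffix"], mid, row["middle"])
```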
| Robozor-network/arom-web_ui | src/aromweb/node_handlers/pymlab_handler.py | Python | gpl-3.0 | 1,316 | 0.009119 |
from handler.base import BaseHandler
import glob
import json
import rospy
class pymlabBase(BaseHandler):
def get(self, path = None):
print('OK')
        file = rospy.get_param('/arom/node/pymlab_node/feature/pymlab_structure/cfg')
        self.render('modules/features/pymlab.hbs', current_file = file)
def post(self, path = None):
operation = self.get_argument('operation', 'get_json')
print('operation>>', self.get_argument('operation', 'XXX'))
if operation == 'get_json':
filename = self.get_argument('file', False)
print(filename)
if filename and filename != 'false':
file = filename
else:
file = glob.glob("/home/odroid/robozor/station/pymlab_presets/*.json")[0]
file = rospy.get_param('/arom/node/pymlab_node/feature/pymlab_structure/cfg')
json_data = json.load(open(file))
self.write(json_data)
elif operation == 'push_json':
filename = self.get_argument('file')
data = json.loads(self.get_argument('data'))
print(filename, data)
            if True: #TODO: verify that the path is correct...
with open(filename, 'w') as f:
json.dump(data, f)
self.write('OK')
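A hedged wiring sketch for the handler above, assuming `BaseHandler` subclasses `tornado.web.RequestHandler` (the `get_argument`/`render`/`write` calls suggest Tornado); the route and port are placeholders:

```python
import tornado.ioloop
import tornado.web

# pymlabBase is the handler defined in the sample above
application = tornado.web.Application([
    (r'/pymlab/(.*)', pymlabBase),
])

if __name__ == '__main__':
    application.listen(8888)  # placeholder port
    tornado.ioloop.IOLoop.current().start()
```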
| simion/sublime-slack-integration | api.py | Python | gpl-2.0 | 1,631 | 0.000613 |
import json
import subprocess
import sublime
try:
from urllib.parse import urlencode
from urllib.request import urlopen
except ImportError:
from urllib import urlencode, urlopen
BASE_URL = 'https://slack.com/api/'
def api_call(method, call_args={}, loading=None, filename=None, icon=None):
if icon:
call_args['icon_url'] = icon
print('icon', icon)
URL = BASE_URL + method + "?" + urlencode(call_args)
print('calling:', URL)
try:
if filename:
f = open(filename, 'rb')
filebody = f.read()
f.close()
data = urlencode({'content': filebody})
response = urlopen(
url=URL,
data=data.encode('utf8')
).read().decode('utf8')
else:
response = urlopen(url=URL).read().decode('utf8')
except:
# fallback for sublime bug with urlopen (on linux only)
if filename: # upload filename
proc = subprocess.Popen(
['curl', '-X', 'POST', '-F', 'file=@'+filename, URL],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(
['curl', '-s', URL],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
response = out.decode('utf8')
response = json.loads(response)
if not response['ok']:
sublime.error_message("SLACK Api error: " + response['error'])
if loading:
loading.done = True
return False
    return response
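A hypothetical call to `api_call` above; the token value is a placeholder and `channels.list` is one of the Slack Web API methods this plugin targets:

```python
response = api_call('channels.list', {'token': 'xoxp-your-token-here'})
if response:
    # on success the decoded JSON payload is returned
    print(response.get('channels', []))
```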
| lu-zero/aravis | tests/python/arv-enum-test.py | Python | lgpl-2.1 | 1,513 | 0.001322 |
#!/usr/bin/env python
# Aravis - Digital camera library
#
# Copyright (c) 2011-2012 Emmanuel Pacaud
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: Emmanuel Pacaud <emmanuel@gnome.org>
# If you have installed aravis in a non standard location, you may need
# to make GI_TYPELIB_PATH point to the correct location. For example:
#
# export GI_TYPELIB_PATH=$GI_TYPELIB_PATH:/opt/bin/lib/girepository-1.0/
#
# You may also have to give the path to libaravis.so, using LD_PRELOAD or
# LD_LIBRARY_PATH.
import gi
gi.require_version ('Aravis', '0.2')
from gi.repository import Aravis
print Aravis.Auto
print Aravis.Auto.OFF
print Aravis.BufferStatus
print Aravis.DebugLevel
print Aravis.DomNodeType
print Aravis.GvStreamPacketResend
print Aravis.GvspPacketType
print Aravis.PixelFormat
print Aravis.PixelFormat.MONO_8
| DONIKAN/django | tests/context_processors/tests.py | Python | bsd-3-clause | 3,031 | 0 |
"""
Tests for Django's bundled context processors.
"""
from django.test import SimpleTestCase, TestCase, override_settings
@override_settings(
ROOT_URLCONF='context_processors.urls',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
],
},
}],
)
class RequestContextProcessorTests(SimpleTestCase):
"""
Tests for the ``django.template.context_processors.request`` processor.
"""
def test_request_attributes(self):
"""
Test that the request object is available in the template and that its
attributes can't be overridden by GET and POST parameters (#3828).
"""
url = '/request_attrs/'
# We should have the request object in the template.
response = self.client.get(url)
self.assertContains(response, 'Have request')
# Test is_secure.
response = self.client.get(url)
self.assertContains(response, 'Not secure')
response = self.client.get(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
response = self.client.post(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
# Test path.
response = self.client.get(url)
        self.assertContains(response, url)
response = self.client.get(url, {'path': '/blah/'})
self.assertContains(response, url)
response = self.client.post(url, {'path': '/blah/'})
self.assertContains(response, url)
@override_settings(
DEBUG=True,
INTERNAL_IPS=['127.0.0.1'],
    ROOT_URLCONF='context_processors.urls',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
],
},
}],
)
class DebugContextProcessorTests(TestCase):
"""
Tests for the ``django.template.context_processors.debug`` processor.
"""
def test_debug(self):
url = '/debug/'
# We should have the debug flag in the template.
response = self.client.get(url)
self.assertContains(response, 'Have debug')
# And now we should not
with override_settings(DEBUG=False):
response = self.client.get(url)
self.assertNotContains(response, 'Have debug')
def test_sql_queries(self):
"""
        Test whether sql_queries represents the actual number
        of queries executed. (#23364)
"""
url = '/debug/'
response = self.client.get(url)
self.assertContains(response, 'First query list: 0')
self.assertContains(response, 'Second query list: 1')
# Check we have not actually memoized connection.queries
self.assertContains(response, 'Third query list: 2')
| CybOXProject/python-cybox | cybox/objects/dns_cache_object.py | Python | bsd-3-clause | 990 | 0.00101 |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox import fields
import cybox.bindings.dns_cache_object as dns_cache_binding
from cybox.common import ObjectProperties, PositiveInteger
from cybox.objects.dns_record_object import DNSRecord
class DNSCacheEntry(entities.Entity):
_namespace = "http://cybox.mitre.org/objects#DNSCacheObject-2"
_binding = dns_cache_binding
_binding_class = dns_cache_binding.DNSCacheEntryType
dns_entry = fields.TypedField("DNS_Entry", DNSRecord)
    ttl = fields.TypedField("TTL", PositiveInteger)
class DNSCache(ObjectProperties):
_binding = dns_cache_binding
_binding_class = dns_cache_binding.DNSCacheObjectType
_namespace = "http://cybox.mitre.org/objects#DNSCacheObject-2"
_XSI_NS = "DNSCacheObj"
_XSI_TYPE = "DNSCacheObjectType"
dns_cache_entry = fields.TypedField("DNS_Cache_Entry", DNSCacheEntry, multiple=True)
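A hedged usage sketch, not part of the sample: mixbox `TypedField`s are descriptors, so instances are normally populated by plain attribute assignment, and `multiple=True` fields take lists:

```python
# hypothetical construction of a one-entry cache
entry = DNSCacheEntry()
entry.ttl = PositiveInteger(3600)

cache = DNSCache()
cache.dns_cache_entry = [entry]
```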
| uudiin/bleachbit | tests/TestWinapp.py | Python | gpl-3.0 | 11,482 | 0.001132 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2015 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test cases for module Winapp
"""
import os
import shutil
import sys
import unittest
sys.path.append('.')
from bleachbit.Winapp import Winapp, detectos, detect_file, section2option
from bleachbit.Windows import detect_registry_key
import common
if 'nt' == os.name:
import _winreg
else:
def fake_detect_registry_key(f):
return True
import bleachbit.Windows
bleachbit.Windows.detect_registry_key = fake_detect_registry_key
def get_winapp2():
"""Download and cache winapp2.ini. Return local filename."""
url = "http://www.winapp2.com/Winapp2.ini"
tmpdir = None
if 'posix' == os.name:
tmpdir = '/tmp'
if 'nt' == os.name:
tmpdir = os.getenv('TMP')
fn = os.path.join(tmpdir, 'bleachbit_test_winapp2.ini')
if os.path.exists(fn):
import time
import stat
age_seconds = time.time() - os.stat(fn)[stat.ST_MTIME]
if age_seconds > (24 * 36 * 36):
print 'note: deleting stale file %s ' % fn
os.remove(fn)
if not os.path.exists(fn):
f = file(fn, 'w')
import urllib2
txt = urllib2.urlopen(url).read()
f.write(txt)
return fn
class WinappTestCase(unittest.TestCase):
"""Test cases for Winapp"""
def run_all(self, cleaner, really_delete):
"""Test all the cleaner options"""
for (option_id, __name) in cleaner.get_options():
for cmd in cleaner.get_commands(option_id):
for result in cmd.execute(really_delete):
common.validate_result(self, result, really_delete)
def test_remote(self):
"""Test with downloaded file"""
winapps = Winapp(get_winapp2())
for cleaner in winapps.get_cleaners():
self.run_all(cleaner, False)
def test_detectos(self):
"""Test detectos function"""
# Tests are in the format (required_ver, mock, expected_return)
tests = (('5.1', '5.1', True),
('5.1', '6.0', False),
('6.0', '5.1', False),
('|5.1', '5.1', True),
('|5.1', '6.0', False),
('6.1|', '5.1', False),
('6.1|', '6.0', False),
('6.1|', '6.1', True),
('6.1|', '6.2', True),
('6.2|', '5.1', False),
('6.2|', '6.0', False),
('6.2|', '6.1', False),
('6.2|', '6.2', True))
for (s, mock, expected_return) in tests:
actual_return = detectos(s, mock)
self.assertEqual(expected_return, actual_return,
'detectos(%s, %s)==%s instead of %s' % (s, mock,
actual_return, expected_return))
def test_detect_file(self):
"""Test detect_file function"""
tests = [('%windir%\\system32\\kernel32.dll', True),
('%windir%\\system32', True),
('%ProgramFiles%\\Internet Explorer', True),
('%ProgramFiles%\\Internet Explorer\\', True),
('%windir%\\doesnotexist', False),
('%windir%\\system*', True),
('%windir%\\*ystem32', True),
('%windir%\\*ystem3*', True)]
# On 64-bit Windows, Winapp2.ini expands the %ProgramFiles% environment
# variable to also %ProgramW6432%, so test unique entries in
# %ProgramW6432%.
import struct
if not 32 == 8 * struct.calcsize('P'):
raise NotImplementedError('expecting 32-bit Python')
if os.getenv('ProgramW6432'):
dir_64 = os.listdir(os.getenv('ProgramFiles'))
dir_32 = os.listdir(os.getenv('ProgramW6432'))
dir_32_unique = set(dir_32) - set(dir_64)
if dir_32 and not dir_32_unique:
raise RuntimeError(
'Test expects objects in %ProgramW6432% not in %ProgramFiles%')
for pathname in dir_32_unique:
tests.append(('%%ProgramFiles%%\\%s' % pathname, True))
else:
print 'NOTE: skipping %ProgramW6432% tests because WoW64 not detected'
for (pathname, expected_return) in tests:
actual_return = detect_file(pathname)
msg = 'detect_file(%s) returned %s' % (pathname, actual_return)
            self.assertEqual(expected_return, actual_return, msg)
def test_fake(self):
"""Test with fake file"""
ini_fn = None
keyfull = 'HKCU\\Software\\BleachBit\\DeleteThisKey'
subkey = 'Software\\BleachBit\\DeleteThisKey\\AndThisKey'
def setup_fake(f1_filename=None):
"""Setup the test environment"""
dirname = tempfile.mkdtemp(prefix='bleachbit-test-winapp')
f1 = os.path.join(dirname, f1_filename or 'deleteme.log')
            file(f1, 'w').write('')
dirname2 = os.path.join(dirname, 'sub')
os.mkdir(dirname2)
f2 = os.path.join(dirname2, 'deleteme.log')
file(f2, 'w').write('')
fbak = os.path.join(dirname, 'deleteme.bak')
file(fbak, 'w').write('')
self.assertTrue(os.path.exists(f1))
self.assertTrue(os.path.exists(f2))
self.assertTrue(os.path.exists(fbak))
hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, subkey)
hkey.Close()
self.assertTrue(detect_registry_key(keyfull))
self.assertTrue(detect_registry_key('HKCU\\%s' % subkey))
return (dirname, f1, f2, fbak)
def ini2cleaner(filekey, do_next=True):
ini = file(ini_fn, 'w')
ini.write('[someapp]\n')
ini.write('LangSecRef=3021\n')
ini.write(filekey)
ini.write('\n')
ini.close()
self.assertTrue(os.path.exists(ini_fn))
if do_next:
return Winapp(ini_fn).get_cleaners().next()
else:
return Winapp(ini_fn).get_cleaners()
# reuse this path to store a winapp2.ini file in
import tempfile
(ini_h, ini_fn) = tempfile.mkstemp(suffix='.ini', prefix='winapp2')
os.close(ini_h)
# a set of tests
tests = [
# single file
('FileKey1=%s|deleteme.log', None,
False, True, False, True, True, True),
# special characters for XML
('FileKey1=%s|special_chars_&-\'.txt', 'special_chars_&-\'.txt',
False, True, False, True, True, True),
# *.log
('FileKey1=%s|*.LOG', None, False, True, False, True, True, True),
# semicolon separates different file types
('FileKey1=%s|*.log;*.bak', None,
False, True, False, True, False, True),
# *.*
('FileKey1=%s|*.*', None, False, True, False, True, False, True),
# recurse *.*
('FileKey1=%s|*.*|RECURSE', None, False,
True, False, False, False, True),
# remove self *.*, this removes the directory
('FileKey1=%s|*.*|REMOVESELF', None,
False, False, False, False, False, True),
]
# Add positive detection, where the detection believes the application is present,
# to all the tests, which are also positive.
new_tests = []
for test in tests:
for det
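The `test_detectos` table earlier in this sample pins down winapp2.ini's Detect version syntax: a bare version means an exact match, `|x` means at most x, and `x|` means at least x. A re-implementation of those semantics, hypothetical and for illustration only (the real `detectos` lives in `bleachbit.Winapp`):

```python
def detectos_sketch(required, mock):
    ver = lambda s: tuple(int(p) for p in s.split('.'))
    if required.startswith('|'):       # "|x": mock version at most x
        return ver(mock) <= ver(required[1:])
    if required.endswith('|'):         # "x|": mock version at least x
        return ver(mock) >= ver(required[:-1])
    return ver(mock) == ver(required)  # bare "x": exact match
```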
| thecut/thecut-googleanalytics | thecut/googleanalytics/migrations/0001_initial.py | Python | apache-2.0 | 1,911 | 0.003663 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oauth2client.django_orm
class Migration(migrations.Migration):
dependencies = [
        ('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('web_property_id', models.CharField(help_text='The property tracking ID is available when viewing the "Tracking Code" details in the Google Analytics admin.', max_length=25, verbose_name='property tracking ID')),
('profile_id', models.CharField(default='', max_length=25, verbose_name='view (profile) ID', blank=True)),
('display_features', models.BooleanField(default=False, help_text='Used for remarketing, demographics and interest reporting.', verbose_name='Use Display advertising features?')),
('is_enabled', models.BooleanField(default=False, help_text='Is Google Analytics tracking enabled on the website?', verbose_name='enabled')),
],
options={
'ordering': ['site'],
'verbose_name': 'view (profile)',
'verbose_name_plural': 'views (profiles)',
},
),
migrations.CreateModel(
name='ProfileOAuth2Credentials',
fields=[
('id', models.OneToOneField(related_name='_oauth2_credentials', primary_key=True, serialize=False, to='googleanalytics.Profile')),
('credentials', oauth2client.django_orm.CredentialsField(null=True)),
],
),
migrations.AddField(
model_name='profile',
name='site',
field=models.OneToOneField(related_name='+', to='sites.Site'),
),
]
| cjaymes/pyscap | src/scap/model/dc_elements_1_1/Rights.py | Python | gpl-3.0 | 765 | 0.001307 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.model.xs.StringType import StringType
class Rights(StringType):
pass
| lmazuel/azure-sdk-for-python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/firewall_rule.py | Python | mit | 1,956 | 0.000511 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class FirewallRule(SubResource):
"""Data Lake Analytics firewall rule information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar start_ip_address: The start IP address for the firewall rule. This
can be either ipv4 or ipv6. Start and End should be in the same protocol.
:vartype start_ip_address: str
:ivar end_ip_address: The end IP address for the firewall rule. This can
be either ipv4 or ipv6. Start and End should be in the same protocol.
:vartype end_ip_address: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'start_ip_address': {'readonly': True},
'end_ip_address': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_ip_address': {'key': 'properties.startIpAddress', 'type': 'str'},
'end_ip_address': {'key': 'properties.endIpAddress', 'type': 'str'},
}
def __init__(self):
super(FirewallRule, self).__init__()
self.start_ip_address = None
self.end_ip_address = None
| lenn0x/Milo-Tracing-Framework | src/py/examples/milo_server.py | Python | apache-2.0 | 861 | 0.012776 |
"""Basic Thrift server that uses the Milo Protocol"
|
""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from helloworld import HelloWorld
from milo.protocols.thrift import TMiloProtocolFactory
from milo.servers.thrift.TThreadedServer import TDebugThreadedServer
class HelloWorldHandler:
def ping(self, name):
return name
handler = HelloWorldHandler()
processor = HelloWorld.Processor(handler)
transport = TSocket.TServerSocket(9090)
tfactory = TTransport.TFramedTransportFactory()
pfactory = TMiloProtocolFactory()
server = TDebugThreadedServer(processor, transport, tfactory, pfactory)
print 'Starting the server...'
server.serve()
print 'done.'
| McIntyre-Lab/papers | nanni_maize_2022/scripts/merge_tappas_DEA_results.py | Python | lgpl-3.0 | 4,326 | 0.017337 |
#!/usr/bin/env python
import pandas as pd
import numpy as np
import argparse
def getOptions():
# Parse command line arguments
parser = argparse.ArgumentParser(description="Merge tappAS DEA output files with detection flags")
# Input data
parser.add_argument("-t", "--input-directory", dest="inDir", required=True, help="Input directory of maize tappAS output")
parser.add_argument("-f", "--flag", dest="inFlag", required=True, help="Input TSV of on/off flags")
parser.add_argument("-a", "--annot", dest="inAnnot", required=True, help="Input CSV of transcript_id to gene_id pairs in transcriptome")
parser.add_argument("-e", "--exclude", dest="exclude", required=False, action='append', help="Samples to exclude from expression matrices, multiple values can be listed with each '-e'")
# Output data
parser.add_argument("-o", "--output", dest="outFile", required=True, help="Output file")
args = parser.parse_args()
return args
def main():
    # Get on/off flags and transcript_id to gene_id pairs
flagDF = pd.read_csv(args.inFlag, sep="\t",low_memory=False)
annotDF = pd.read_csv(args.inAnnot)
    # Merge gene_id into flags
geneDF = pd.merge(annotDF,flagDF,how='outer',on='transcript_id',indicator='merge_check')
# geneDF['merge_check'].value_counts()
# Fix the 89 single transcript genes missing in EA files
# (have the same value for transcript_id and gene_id)
geneDF = geneDF[geneDF['merge_check']!='left_only']
geneDF['gene_id'] = np.where(geneDF['merge_check']=='right_only',geneDF['transcript_id'],geneDF['gene_id'])
del(geneDF['merge_check'])
# geneDF['gene_id'].isna().any()
geneDF = geneDF.groupby('gene_id')[[c for c in geneDF.columns if 'flag' in c]].max().reset_index()
# Remove excluded columns if provided
if args.exclude is not None:
for s in args.exclude:
geneDF = geneDF.drop(columns=[c for c in geneDF.columns if c==s])
print("Removed {} columns from matrix...".format(s))
# Count and drop transcripts that are not detected in any samples
# and transcripts only detected in one samples
if 'sum_flag' not in geneDF.columns:
geneDF['sum_flag'] = geneDF[[c for c in geneDF.columns if "flag_" in c]].sum(axis=1).astype(int)
print("Detection of genes by number of genotypes:\nnumSamples\tfreq\n{}".format(
geneDF['sum_flag'].value_counts().sort_index()))
print("{} transcripts detected in 0 samples or 1 sample only\n...Dropped from tappas files".format(
len(geneDF[geneDF['sum_flag']<=1])))
detectDF = geneDF[geneDF['sum_flag']>1].copy().reset_index(drop=True)
# Merge genotype tappAS output files
mergeDF = detectDF.copy()
for genotype in ["B73","C123","Hp301","Mo17","NC338"]:
tempDF = pd.read_csv("{}/{}_tappAS_DEA_Genes.tsv".format(args.inDir,genotype),sep="\t")
tempDF['flag_DE_'+genotype] = np.where(tempDF['DEA Result']=="DE",1,0)
tempDF = tempDF.rename(columns={'#Gene':'gene_id',
'(1 - Probability)':'DE_pval_'+genotype,
'Log2FC':'Log2FC_'+genotype,
'Ambient MeanExpLevel':'mean_TPM_ambient_'+genotype,
'Ozone MeanExpLevel':'mean_TPM_ozone_'+genotype})
tempDF = tempDF[['gene_id','DE_pval_'+genotype,'flag_DE_'+genotype,
'Log2FC_'+genotype,'mean_TPM_ambient_'+genotype,'mean_TPM_ozone_'+genotype]]
tempMerge = pd.merge(mergeDF,tempDF,how='outer',on="gene_id",indicator='merge_check')
# tempMerge['merge_check'].value_counts()
print("\t{} genes not detected in {}".format(len(tempMerge[tempMerge['merge_check']=='left_only']),genotype))
del(tempMerge['merge_check'])
tempMerge['flag_detect_DE_'+genotype] = np.where((tempMerge['flag_DE_'+genotype]==1)&
(tempMerge['flag_'+genotype+'_Amb']+tempMerge['flag_'+genotype+'_Ele']>0),1,0)
mergeDF = tempMerge.copy()
# Output merged file of flags and tappas results
mergeDF.to_csv(args.outFile,index=False)
if __name__ == '__main__':
# Parse command line arguments
global args
args = getOptions()
main()
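A hypothetical invocation of the script above, matching the argparse options it defines (all paths and sample names are placeholders):

```python
# python merge_tappas_DEA_results.py \
#     --input-directory tappas_output/ \
#     --flag detection_flags.tsv \
#     --annot transcript_gene_pairs.csv \
#     --exclude sampleX \
#     --output merged_DEA_flags.csv
```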
| megaserg/pants | src/python/pants/backend/python/tasks/python_eval.py | Python | apache-2.0 | 6,421 | 0.009967 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.tasks.python_task import PythonTask
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.base.workunit import WorkUnit, WorkUnitLabel
class PythonEval(PythonTask):
class Error(TaskError):
"""A richer failure exception type useful for tests."""
def __init__(self, *args, **kwargs):
compiled = kwargs.pop('compiled')
failed = kwargs.pop('failed')
super(PythonEval.Error, self).__init__(*args, **kwargs)
self.compiled = compiled
self.failed = failed
_EVAL_TEMPLATE_PATH = os.path.join('templates', 'python_eval', 'eval.py.mustache')
@staticmethod
def _is_evalable(target):
return isinstance(target, (PythonLibrary, PythonBinary))
@classmethod
def register_options(cls, register):
super(PythonEval, cls).register_options(register)
register('--fail-slow', action='store_true', default=False,
help='Compile all targets and present the full list of errors.')
register('--closure', action='store_true', default=False,
help='Eval all targets in the closure individually instead of just the targets '
'specified on the command line.')
def execute(self):
targets = self.context.targets() if self.get_options().closure else self.context.target_roots
with self.invalidated(filter(self._is_evalable, targets),
topological_order=True) as invalidation_check:
compiled = self._compile_targets(invalidation_check.invalid_vts)
return compiled # Collected and returned for tests
def _compile_targets(self, invalid_vts):
with self.context.new_workunit(name='eval-targets', labels=[WorkUnitLabel.MULTITOOL]):
compiled = []
failed = []
for vt in invalid_vts:
target = vt.target
return_code = self._compile_target(target)
if return_code == 0:
vt.update() # Ensure partial progress is marked valid
compiled.append(target)
else:
if self.get_options().fail_slow:
failed.append(target)
else:
raise self.Error('Failed to eval {}'.format(target.address.spec),
compiled=compiled,
failed=[target])
if failed:
msg = 'Failed to evaluate {} targets:\n {}'.format(
len(failed),
'\n '.join(t.address.spec for t in failed))
raise self.Error(msg, compiled=compiled, failed=failed)
return compiled
def _compile_target(self, target):
# "Compiles" a target by forming an isolated chroot of its sources and transitive deps and then
    # attempting to import each of the target's sources in the case of a python library or else the
# entry point in the case of a python binary.
#
# For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like:
#
# if __name__ == '__main__':
# import lib.core
# import lib.util
#
# For a binary with entry point lib.bin:main the "compiler" main file would look like:
#
    #   if __name__ == '__main__':
# from lib.bin import main
#
# In either case the main file is executed within the target chroot to reveal missing BUILD
# dependencies.
with self.context.new_workunit(name=target.address.spec):
modules = []
if isinstance(target, PythonBinary):
source = 'entry_point {}'.format(target.entry_point)
components = target.entry_point.rsplit(':', 1)
module = components[0]
if len(components) == 2:
function = components[1]
data = TemplateData(source=source,
import_statement='from {} import {}'.format(module, function))
else:
data = TemplateData(source=source, import_statement='import {}'.format(module))
modules.append(data)
else:
for path in target.sources_relative_to_source_root():
if path.endswith('.py'):
if os.path.basename(path) == '__init__.py':
module_path = os.path.dirname(path)
else:
module_path, _ = os.path.splitext(path)
source = 'file {}'.format(os.path.join(target.target_base, path))
module = module_path.replace(os.path.sep, '.')
data = TemplateData(source=source, import_statement='import {}'.format(module))
modules.append(data)
if not modules:
# Nothing to eval, so a trivial compile success.
return 0
interpreter = self.select_interpreter_for_targets([target])
if isinstance(target, PythonBinary):
pexinfo, platforms = target.pexinfo, target.platforms
else:
pexinfo, platforms = None, None
generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH),
chroot_parent=self.chroot_cache_dir, modules=modules)
executable_file_content = generator.render()
chroot = self.cached_chroot(interpreter=interpreter,
pex_info=pexinfo,
targets=[target],
platforms=platforms,
executable_file_content=executable_file_content)
pex = chroot.pex()
with self.context.new_workunit(name='eval',
labels=[WorkUnitLabel.COMPILER, WorkUnitLabel.RUN,
WorkUnitLabel.TOOL],
cmd=' '.join(pex.cmdline())) as workunit:
returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
if returncode != 0:
self.context.log.error('Failed to eval {}'.format(target.address.spec))
return returncode
| Yelp/pushmanager | pushmanager/servlets/addrequest.py | Python | apache-2.0 | 2,713 | 0.001843 |
import pushmanager.core.db as db
import pushmanager.core.util
from pushmanager.core.db import InsertIgnore
from pushmanager.core.mail import MailQueue
from pushmanager.core.requesthandler import RequestHandler
from pushmanager.core.xmppclient import XMPPQueue
class AddRequestServlet(RequestHandler):
def post(self):
if not self.current_user:
return self.send_error(403)
self.pushid = pushmanager.core.util.get_int_arg(self.request, 'push')
self.request_ids = self.request.arguments.get('request', [])
insert_queries = [
InsertIgnore(db.push_pushcontents, ({'request': int(i), 'push': self.pushid}))
for i in self.request_ids
]
update_query = db.push_requests.update().where(
db.push_requests.c.id.in_(self.request_ids)).values({'state': 'added'})
request_query = db.push_requests.select().where(
db.push_requests.c.id.in_(self.request_ids))
db.execute_transaction_cb(insert_queries + [update_query, request_query], self.on_db_complete)
# allow both GET and POST
get = post
def on_db_complete(self, success, db_results):
self.check_db_results(success, db_results)
for req in db_results[-1]:
if req['watchers']:
user_string = '%s (%s)' % (req['user'], req['watchers'])
users = [req['user']] + req['watchers'].split(',')
else:
user_string = req['user']
users = [req['user']]
            msg = (
"""
<p>
%(pushmaster)s has accepted request for %(user)s into a push:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
Regards,<br />
PushManager
</p>"""
) % pushmanager.core.util.EscapedDict({
'pushmaster': self.current_user,
'user': user_string,
'title': req['title'],
'repo': req['repo'],
'branch': req['branch'],
})
subject = "[push] %s - %s" % (user_string, req['title'])
MailQueue.enqueue_user_email(users, msg, subject)
msg = '{0} has accepted request "{1}" for {2} into a push:\n{3}/push?id={4}'.format(
self.current_user,
req['title'],
user_string,
self.get_base_url(),
self.pushid,
)
XMPPQueue.enqueue_user_xmpp(users, msg)
| SINGROUP/pycp2k | pycp2k/classes/_force_eval2.py | Python | lgpl-3.0 | 1,808 | 0.002212 |
from pycp2k.inputsection import InputSection
from ._external_potential1 import _external_potential1
from ._rescale_forces1 import _rescale_forces1
from ._mixed1 import _mixed1
from ._dft1 import _dft1
from ._mm1 import _mm1
from ._qmmm1 import _qmmm1
from ._eip1 import _eip1
from ._bsse1 import _bsse1
from ._subsys1 import _subsys1
from ._properties1 import _properties1
from ._print64 import _print64
class _force_eval2(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Method = None
self.Stress_tensor = None
self.Embed = None
self.EXTERNAL_POTENTIAL_list = []
self.RESCALE_FORCES = _rescale_forces1()
self.MIXED = _mixed1()
self.DFT = _dft1()
self.MM = _mm1()
self.QMMM = _qmmm1()
        self.EIP = _eip1()
self.BSSE = _bsse1()
self.SUBSYS = _subsys1()
self.PROPERTIES = _properties1()
self.PRINT = _print64()
self._name = "FORCE_EVAL"
self._keywords = {'Method': 'METHOD', 'Embed': 'EMBED', 'Stress_tensor': 'STRESS_TENSOR'}
        self._subsections = {'PRINT': 'PRINT', 'MIXED': 'MIXED', 'EIP': 'EIP', 'SUBSYS': 'SUBSYS', 'RESCALE_FORCES': 'RESCALE_FORCES', 'PROPERTIES': 'PROPERTIES', 'DFT': 'DFT', 'QMMM': 'QMMM', 'BSSE': 'BSSE', 'MM': 'MM'}
self._repeated_subsections = {'EXTERNAL_POTENTIAL': '_external_potential1'}
self._attributes = ['EXTERNAL_POTENTIAL_list']
def EXTERNAL_POTENTIAL_add(self, section_parameters=None):
new_section = _external_potential1()
if section_parameters is not None:
if hasattr(new_section, 'Section_parameters'):
new_section.Section_parameters = section_parameters
self.EXTERNAL_POTENTIAL_list.append(new_section)
return new_section
| berquist/PyQuante | Tests/integrals.py | Python | bsd-3-clause | 3,015 | 0.026866 |
#!/usr/bin/env python
"""\
Test the 2e integral code.
"""
from PyQuante.pyints import contr_coulomb as pycc
from PyQuante.rys import contr_coulomb as ryscc
from PyQuante.hgp import contr_coulomb as hgpcc
from PyQuante.cints import contr_coulomb as ccc
from PyQuante.crys import contr_coulomb as cryscc
from PyQuante.chgp import contr_coulomb as chgpcc
from PyQuante.CGBF import CGBF
from PyQuante.cints import ijkl2intindex
from PyQuante.Ints import getbasis
from PyQuante.Molecule import Molecule
from time import time
def get2ints(bfs,coul_func):
"""Store integrals in a long array in the form (ij|kl) (chemists
notation. We only need i>=j, k>=l, and ij <= kl"""
from array import array
nbf = len(bfs)
totlen = nbf*(nbf+1)*(nbf*nbf+nbf+2)/8
Ints = array('d',[0]*totlen)
for i in range(nbf):
for j in range(i+1):
ij = i*(i+1)/2+j
for k in range(nbf):
for l in range(k+1):
kl = k*(k+1)/2+l
if ij >= kl:
ijkl = ijkl2intindex(i,j,k,l)
Ints[ijkl] = coulomb(bfs[i],bfs[j],bfs[k],bfs[l],
coul_func)
return Ints
def coulomb(a,b,c,d,coul_func):
"Coulomb interaction between 4 contracted Gaussians"
Jij = coul_func(a.pexps,a.pcoefs,a.pnorms,a.origin,a.powers,
b.pexps,b.pcoefs,b.pnorms,b.origin,b.powers,
                    c.pexps,c.pcoefs,c.pnorms,c.origin,c.powers,
d.pexps,d.pcoefs,d.pnorms,d.origin,d.powers)
return a.norm*b.norm*c.norm*d.norm*Jij
def maxdiff(a,b):
md = -1e10
for i in range(len(a)):
md = max(md,a[i]-b[i])
return md
def test():
#from PyQuante.Basis.sto3g import basis_data
from PyQuante.Basis.p631ss import basis_data
r = 1/0.52918
atoms=Molecule('h2o',atomlist = [(8,(0,0,0)),(1,(r,0,0)),(1,(0,r,0))])
inttol = 1e-6 # Tolerance to which integrals must be equal
bfs = getbasis(atoms,basis_data)
print "Int times: "
t0 = time()
int0 = get2ints(bfs,chgpcc)
t1 = time()
print "CHGP Ints: ",t1-t0
int1 = get2ints(bfs,cryscc)
t2 = time()
print "CRys Ints: ",t2-t1
assert maxdiff(int0,int1)<inttol
int1 = get2ints(bfs,ccc)
t3 = time()
print "CINTS Ints: ",t3-t2
assert maxdiff(int0,int1)<inttol
    int1 = get2ints(bfs,hgpcc)
t4 = time()
print "HGP Ints: ",t4-t3
assert maxdiff(int0,int1)<inttol
int1 = get2ints(bfs,ryscc)
t5 = time()
print "Rys Ints: ",t5-t4
assert maxdiff(int0,int1)<inttol
int1 = get2ints(bfs,pycc)
t6 = time()
print "Py Ints: ",t6-t5
assert maxdiff(int0,int1)<inttol
if __name__ == '__main__': test()
# Sample times (9/28/09, Macbook Air:)
# Int times:
# CHGP Ints: 3.02386283875
# CRys Ints: 2.28243303299
# CINTS Ints: 6.17023396492
# HGP Ints: 250.576164007
# Rys Ints: 204.740512133
# Py Ints: 295.842331886
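Why `totlen = nbf*(nbf+1)*(nbf*nbf+nbf+2)/8` in `get2ints` above: with i >= j there are npair = nbf*(nbf+1)/2 distinct (i,j) pairs, and with ij >= kl there are npair*(npair+1)/2 distinct pair-of-pairs, which expands to that closed form. A quick check with a hypothetical helper:

```python
def n_unique_integrals(nbf):
    npair = nbf * (nbf + 1) // 2      # pairs with i >= j
    return npair * (npair + 1) // 2   # pair-of-pairs with ij >= kl

assert n_unique_integrals(10) == 10 * 11 * (10 * 10 + 10 + 2) // 8  # both 1540
```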
| s-monisha/LibreHatti | src/librehatti/bills/views.py | Python | gpl-2.0 | 7,704 | 0.005452 |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from librehatti.catalog.models import PurchaseOrder
from librehatti.catalog.models import Category
from librehatti.catalog.models import PurchasedItem
from librehatti.catalog.models import ModeOfPayment
from librehatti.catalog.models import Product
from librehatti.catalog.models import HeaderFooter
from librehatti.catalog.models import Surcharge
from librehatti.catalog.request_change import request_notify
from librehatti.bills.models import QuotedTaxesApplied
from librehatti.bills.models import QuotedOrder
from librehatti.bills.models import QuotedBill
from librehatti.bills.models import QuotedItem
from librehatti.bills.models import QuotedOrderofSession
from librehatti.bills.models import QuotedOrderNote
from librehatti.bills.models import NoteLine
from librehatti.bills.forms import SelectNoteForm
from librehatti.bills.forms import ItemSelectForm
from librehatti.suspense.models import QuotedSuspenseOrder
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
import useraccounts
from django.db.models import Sum
from django.db.models import Max
import simplejson
from django.core.urlresolvers import reverse
from librehatti.voucher.models import FinancialSession
"""
This view calculate taxes on quoted order, bill data
and save those values in database.
"""
@login_required
def quoted_bill_cal(request):
old_post = request.session.get('old_post')
quoted_order_id = request.session.get('quoted_order_id')
quoted_order = QuotedOrder.objects.get(id=quoted_order_id)
quoted_order_obj = QuotedOrder.objects.values('total_discount').\
get(id=quoted_order_id)
quoted_item = QuotedItem.objects.\
filter(quoted_order=quoted_order_id).aggregate(Sum('price'))
total = quoted_item['price__sum']
price_total = total - quoted_order_obj['total_discount']
totalplusdelivery = price_total
surcharge = Surcharge.objects.values('id', 'value', 'taxes_included')
delivery_rate = Surcharge.objects.values('value').\
filter(tax_name='Transportation')
distance = QuotedSuspenseOrder.objects.\
filter(quoted_order=quoted_order_id).aggregate(Sum('distance_estimated'))
if distance['distance_estimated__sum']:
delivery_charges = int(distance['distance_estimated__sum'])*\
delivery_rate[0]['value']
totalplusdelivery = totalplusdelivery + delivery_charges
else:
delivery_charges = 0
for value in surcharge:
surcharge_id = value['id']
surcharge_value = value['value']
surcharge_tax = value['taxes_included']
if surcharge_tax == 1:
taxes = round((totalplusdelivery * surcharge_value)/100)
surcharge_obj = Surcharge.objects.get(id=surcharge_id)
taxes_applied = QuotedTaxesApplied(quoted_order=quoted_order,
surcharge=surcharge_obj, tax=taxes)
taxes_applied.save()
taxes_applied_obj = QuotedTaxesApplied.objects.\
filter(quoted_order=quoted_order_id).aggregate(Sum('tax'))
tax_total = taxes_applied_obj['tax__sum']
grand_total = price_total + tax_total + delivery_charges
amount_received = grand_total
bill = QuotedBill(quoted_order=quoted_order, total_cost=price_total,
total_tax=tax_total, grand_total=grand_total,
delivery_charges=delivery_charges, amount_received=amount_received,
totalplusdelivery=totalplusdelivery)
bill.save()
request.session['old_post'] = old_post
request.session['quoted_order_id'] = quoted_order_id
return HttpResponseRedirect(reverse("librehatti.bills.views.select_note"))
@login_required
def quoted_order_added_success(request):
quoted_order_id = request.session.get('quoted_order_id')
details = QuotedOrder.objects.values('buyer__first_name',\
'buyer__last_name', 'buyer__customer__address__street_address',\
'buyer__customer__title', 'buyer__customer__address__city').\
filter(id=quoted_order_id)[0]
return render(request, 'bills/quoted_success.html', {'details':details,
'quoted_order_id':quoted_order_id})
@login_required
def select_note(request):
quoted_order_id = request.session.get('quoted_order_id')
form = SelectNoteForm(initial={'quoted_order':quoted_order_id})
request_status = request_notify()
return render(request, 'bills/select_note.html', \
{'form':form, 'request':request_status})
@login_required
def select_note_save(request):
if request.method == 'POST':
form = SelectNoteForm(request.POST)
if form.is_valid():
formdata = form.cleaned_data
quoted_order = formdata['quoted_order']
quoted_order_id = QuotedOrder.objects.get(id=quoted_order)
note_list = []
for note in formdata['note_line']:
note_list.append(note)
for value in note_list:
obj = QuotedOrderNote(quoted_order=quoted_order_id, note=value)
obj.save()
return HttpResponseRedirect(\
reverse("librehatti.bills.views.quoted_order_added_success"))
else:
return HttpResponseRedirect(\
reverse("librehatti.bills.views.quoted_order_added_success"))
else:
error_type = "404 Forbidden"
error = "Please again place the order"
temp = {'type': error_type, 'message':error}
return render(request, 'error_page.html', temp)
@login_required
def new_note_line(request):
note_line = request.GET['note_line']
obj = NoteLine(note=note_line)
obj.save()
return HttpResponse('')
@login_required
def delete_note(request):
delete_note = request.GET['delete_note']
delete_note_id = delete_note.split(',')
for id in delete_note_id:
NoteLine.objects.filter(id=id).delete()
return HttpResponse('')
@login_required
def quoted_order_of_session(request):
old_post = request.session.get('old_post')
quoted_order_id = request.session.get('quoted_order_id')
    quoted_order = QuotedOrder.objects.get(id=quoted_order_id)
quoted_order_obj = QuotedOrder.objects.values('id', 'date_time').\
get(id=quoted_order_id)
quoted_order_date = quoted_order_obj['date_time']
financialsession = FinancialSession.objects.\
values('id', 'session_start_date', 'session_end_date')
for value in financialsession:
start_date = value['session_start_date']
end_date = value['session_end_date']
if start_date <= quoted_order_date <= end_date:
session_id = value['id']
session = FinancialSession.objects.get(id=session_id)
max_id = QuotedOrderofSession.objects.all().aggregate(Max('id'))
if max_id['id__max'] == None:
obj = QuotedOrderofSession(quoted_order=quoted_order,\
session=session, quoted_order_session=1)
obj.save()
else:
quoted_order_of_session = QuotedOrderofSession.objects.\
values('quoted_order_session', 'session').get(id=max_id['id__max'])
if quoted_order_of_session['session'] == session_id:
obj = QuotedOrderofSession(quoted_order=quoted_order,\
session=session, quoted_order_session=\
quoted_order_of_session['quoted_order_session']+1)
obj.save()
else:
obj = QuotedOrderofSession(quoted_order=quoted_order,\
session=session, quoted_order_session=1)
obj.save()
request.session['old_post'] = old_post
request.session['quoted_order_id'] = quoted_order_id
return HttpResponseRedirect(\
reverse("librehatti.suspense.views.quoted_add_distance"))
| SmartElect/SmartElect | help_desk/models.py | Python | apache-2.0 | 20,453 | 0.001125 |
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from libya_elections.abstract import AbstractTimestampTrashBinModel
from libya_elections.libya_bread import StartEndTimeFormatterMixin, TimestampFormatterMixin
from libya_elections.phone_numbers import FormattedPhoneNumberMixin, PhoneNumberField
BUTTON_GO_BACK = "goback"
BUTTON_START_OVER = "startover"
BUTTON_YES = "yes"
BUTTON_NO = "no"
BUTTON_NO_CITIZEN = "no_citizen"
BUTTON_MATCH = "match"
BUTTON_NO_MATCH = "nomatch"
BUTTON_CONTINUE = "continue"
BUTTON_HUNG_UP = "hungup"
BUTTON_SUBMIT = "submit"
BUTTON_UNABLE = "unable"
BUTTON_DONE = "done"
BUTTON_CHOICES = (
(BUTTON_GO_BACK, _('Previous screen')),
(BUTTON_START_OVER, _('Start from Beginning')),
(BUTTON_YES, _('Yes')),
(BUTTON_NO, _('No')),
(BUTTON_NO_CITIZEN, _('No, Caller is a Citizen')),
(BUTTON_MATCH, _('Name and ID match')),
(BUTTON_NO_MATCH, _('Name and ID do not match')),
(BUTTON_CONTINUE, _('Continue')),
(BUTTON_HUNG_UP, _('Caller hung up')),
(BUTTON_SUBMIT, _('Submit')),
(BUTTON_UNABLE, _('Unable to Provide')),
(BUTTON_DONE, _('Done')),
)
# To make it easier to look up the text for a button
BUTTON_TEXT = dict(BUTTON_CHOICES)
# CSS class to use for rendering each button, if not overridden by the screen.
DEFAULT_BUTTON_CLASSES = {
BUTTON_GO_BACK: 'info',
BUTTON_YES: 'success', # BLUE
BUTTON_MATCH: 'success', # BLUE
BUTTON_CONTINUE: 'success', # BLUE
BUTTON_SUBMIT: 'success',
BUTTON_DONE: 'success',
BUTTON_NO: 'warning', # RED
BUTTON_NO_CITIZEN: 'warning', # RED
BUTTON_UNABLE: 'warning', # RED
BUTTON_NO_MATCH: 'warning', # RED
BUTTON_HUNG_UP: 'inverse', # BLACK
}
# Group names - for permissions
HELP_DESK_OPERATORS_GROUP = "Help Desk Operators"
HELP_DESK_SUPERVISORS_GROUP = "Help Desk Supervisors"
HELP_DESK_SENIOR_STAFF_GROUP = "Help Desk Senior Staff"
HELP_DESK_VIEW_GROUP = "Help Desk Viewers"
HELP_DESK_MANAGERS_GROUP = "Help Desk Managers"
# Permissions to give each group
# Operators are not quite a superset of the view-only permissions because operators
# don't necessarily need to be able to read the reports, I don't think.
# After that, though, each group contains the previous group's permissions,
# plus some new ones.
HELP_DESK_GROUP_PERMISSIONS = {
HELP_DESK_VIEW_GROUP: [
'help_desk.read_report',
'help_desk.read_case',
],
HELP_DESK_OPERATORS_GROUP: [
'help_desk.add_case',
'help_desk.change_case',
'help_desk.read_case'
],
HELP_DESK_SUPERVISORS_GROUP: [
HELP_DESK_VIEW_GROUP,
HELP_DESK_OPERATORS_GROUP,
'help_desk.add_operator',
'help_desk.add_update',
'help_desk.cancel_registration_change',
'help_desk.mark_case',
],
HELP_DESK_SENIOR_STAFF_GROUP: [
HELP_DESK_SUPERVISORS_GROUP,
'help_desk.recommend_case',
],
HELP_DESK_MANAGERS_GROUP: [
HELP_DESK_SENIOR_STAFF_GROUP,
'help_desk.browse_fieldstaff',
'help_desk.read_fieldstaff',
'help_desk.add_fieldstaff',
'help_desk.add_senior_staff',
'help_desk.add_supervisor',
'help_desk.add_viewonly',
'help_desk.change_fieldstaff',
'help_desk.change_staff_password',
'help_desk.resolve_case',
'help_desk.suspend_fieldstaff',
],
}
def all_help_desk_groups():
return list(Group.objects.filter(name__in=HELP_DESK_GROUP_PERMISSIONS.keys(),))
# 1. Citizen has complained about the service.
# Action: Mark as seen
#
# 2. Accidentally unlocked.
# Action: Re-lock or Ignore
#
# 3. Citizen changes their mind about unlocking.
# Action: Re-lock or Ignore
#
# 4. Phone call was harassing.
# Action: Mark as seen
REASONS_TO_MARK = (
('complaint', _('Person has complained about the service.')),
('accident', _('Accidentally unlocked.')),
('changed_mind', _('Person on the end of the phone changes their mind about unlocking.')),
('harassing', _('Phone call was harassing. ')),
('other', _('Other reason, see comments.')),
)
ALLOWED_ACTIONS = {
'complaint': ['seen'],
'accident': ['relock', 'ignore'],
'changed_mind': ['relock', 'ignore'],
'harassing': ['seen'],
'other': ['seen'],
}
CASE_ACTIONS = (
('seen', _('Mark as seen.')),
('relock', _('Re-lock.')),
('ignore', _('Ignore.')),
)
class FieldStaff(FormattedPhoneNumberMixin, AbstractTimestampTrashBinModel):
# Note that field staff are not website "User"s. Or if they are,
# we don't know or care.
name = models.CharField(_('name'), max_length=160, default='')
    staff_id = models.IntegerField(
_('staff id'),
unique=True,
validators=[
MinValueValidator(100),
MaxValueValidator(999),
]
)
    phone_number = PhoneNumberField(_('phone number'))
suspended = models.BooleanField(_('suspended'), blank=True, default=False)
class Meta:
ordering = ['name', 'staff_id']
verbose_name = _("field staff member")
verbose_name_plural = _("field staff members")
permissions = [
('browse_fieldstaff', _('Can browse field staff')),
('read_fieldstaff', _('Can read field staff')),
('suspend_fieldstaff', _('Can suspend field staff')), # Custom
]
def __str__(self): # pragma: no cover
return '%s (%s)' % (self.name, self.formatted_phone_number())
def get_absolute_url(self):
return reverse('read_fieldstaff', args=[self.pk])
# Note: Choices are char fields instead of integers to make it easier
# for other applications to make use of the data in the database.
class Case(StartEndTimeFormatterMixin, AbstractTimestampTrashBinModel):
# start and end time of the call
start_time = models.DateTimeField(_('start time'), default=now)
end_time = models.DateTimeField(_('end time'), null=True, blank=True)
current_screen = models.ForeignKey(
'help_desk.ScreenRecord', related_name='current_case', verbose_name=_('current screen'),
null=True, blank=True,
on_delete=models.SET_NULL,
)
# Incoming phone number
phone_number = PhoneNumberField(
_('phone number'),
null=True,
default=None,
blank=True, # Older cases didn't collect this information
)
# operator handling the call
operator = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_('operator'),
related_name='cases_as_operator',
on_delete=models.CASCADE)
# field staff making the call
field_staff = models.ForeignKey('FieldStaff', null=True, blank=True,
verbose_name=_('field staff'),
related_name='help_desk_cases',
on_delete=models.PROTECT,
)
# citizen whose registration is being worked with
citizen = models.ForeignKey('civil_registry.Citizen',
verbose_name=_('citizen'),
related_name='help_desk_cases',
on_delete=models.PROTECT,
# Nullable because when a call starts we don't know who
# it's for (and might never find out).
null=True, blank=True,
)
@property
def blocked(self):
"""Return True if there's a corresponding citizen and they're blocked from registering
and voting"""
return self.citizen and self.citizen.blocked
#: whether user's changes on their registered phone have been increased
changes_increased = models.BooleanField(_('changes increased'), default=False)
reg
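The `HELP_DESK_GROUP_PERMISSIONS` table earlier in this sample mixes permission strings with the names of other groups, so a group's effective permission set needs a recursive expansion. A hypothetical helper sketching that resolution (not part of the sample):

```python
def expand_group_permissions(group, table=HELP_DESK_GROUP_PERMISSIONS):
    perms = set()
    for item in table[group]:
        if item in table:   # nested group name: fold in its permissions too
            perms |= expand_group_permissions(item, table)
        else:               # plain permission string
            perms.add(item)
    return perms
```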
| EVEModX/Mods | mods/CustomThemeColor/ThemeConfig.py | Python | mit | 143 | 0.006993 |
ThemeID = 'UI/ColorThemes/Custom'
#themeID, baseColor, hiliteColor
THEMES = (('UI/ColorThemes/Custom', (0.05, 0.05, 0.05), (0.4, 0.8, 1.0)),)
| tuxlifan/moneyguru | core/tests/gui/general_ledger_table_test.py | Python | gpl-3.0 | 4,362 | 0.005961 |
# Created By: Virgil Dupras
# Created On: 2010-09-09
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.testutil import eq_
from ...model.account import AccountType
from ..base import TestApp, with_app
# ---
@with_app(TestApp)
def test_new_item_in_empty_table(app):
# Since we have no txn, we have nothing to show in the gltable. Performing new item has no
# effect.
app.show_glview()
app.mw.new_item() # no crash
eq_(len(app.gltable), 0)
# ---
def app_two_sided_txn():
app = TestApp()
app.add_accounts('foo', 'bar')
app.add_txn(description='hello', from_='foo', to='bar', amount='42')
app.show_glview()
return app
@with_app(app_two_sided_txn)
def test_delete_entry(app):
app.gltable.select([4]) # the 'foo' entry of 'hello'
app.mw.delete_item()
# both entries were removed
eq_(len(app.gltable), 0)
@with_app(app_two_sided_txn)
def test_dont_show_empty_accounts(app):
# When accounts have nothing to show, don't put them in the table.
app.drsel.select_prev_date_range()
eq_(len(app.gltable), 0)
@with_app(app_two_sided_txn)
def test_new_entry_with_account_row_selected(app):
    # Adding a new entry when the account row is selected doesn't cause a crash and creates a new
# entry.
app.gltable.select([0])
app.mw.new_item() # no crash
eq_(len(app.gltable), 7)
eq_(app.gltable.selected_indexes, [2])
@with_app(app_two_sided_txn)
def test_rows_data_with_two_sided_txn(app):
# In a general ledger, we end up with 6 lines: 2 account lines (titles), two entry lines as well
# as two total lines.
eq_(len(app.gltable), 6)
ACCOUNT_ROWS = {0, 3}
BOLD_ROWS = {2, 5}
EDITABLE_ROWS = {1, 4}
for i in range(6):
eq_(app.gltable.is_account_row(app.gltable[i]), i in ACCOUNT_ROWS)
eq_(app.gltable.is_bold_row(app.gltable[i]), i in BOLD_ROWS)
eq_(app.gltable.can_edit_cell('description', i), i in EDITABLE_ROWS)
eq_(app.gltable[0].account_name, 'bar')
eq_(app.gltable[3].account_name, 'foo')
eq_(app.gltable[1].description, 'hello')
eq_(app.gltable[1].debit, '42.00')
eq_(app.gltable[4].description, 'hello')
eq_(app.gltable[4].credit, '42.00')
@with_app(app_two_sided_txn)
def test_set_amount_without_shown_account(app):
# Previously, setting an amount while mainwindow.shown_account to None resulted in a crash
app.gltable[1].debit = '42' # no crash
eq_(app.gltable[1].debit, '42.00')
# ---
def app_txns_in_different_date_ranges():
app = TestApp()
app.drsel.select_month_range()
app.add_accounts('foo', 'bar')
app.add_txn(date='10/08/2010', description='first', from_='foo', to='bar', amount='42')
app.add_txn(date='10/09/2010', description='second', from_='foo', to='bar', amount='42')
app.drsel.select_prev_date_range()
app.show_glview()
return app
@with_app(app_txns_in_different_date_ranges)
def test_edit_item(app):
# the table correctly updates txn selection so that when edit item is called, the right txn
# shown up in the panel.
tpanel = app.mw.edit_item()
eq_(tpanel.description, 'first')
@with_app(app_txns_in_different_date_ranges)
def test_only_show_rows_in_date_range(app):
# Rows that are out of the date range aren't shown.
eq_(app.gltable[1].description, 'first')
@with_app(app_txns_in_different_date_ranges)
def test_previous_balance_rows(app):
# We show previous balance rows where appropriate
app.drsel.select_next_date_range()
eq_(app.gltable[1].description, 'Previous Balance')
eq_(app.gltable[1].balance, '42.00')
assert app.gltable.is_bold_row(app.gltable[1])
# ---
def app_txn_in_income():
app = TestApp()
app.add_account('foo', account_type=AccountType.Income)
app.add_account('bar')
app.add_txn(description='hello', from_='foo', to='bar', amount='42')
app.show_glview()
return app
@with_app(app_txn_in_income)
def test_balance_cell_is_empty_for_income_entries(app):
# Balance doesn't make any sense in income/expense, so don't show it
row = app.gltable[4] # income account shown second
eq_(row.balance, '')
| bodik10/Combinatorics | core.py | Python | gpl-3.0 | 3,143 | 0.013309 |
"модуль із класом Core що включає в собі деякі методи комбінаторики, які використовуються в програмі"
import itertools
from math import factorial as fact
class Core:
    # Generator methods for combinations/permutations
    #perm = itertools.permutations # permutations
    comb = itertools.combinations # combinations without repetition (order is ignored)
    comb_w_repl = itertools.combinations_with_replacement # combinations with repetition (order is ignored)
@staticmethod
    def place(seq, N, prev=[]): # arrangements of seq taken N at a time (order MATTERS)
        for char in seq:
res = prev + [char]
if len(res) == N:
yield res
else:
new_s = list(seq)
new_s.remove(char)
for res in Core.place(new_s, N, prev=res):
yield res
@staticmethod
    def place_w_repl(seq, depth, prev=()): # arrangements with repetition (order MATTERS)
for char in tuple(seq):
res = prev + (char,)
if len(res) == depth:
yield res
else:
for res in Core.place_w_repl(seq, depth, prev=res):
yield res
@staticmethod
    def perm(seq, uselessK): # permutations
for res in itertools.permutations(seq):
yield res
#
    # Methods for computing the number of combinations/permutations
@staticmethod
def comb_number(seq, k):
N = len(seq)
return int(fact(N)/(fact(k) * fact(N-k)))
@staticmethod
def comb_w_repl_number(seq, k):
N = len(seq)
return int(fact(N+k-1)/(fact(k) * fact(N-1)))
@staticmethod
def place_number(seq, k):
N = len(seq)
return int(fact(N)/fact(N-k))
@staticmethod
def place_w_repl_number(seq, k):
N = len(seq)
return N**k
@staticmethod
def perm_number(seq, uselessK=None):
N = len(seq)
return fact(N)
# testing
if __name__ == '__main__':
for i in Core.place_w_repl("abcd", 3):
print(i, end=" ")
print(Core.place_w_repl_number("abcd", 3))
for i in Core.place("abcdef", 3):
print(i, end=" ")
print(Core.place_number("abcdef", 3))
print(len(list(Core.perm(range(10), None))), Core.perm_number(range(10), None))
l=[]
for i in Core.comb(range(15545), 15):
l.append(i)
if len(l)>50: break
print(l)
print( len ( list(Core.place_w_repl("01", 8)) ) )
print( len ( list(Core.place("abcca", 3)) ) )
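A hedged cross-check of the counting formulas above against the standard library (Python 3.8+ ships math.comb and math.perm):

```python
import math

assert Core.comb_number(range(10), 3) == math.comb(10, 3)            # C(n, k)
assert Core.place_number(range(10), 3) == math.perm(10, 3)           # P(n, k)
assert Core.comb_w_repl_number(range(10), 3) == math.comb(10 + 3 - 1, 3)
```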
| dimddev/NetCatKS | examples/projects/rootapis/web_time_server/components/adapters/time/__init__.py | Python | bsd-2-clause | 587 | 0.003407 |
from datetime import datetime
from NetCatKS.Logger import Logger
from NetCatKS.Components import BaseRootAPI
class Time(BaseRootAPI):
def __init__(self, factory):
super(Time, self).__init__(factory)
self.factory = factory
self.logger = Logger()
def process_factory(self):
if self.factory.time == 'get':
self.factory.time = str(datetime.now())
else:
self.factory.time = 'service unavailable'
self.logger.debug('IN TIME API: {}'.format(self.factory.to_dict()))
        return self.factory
| franklingu/leetcode-solutions | questions/find-the-difference/Solution.py | Python | mit | 760 | 0.001316 |
"""
Given two strings s and t which consist of only lowercase letters.
String t is generated by random shuffling string s and then add one more letter at a random position.
Find the letter that was added in t.
Example:
Input:
s = "abcd"
t = "abcde"
Output:
e
Explanation:
'e' is the letter that was added.
"""
class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
mapping = [0] * 26
for c in s:
mapping[ord(c) - ord('a')] += 1
for c in t:
mapping[ord(c) - ord('a')] -= 1
for idx, num in enumerate(mapping):
            if num < 0:
                return chr(ord('a') + idx)
return None
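# An equivalent approach (illustrative, not part of the solution above): XOR
# the character codes of s and t; paired letters cancel and the leftover
# code is the added letter.
def find_the_difference_xor(s, t):
    acc = 0
    for c in s + t:
        acc ^= ord(c)
    return chr(acc)

assert find_the_difference_xor("abcd", "abcde") == "e"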
|
clarkduvall/serpy
|
setup.py
|
Python
|
mit
| 1,452
| 0
|
from codecs import open
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='serpy',
version='0.3.1',
description='ridiculously fast object serialization',
long_description=long_description,
url='https://github.com/clarkduvall/serpy',
author='Clark DuVall',
    author_email='clark.duvall@gmail.com',
license='MIT',
install_requires=['six'],
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords=('serialization', 'rest', 'json', 'api', 'marshal',
'marshalling', 'validation', 'schema', 'fast'),
packages=find_packages(exclude=[
'contrib',
'docs',
'tests*',
'benchmarks'
]),
)
|
Elucidation/ChessboardDetect
|
base_imgload.py
|
Python
|
mit
| 538
| 0.01487
|
from __future__ import print_function
from pylab import *
import numpy as np
import cv2
import sys
def main(filenames):
for filename in filenames:
print("Processing %s" % filename)
img = cv2.imread(filename)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Using opencv
cv2.imshow('image %dx%d' % (img.shape[1],img.shape[0]),img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
if len(sys.argv) > 1:
filenames = sys.argv[1:]
else:
filenames = ['input/6.jpg']
main(filenames)
|
John078/DEV6B
|
Herkansing6B/test_unit.py
|
Python
|
mit
| 2,881
| 0.03228
|
import unittest
import os
import flask
from model import *
from controller import controller
from Herkansing6B import views
from Herkansing6B import app
import tempfile
modelClass = Model()
class Test_Model_InputuserTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
#test InputuserTest function in combination with the /inputusertest route (source: views.py)
def test_model_InputuserTest(self):
        self.assertEqual(modelClass.InputuserTest("A","B","C","D","E","f","g","h","i","j"), ["a","b","c","d","e","f","g","h","i","j"])
class Test_Model_ResultTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
    #test ResultTest function in combination with the /resulttest route (source: views.py)
    def test_model_ResultTest_score_is_100(self):
        self.assertEqual(modelClass.ResultTest(["a","b","c","d","e","f","g","h","i","j"], ["a","b","c","d","e","f","g","h","i","j"]), 100)
#test ResultTest function in combination with the /resulttest route (source: views.py)
def test_model_ResultTest_score_is_60(self):
self.assertTrue(modelClass.ResultTest(["a","b","c","d","e","f","d","c","b","a"], ["a","b","c","d","e","f","g","h","i","j"]), 60)
class Test_Model_Easyanswers(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
#test Easyanswers function in combination with the /easytest route (source: views.py)
def test_model_Easyanswers(self):
response = self.app.post('/easytest', data = dict(), follow_redirects=True)
self.assertTrue(["to smoke", "laughing", "to go", "help", "to quit", "shouting", "to cook", "to open", "to change", "smoking"], response.data)
class Test_Model_Mediumanswers(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
#test Mediumanswers function in combination with the /mediumtest route (source: views.py)
def test_model_Mediumanswers(self):
response = self.app.post('/mediumtest', data = dict(), follow_redirects=True)
self.assertTrue(["getting", "stealing", "reading", "to go", "going", "to speak", "working", "to talk", "going", "playing"], response.data)
class Test_Model_Hardanswers(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
#test Hardanswers function in combination with the /hardtest route (source: views.py)
def test_model_Hardanswers(self):
response = self.app.post('/hardtest', data = dict(), follow_redirects=True)
self.assertTrue(["to help", "to do", "to become", "becoming", "to travel", "to be", "to speak", "seeing", "to call", "to go"], response.data)
if __name__ == '__main__':
unittest.main()
|
antoinecarme/pyaf
|
tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_0/ar_12/test_artificial_32_RelativeDifference_MovingAverage_0_12_0.py
|
Python
|
bsd-3-clause
| 276
| 0.083333
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 12);
|
badele/serialkiller-plugins
|
setup.py
|
Python
|
gpl-3.0
| 1,664
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
PYPI_RST_FILTERS = (
# Replace code-blocks
(r'\.\.\s? code-block::\s*(\w|\+)+', '::'),
# Replace image
(r'\.\.\s? image::.*', ''),
# Remove travis ci badge
    (r'.*travis-ci\.org/.*', ''),
# Remove pypip.in badges
(r'.*pypip\.in/.*', ''),
(r'.*crate\.io/.*', ''),
(r'.*coveralls\.io/.*', ''),
)
def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
    - travis ci build badge
'''
content = open(filename).read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content
def required(filename):
with open(filename) as f:
packages = f.read().splitlines()
return packages
setup(
name="serialkiller-plugins",
version="0.0.2",
description="Plugins for serialkiller project",
long_description=rst('README.rst') + rst('CHANGELOG.txt'),
author="Bruno Adelé",
author_email="Bruno Adelé <bruno@adele.im>",
url="https://github.com/badele/serialkiller-plugins",
license="GPL",
install_requires=required('requirements/base.txt'),
setup_requires=[],
tests_require=[
'pep8',
'coveralls'
],
test_suite='tests',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=[],
classifiers=[
'Programming Language :: Python',
],
)
|
gem/oq-engine
|
openquake/hazardlib/gsim_lt.py
|
Python
|
agpl-3.0
| 22,252
| 0
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2010-2022, GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import io
import os
import ast
import json
import logging
import operator
import itertools
from collections import namedtuple, defaultdict
import toml
import numpy
from openquake.baselib import hdf5
from openquake.baselib.node import Node as N, context
from openquake.baselib.general import duplicated, BASE94, group_array
from openquake.hazardlib import valid, nrml, pmf, lt, InvalidFile
from openquake.hazardlib.gsim.mgmpe.avg_poe_gmpe import AvgPoeGMPE
from openquake.hazardlib.gsim.base import CoeffsTable
from openquake.hazardlib.imt import from_string
BranchTuple = namedtuple('BranchTuple', 'trt id gsim weight effective')
class InvalidLogicTree(Exception):
pass
# manage the legacy logicTreeBranchingLevel nodes
def bsnodes(fname, branchinglevel):
if branchinglevel.tag.endswith('logicTreeBranchingLevel'):
if len(branchinglevel) > 1:
raise InvalidLogicTree(
'%s: Branching level %s has multiple branchsets'
% (fname, branchinglevel['branchingLevelID']))
return branchinglevel.nodes
elif branchinglevel.tag.endswith('logicTreeBranchSet'):
return [branchinglevel]
else:
raise ValueError('Expected BranchingLevel/BranchSet, got %s' %
branchinglevel)
def fix_bytes(record):
# convert a record with bytes fields into a dictionary of strings
dic = {}
for n in record.dtype.names:
v = record[n]
dic[n] = v.decode('utf-8') if isinstance(v, bytes) else v
return dic
class ImtWeight(object):
"""
A composite weight by IMTs extracted from the gsim_logic_tree_file
"""
def __init__(self, branch, fname):
with context(fname, branch.uncertaintyWeight):
nodes = list(branch.getnodes('uncertaintyWeight'))
if 'imt' in nodes[0].attrib:
raise InvalidLogicTree('The first uncertaintyWeight has an imt'
' attribute')
self.dic = {'weight': float(nodes[0].text)}
imts = []
for n in nodes[1:]:
self.dic[n['imt']] = float(n.text)
imts.append(n['imt'])
if len(set(imts)) < len(imts):
raise InvalidLogicTree(
'There are duplicated IMTs in the weights')
def __mul__(self, other):
new = object.__new__(self.__class__)
if isinstance(other, self.__class__):
keys = set(self.dic) | set(other.dic)
new.dic = {k: self[k] * other[k] for k in keys}
else: # assume a float
new.dic = {k: self.dic[k] * other for k in self.dic}
return new
__rmul__ = __mul__
def __add__(self, other):
new = object.__new__(self.__class__)
if isinstance(other, self.__class__):
new.dic = {k: self.dic[k] + other[k] for k in self.dic}
else: # assume a float
new.dic = {k: self.dic[k] + other for k in self.dic}
return new
__radd__ = __add__
def __truediv__(self, other):
new = object.__new__(self.__class__)
if isinstance(other, self.__class__):
new.dic = {k: self.dic[k] / other[k] for k in self.dic}
else: # assume a float
new.dic = {k: self.dic[k] / other for k in self.dic}
return new
def is_one(self):
"""
Check that all the inner weights are 1 up to the precision
"""
return all(abs(v - 1.) < pmf.PRECISION for v in self.dic.values() if v)
def __getitem__(self, imt):
try:
return self.dic[imt]
except KeyError:
return self.dic['weight']
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.dic)
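# A minimal sketch (plain dicts, not ImtWeight itself) of the per-IMT
# arithmetic above: keys missing on either side fall back to 'weight'.
#
#   def imt_mul(a, b):
#       get = lambda d, k: d.get(k, d['weight'])
#       return {k: get(a, k) * get(b, k) for k in set(a) | set(b)}
#
#   imt_mul({'weight': 0.5, 'PGA': 0.6}, {'weight': 0.4})
#   # -> {'PGA': 0.24, 'weight': 0.2}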
def keyno(branch_id, bsno, brno, fname='', chars=BASE94):
"""
:param branch_id: a branch ID string
:param bsno: number of the branchset (starting from 0)
    :param brno: number of the branch in the branchset (starting from 0)
    :param fname: name of the logic tree file, used only in error messages
    :returns: a short unique alias for the branch_id
"""
if not set(branch_id) <= set(chars):
        raise ValueError('invalid branch ID %r in %s' % (branch_id, fname))
return chars[brno] + str(bsno)
class GsimLogicTree(object):
"""
A GsimLogicTree instance is an iterable yielding `Realization`
tuples with attributes `value`, `weight` and `lt_path`, where
`value` is a dictionary {trt: gsim}, `weight` is a number in the
interval 0..1 and `lt_path` is a tuple with the branch ids of the
given realization.
:param str fname:
full path of the gsim_logic_tree file
:param tectonic_region_types:
a sequence of distinct tectonic region types
:param ltnode:
usually None, but it can also be a
:class:`openquake.hazardlib.nrml.Node` object describing the
GSIM logic tree XML file, to avoid reparsing it
"""
@classmethod
def from_(cls, gsim):
"""
Generate a trivial GsimLogicTree from a single GSIM instance.
"""
ltbranch = N('logicTreeBranch', {'branchID': 'b1'},
nodes=[N('uncertaintyModel', text=str(gsim)),
N('uncertaintyWeight', text='1.0')])
lt = N('logicTree', {'logicTreeID': 'lt1'},
nodes=[N('logicTreeBranchingLevel', {'branchingLevelID': 'bl1'},
nodes=[N('logicTreeBranchSet',
{'applyToTectonicRegionType': '*',
'branchSetID': 'bs1',
'uncertaintyType': 'gmpeModel'},
nodes=[ltbranch])])])
return cls('fake/' + gsim.__class__.__name__, ['*'], ltnode=lt)
def __init__(self, fname, tectonic_region_types=['*'], ltnode=None):
# tectonic_region_types usually comes from the source models
self.filename = fname
trts = sorted(tectonic_region_types)
if len(trts) > len(set(trts)):
raise ValueError(
'The given tectonic region types are not distinct: %s' %
','.join(trts))
self.values = defaultdict(list) # {trt: gsims}
self._ltnode = ltnode or nrml.read(fname).logicTree
self.bsetdict = {}
self.shortener = {}
self.branches = self._build_trts_branches(trts) # sorted by trt
if trts != ['*']:
# reduce self.values to the listed TRTs
values = {}
for trt in trts:
values[trt] = self.values[trt]
if not values[trt]:
raise InvalidLogicTree('%s is missing the TRT %r' %
(fname, trt))
self.values = values
if trts and not self.branches:
raise InvalidLogicTree(
'%s is missing in %s' % (set(tectonic_region_types), fname))
@property
def req_site_params(self):
site_params = set()
for trt in self.values:
for gsim in self.values[trt]:
site_params.update(gsim.REQUIRES_SITES_PARAMETERS)
return site_params
def check_imts(self, imts):
"""
Make sure the IMTs are recognized by all GSIMs in the logic tree
"""
for trt in self.values:
for gsim in self.values[trt]:
for attr in dir(gsim):
coeffs = getattr(gsim, attr)
if not
|
puttarajubr/commcare-hq
|
corehq/ex-submodules/casexml/apps/phone/data_providers/case/load_testing.py
|
Python
|
bsd-3-clause
| 1,857
| 0.001616
|
from copy import deepcopy
from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.data_providers.case.utils import CaseSyncUpdate
from casexml.apps.phone.xml import get_case_element
from corehq.toggles import ENABLE_LOADTEST_USERS
def get_loadtest_factor(domain, user):
"""
Gets the loadtest factor for a domain and user. Is always 1 unless
both the toggle is enabled for the domain, and the user has a non-zero,
non-null factor set.
"""
if domain and ENABLE_LOADTEST_USERS.enabled(domain):
return getattr(user, 'loadtest_factor', 1) or 1
return 1
def transform_loadtest_update(update, factor):
"""
Returns a new CaseSyncUpdate object (from an existing one) with all the
case IDs and names mapped to have the factor appended.
"""
def _map_id(id, count):
        return '{}-{}'.format(id, count)
case = CommCareCase.wrap(deepcopy(update.case._doc))
case._id = _map_id(case._id, factor)
for index in case.indices:
index.referenced_id = _map_id(index.referenced_id, factor)
case.name = '{} ({})'.format(case.name, factor)
return CaseSyncUpdate(case, update.sync_token, required_updates=update.required_updates)
def append_update_to_response(response, update, restore_state):
"""
Adds the XML from the case_update to the restore response.
If factor is > 1 it will append that many updates to the response for load testing purposes.
"""
current_count = 0
original_update = update
while current_count < restore_state.loadtest_factor:
element = get_case_element(update.case, update.required_updates, restore_state.version)
response.append(element)
current_count += 1
if current_count < restore_state.loadtest_factor:
update = transform_loadtest_update(original_update, current_count)
|
kelvan/pyspaceshooter
|
objects.py
|
Python
|
gpl-3.0
| 7,112
| 0
|
import pygame
from utils import load_image, laserSound, rocketSound, explodeSound, \
missleExplosion, scream, load_sliced_sprites, chickenSound
import stats
class Shot(pygame.sprite.Sprite):
def __init__(self, x, y, image, enemies, power, speed, maxspeed=None):
pygame.sprite.Sprite.__init__(self)
screen = pygame.display.get_surface()
self.image, self.rect = load_image(image, -1)
self.area = screen.get_rect()
self.rect.centerx = x
self.enemies = enemies
self.rect.bottom = y
self.speed = speed
if maxspeed:
self.maxspeed = maxspeed
else:
self.maxspeed = speed / 2
self.power = power
p = min(stats.bonus['power'], 1.5)
width = int(p * self.image.get_width())
height = int(p * self.image.get_height())
self.image = pygame.transform.scale(self.image, (width, height))
def update(self):
s = min(self.speed * stats.bonus['bspeed'], self.maxspeed)
self.move(s)
def move(self, dy):
        newpos = self.rect.move((0, dy))
if not self.area.contains(newpos):
if self.rect.bottom < self.area.top:
self.kill()
self.check_collide()
self.rect = newpos
def check_collide(self):
for c in pygame.sprite.spritecollide(self, self.enemies, False):
self.kill()
stats.damage += self.power * stats.bonus['power']
c.hit(self.power * stats.bonus['power'])
class Rocket(Shot):
def __init__(self, x, y, enemies, power=5, speed=-5):
Shot.__init__(self, x, y, 'rocket.png', enemies, power, speed)
class Laser(Shot):
def __init__(self, x, y, enemies, power=2, speed=-10):
Shot.__init__(self, x, y, 'laser.png', enemies, power, speed)
class SuperChicken(Shot):
def __init__(self, x, y, enemies, power=200, speed=-25):
Shot.__init__(self, x, y, 'superchicken.png', enemies, power, speed)
class Fighter(pygame.sprite.Sprite):
    def __init__(self, enemies, speed=8, maxspeed=20):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image('fighter.png', -1)
self.screen = pygame.display.get_surface()
self.area = self.screen.get_rect()
self.speed = speed
self.maxspeed = maxspeed
self.dx = 0
self.enemies = enemies
self.shooting = False
self.rect.bottom = self.area.bottom
self.rect.centerx = self.area.centerx
self.shots = pygame.sprite.Group()
self._ticks = pygame.time.get_ticks()
self._rocketDelay = 500
self._last_update = 0
self._start = pygame.time.get_ticks()
self.shotType = None
def update(self):
if self.shooting:
self.shotType()
if self.dx:
self.move_side(self.dx)
self.shots.update()
self.shots.draw(self.screen)
def move_side(self, dx):
dx *= stats.bonus['speed']
if self.rect.left + dx <= self.area.left:
self.rect.left = self.area.left
elif self.rect.right + dx >= self.area.right:
self.rect.right = self.area.right
else:
self.rect.centerx += dx
def move(self, dx, dy):
self.rect.midtop = (self.rect.x + dx, self.rect.y + dy)
def shootRocket(self, delay=500, mindelay=200):
d = min(delay / stats.bonus['rspeed'], mindelay)
if pygame.time.get_ticks() - self._last_update > d:
self._last_update = pygame.time.get_ticks()
rocket = Rocket(self.rect.centerx, self.rect.top, self.enemies)
self.shots.add(rocket)
stats.shots += 1
rocketSound.play()
def shootLaser(self, delay=200, mindelay=100):
d = min(delay / stats.bonus['rspeed'], mindelay)
if pygame.time.get_ticks() - self._last_update > d:
self._last_update = pygame.time.get_ticks()
laser = Laser(self.rect.centerx, self.rect.top, self.enemies)
self.shots.add(laser)
stats.shots += 1
laserSound.play()
def shootSuperChicken(self, delay=1000, mindelay=750):
d = min(delay / stats.bonus['rspeed'], mindelay)
if pygame.time.get_ticks() - self._last_update > d:
self._last_update = pygame.time.get_ticks()
chicken = SuperChicken(self.rect.centerx, self.rect.top,
self.enemies)
self.shots.add(chicken)
stats.shots += 1
chickenSound.play()
class Enemy(pygame.sprite.Sprite):
def __init__(self, image, life, speed, minspeed=None, bombdelay=None,
bomb=None, explodeimg='explode.png'):
        if not minspeed:
            minspeed = speed / 2
        self.minspeed = minspeed
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image(image, -1)
screen = pygame.display.get_surface()
self.area = screen.get_rect()
self.speed = speed
self.life = life
self.exploding = False
self._last_update = 0
self._frame = 0
self._start = pygame.time.get_ticks()
self._delay = 100
self.explodeSound = explodeSound
self.explodeImages = load_sliced_sprites(128, 128, explodeimg)
def update(self):
if self.exploding:
self.explode()
else:
self._move()
def _move(self):
if self.rect.left > self.area.right or \
self.rect.right < self.area.left:
if self.rect.bottom <= self.area.bottom - 128:
self.rect.centery += 40
else:
self._breakthrough()
self.speed *= -1.05
if self.rect.left > self.area.right:
self.rect.left = self.area.right
else:
self.rect.right = self.area.left
else:
if self.speed > 0:
s = max(self.speed / stats.bonus['slowdown'], -self.minspeed)
else:
s = min(self.speed / stats.bonus['slowdown'], self.minspeed)
self.rect = self.rect.move((s, 0))
def _breakthrough(self):
stats.life -= 1
scream.play()
self.kill()
def hit(self, power):
self.life -= power
missleExplosion.play()
if self.life <= 0:
self.exploding = True
self._start = pygame.time.get_ticks()
def explode(self):
t = pygame.time.get_ticks()
if self._frame == 0:
self.explodeSound.play()
self.rect.centery -= 40
if t - self._last_update > self._delay:
self._frame += 1
if self._frame >= len(self.explodeImages):
self._frame = 0
self.kill()
stats.kills += 1
self.image = self.explodeImages[self._frame]
self._last_update = t
class Ufo(Enemy):
def __init__(self):
Enemy.__init__(self, 'ufo.png', 25, 8)
self.rect.topright = 0, 30
stats.spawned += 1
|
zenranda/proj10-final
|
test_main.py
|
Python
|
artistic-2.0
| 2,958
| 0.014875
|
###
#Various nose tests. If you want to adapt this for your own use, be aware that the start/end block list has a very specific formatting.
###
import sys
import date_chopper
import arrow
from pymongo import MongoClient
import secrets.admin_secrets
import secrets.client_secrets
MONGO_CLIENT_URL = "mongodb://{}:{}@localhost:{}/{}".format(
secrets.client_secrets.db_user,
secrets.client_secrets.db_user_pw,
secrets.admin_secrets.port,
secrets.client_secrets.db)
try:
dbclient = MongoClient(MONGO_CLIENT_URL)
db = getattr(dbclient, secrets.client_secrets.db)
collection = db.dated
base_size = collection.count() #current size of the db, for comparison later
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
def test_overlap(): #Given a sample list, check to see if its dates overlap at all
ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T11:00:00', '2016-11-20T15:00:00']]
assert (date_chopper.date_overlap(ranges)) == False
ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T11:00:00', '2016-11-20T15:00:00'], ['2016-11-20T16:00:00', '2016-11-20T18:00:00'], ['2016-11-20T17:00:00', '2016-11-20T19:00:00']]
assert (date_chopper.date_overlap(ranges)) == True
ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T10:30:00', '2016-11-20T13:30:00'], ['2016-11-20T13:00:00', '2016-11-20T14:00:00'], ['2016-11-20T14:10:00', '2016-11-20T16:00:00']]
assert (date_chopper.date_overlap(ranges)) == True
def test_underlap(): #tests if the program can detect start times that go out of bounds
    ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T11:00:00', '2016-11-20T15:00:00']]
start = arrow.get('2016-11-20T05:00:00')
assert (date_chopper.date_underlap(ranges, start)) == True
ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T11:00:00', '2016-11-20T15:00:00']]
start = arrow.get('2016-11-20T10:00:00')
assert (date_chopper.date_underlap(ranges, start)) == False
ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T11:00:00', '2016-11-20T15:00:00']]
start = arrow.get('2016-11-20T16:00:00')
assert (date_chopper.date_underlap(ranges, start)) == False
ranges = [['2016-11-20T08:30:00', '2016-11-20T10:30:00'], ['2016-11-20T11:00:00', '2016-11-20T15:00:00']]
start = arrow.get('2016-11-20T10:00:00')
assert (date_chopper.date_underlap(ranges, start)) == False
def test_db(): #tests basic DB operation
assert collection != None
collection.insert({"type" : "freebusy", "entry" : [["entry 1"], ["entry 2"]]})
assert base_size < collection.count()
collection.remove({"entry" : [["entry 1"], ["entry 2"]]})
assert base_size == collection.count()
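# A minimal sketch (not the date_chopper implementation) of the overlap rule
# the tests above pin down: after sorting by start, any range that begins
# strictly before its predecessor ends is an overlap.
def ranges_overlap(ranges):
    rs = sorted(ranges)
    return any(start < prev_end
               for (_, prev_end), (start, _) in zip(rs, rs[1:]))

assert ranges_overlap([['2016-11-20T16:00:00', '2016-11-20T18:00:00'],
                       ['2016-11-20T17:00:00', '2016-11-20T19:00:00']])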
|
zzyxzz/T2-list-parser
|
t2-parser.py
|
Python
|
mit
| 4,480
| 0.012723
|
import os
import csv
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfdevice import PDFDevice
from pdfminer.layout import LAParams, LTTextBox, LTLine, LTTextLine
from pdfminer.converter import PDFPageAggregator
#set the path for the pdf file of T2 sponsors list.
dir = os.path.dirname(os.path.realpath(__file__))
rel_path = "files/Tier_2_5_Register_of_Sponsors_2015-05-01.pdf"
pdfpath = os.path.join(dir,rel_path)
print pdfpath
field_names = ["Organisation Name","Town/City","Tier &
|
Rating","Sub Tier"]
#open the pdf file of T2 sponsor list.
with open(pdfpath, "r") as pdf_file:
#create a parser object of the file.
parser = PDFParser(pdf_file)
#create a PDF document object to store the document structure.
doc = PDFDocument(parser)
#check whether the document allows text extraction. If not, abort.
if not doc.is_extractable:
raise PDFTextExtractionNotAllowed
#create a PDF resource manager object to store shared resources.
rsrcmgr = PDFResourceManager()
#set parameters for analysis.
laparams = LAParams(line_margin=0.2)
#create a PDF page aggregator object.
device = PDFPageAggregator(rsrcmgr,laparams=laparams)
#create a PDF intepreter object to process each page.
interpreter = PDFPageInterpreter(rsrcmgr, device)
page_dict = {}
#set column locations of the table.
col1 = 24.0
col2 = 351.0
col3 = 576.0
col4 = 678.0
#set top margin of the table.
top_h1 = 396.0
top_h2 = 568.0
#set keys of each table column.
col1_key = int(col1)
col2_key = int(col2)
col3_key = int(col3)
col4_key = int(col4)
#initialise page_dict that stores columns of a row in the table.
page_dict[col1_key] = ""#field_names[0]
page_dict[col2_key] = ""#field_names[1]
page_dict[col3_key] = ""#field_names[2]
page_dict[col4_key] = ""#field_names[3]
    #open and write data.csv file.
with open("data.csv","wb") as data:
writer = csv.writer(data)
#process each page contained in the PDF document.
for i,page in enumerate(PDFPage.create_pages(doc)):
#page_content that stores table elements in current page.
page_content = []
#process each page.
interpreter.process_page(page)
#receive the LTPage object for the page.
layout = device.get_result()
print "page {}".format(i+1)
# if i == 2: break
#choose correct top margin for page 1.
if i == 0:
top_h = top_h1
else:
top_h = top_h2
#process each child objects within LTPage object.
for obj in layout:
#select only LTTextBox and LTLine objects.
if isinstance(obj,LTTextBox) or isinstance(obj,LTLine):
#get x0,y0 position.
x0 = obj.bbox[0]
y0 = obj.bbox[1]
#if col_key is table columns, store the object it in page_content.
col_key = int(x0)
if col_key in page_dict and y0 < top_h:
page_content.append(obj)
#sort page_content by y0 position.
page_content.sort(key=lambda x: x.bbox[1], reverse=True)
#iterate sorted page_content.
for obj in page_content:
#if it is a LTLine object.
if isinstance(obj,LTLine):
#combine columns into a row.
row=[page_dict[col1_key],page_dict[col2_key],
page_dict[col3_key],page_dict[col4_key]]
#write the row into csv file.
writer.writerow([s.encode("utf-8") for s in row])
# print "Line here {}".format(ob.bbox)
#reset page_dict to store columns of next row.
page_dict[col1_key] = ""
page_dict[col2_key] = ""
page_dict[col3_key] = ""
page_dict[col4_key] = ""
#if it is a LTTextBox object.
else:
# #store it to corresponding column.
page_dict[int(obj.bbox[0])] += obj.get_text()
# print ob.get_text()
|
msherry/PyXB-1.1.4
|
tests/trac/test-trac-0071.py
|
Python
|
apache-2.0
| 5,134
| 0.002922
|
import pyxb_114.binding.generate
import pyxb_114.binding.datatypes as xs
import pyxb_114.binding.basis
import pyxb_114.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:tns="urn:trac-0071" targetNamespace="urn:trac-0071">
<xs:element name="MetadataDocument" type="tns:MetadataType"/>
<xs:complexType name="MetadataType">
<xs:sequence maxOccurs="1" minOccurs="1">
<xs:element name="template" type="xs:string"/>
<xs:element name="timespan" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element name="field" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element name="name" type="xs:string"/>
<xs:element name="value" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="lang" type="xs:language"/>
<xs:attribute name="user" type="xs:string"/>
<xs:attribute name="timestamp" type="xs:dateTime"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute name="track" type="xs:string"/>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute name="start" type="xs:string"/>
<xs:attribute name="end" type="xs:string"/>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:schema>
'''
#file('schema.xsd', 'w').write(xsd)
code = pyxb_114.binding.generate.GeneratePython(schema_text=xsd)
#file('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb_114.exceptions_ import *
import unittest
class TestTrac_0071 (unittest.TestCase):
def test (self):
newdoc = MetadataDocument()
metadatadoc_type = MetadataDocument.typeDefinition()
timespan_element = metadatadoc_type._ElementMap['timespan'].elementBinding()
timespan_type = timespan_element.typeDefinition()
field_element = timespan_type._ElementMap['field'].elementBinding()
field_type = field_element.typeDefinition()
value_element = field_type._ElementMap['value'].elementBinding()
value_type = value_element.typeDefinition()
newdoc.template = 'anewtemplate'
field = field_type('title', pyxb_114.BIND('foo', lang='ENG'), _element=field_element)
self.assertTrue(isinstance(field.value_, list))
self.assertEqual(1, len(field.value_))
self.assertTrue(isinstance(field.value_[0], value_type))
field.validateBinding()
self.assertEqual('<field><name>title</name><value lang="ENG">foo</value></field>', field.toxml("utf-8", root_only=True))
        field = field_type(name='title', _element=field_element)
field.value_.append(pyxb_114.BIND('foo', lang='ENG'))
self.assertTrue(isinstance(field.value_, list))
self.assertEqual(1, len(field.value_))
self.assertTrue(isinstance(field.value_[0], pyxb_114.BIND))
field.validateBinding()
self.assertTrue(isinstance(field.value_[0], pyxb_114.BIND))
self.assertEqual('<field><name>title</name><value lang="ENG">foo</value></field>', field.toxml("utf-8", root_only=True))
'''
NOT YET FINISHED
newdoc.timespan.append(pyxb_114.BIND( # Single timespan
pyxb_114.BIND( # First field instance
'title',
pyxb_114.BIND('foo', lang='ENG')
),
start='-INF', end='+INF'))
newdoc.validateBinding()
timespan = newdoc.timespan[0]
#self.assertTrue(isinstance(timespan, timespan_type))
print newdoc.toxml("utf-8")
newdoc.timespan[:] = []
newdoc.timespan.append(pyxb_114.BIND( # Single timespan
pyxb_114.BIND( # First field instance
name='title',
value=pyxb_114.BIND('foo', lang='ENG')
),
start='-INF', end='+INF'))
newdoc.validateBinding()
timespan = newdoc.timespan[0]
#self.assertTrue(isinstance(timespan, timespan_type))
print newdoc.toxml("utf-8")
newdoc.timespan[:] = []
newdoc.timespan.append(pyxb_114.BIND( # Single timespan
pyxb_114.BIND( # First field instance
name='title',
value_=pyxb_114.BIND('foo', lang='ENG')
),
start='-INF', end='+INF'))
newdoc.validateBinding()
timespan = newdoc.timespan[0]
#self.assertTrue(isinstance(timespan, timespan_type))
print newdoc.toxml("utf-8")
newdoc.timespan[:] = []
'''
if __name__ == '__main__':
unittest.main()
|
csabatini/deploy-azurerm-django
|
azure_ad_auth/views.py
|
Python
|
bsd-3-clause
| 2,354
| 0.000425
|
from .backends import AzureActiveDirectoryBackend
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
import requests
import urlparse
import uuid
import json
@never_cache
def auth(request):
backend = AzureActiveDirectoryBackend()
redirect_uri = request.build_absolute_uri(reverse(complete))
nonce = str(uuid.uuid4())
request.session['nonce'] = nonce
state = str(uuid.uuid4())
request.session['state'] = state
login_url = backend.login_url(
redirect_uri=redirect_uri,
nonce=nonce,
state=state
)
return HttpResponseRedirect(login_url)
@never_cache
@csrf_exempt
def complete(request):
backend = AzureActiveDirectoryBackend()
redirect_uri = request.build_absolute_uri(reverse(complete))
method = 'GET' if backend.RESPONSE_MODE == 'fragment' else 'POST'
original_state = request.session.get('state')
state = getattr(request, method).get('state')
if original_state == state:
code = getattr(request, method).get('code')
nonce = request.session.get('nonce')
if code is not None:
data = backend.token_params(
redirect_uri=redirect_uri,
code=code,
)
url = data.pop('endpoint', None)
token_response = requests.post(url, data=data)
payload = json.loads(token_response.text)
id_token = payload['id_token']
request.session['access_token'] = payload['access_token']
request.session['code'] = code
user = backend.authenticate(token=id_token, nonce=nonce)
if user is not None:
login(request, user)
return HttpResponseRedirect(get_login_success_url(request))
return HttpResponseRedirect('failure')
def get_login_success_url(request):
redirect_to = request.GET.get(REDIRECT_FIELD_NAME, '')
netloc = urlparse.urlparse(redirect_to)[1]
if not redirect_to:
redirect_to = settings.AAD_REDIRECT_URL
elif netloc and netloc != request.get_host():
redirect_to = settings.AAD_REDIRECT_URL
return redirect_to
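# Illustrative behaviour of the open-redirect guard above (host value is
# hypothetical): with request host 'app.example.com',
#   ?next=/dashboard                  -> '/dashboard' (no netloc, kept)
#   ?next=https://evil.example/phish  -> settings.AAD_REDIRECT_URL (foreign host)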
|
jamesbeebop/evennia
|
evennia/server/profiling/dummyrunner_settings.py
|
Python
|
bsd-3-clause
| 8,806
| 0.000227
|
"""
Settings and actions for the dummyrunner
This module defines dummyrunner settings and sets up
the actions available to dummy accounts.
The settings are global variables:
TIMESTEP - time in seconds between each 'tick'
CHANCE_OF_ACTION - chance 0-1 of action happening
CHANCE_OF_LOGIN - chance 0-1 of login happening
TELNET_PORT - port to use, defaults to settings.TELNET_PORT
ACTIONS - see below
ACTIONS is a tuple
```
(login_func, logout_func, (0.3, func1), (0.1, func2) ... )
```
where the first entry is the function to call on first connect, with a
chance of occurring given by CHANCE_OF_LOGIN. This function is usually
responsible for logging in the account. The second entry is always
called when the dummyrunner disconnects from the server and should
thus issue a logout command. The other entries are tuples (chance,
func). They are picked randomly, their commonality based on the
cumulative chance given (the chance is normalized between all options
so it will still work even if the given chances don't add up to 1).
Since each function can return a list of game-command strings, each
function may result in multiple operations.
An action-function is called with a "client" argument which is a
reference to the dummy client currently performing the action. It
returns a string or a list of command strings to execute. Use the
client object for optionally saving data between actions.
The client object has the following relevant properties and methods:
- key - an optional client key. This is only used for dummyrunner output.
Default is "Dummy-<cid>"
- cid - client id
- gid - globally unique id, hashed with time stamp
- istep - the current step
- exits - an empty list. Can be used to store exit names
- objs - an empty list. Can be used to store object names
- counter() - returns a unique increasing id, hashed with time stamp
to make it unique also between dummyrunner instances.
The return should either be a single command string or a tuple of
command strings. This list of commands will always be executed every
TIMESTEP with a chance given by CHANCE_OF_ACTION, in the order given
(no randomness) and allows for setting up a more complex chain of
commands (such as creating an account and logging in).
---
"""
# Dummy runner settings
# Time between each dummyrunner "tick", in seconds. Each dummy
# will be called with this frequency.
TIMESTEP = 2
# Chance of a dummy actually performing an action on a given tick.
# This spreads out usage randomly, like it would be in reality.
CHANCE_OF_ACTION = 0.5
# Chance of a currently unlogged-in dummy performing its login
# action every tick. This emulates not all accounts logging in
# at exactly the same time.
CHANCE_OF_LOGIN = 1.0
# Which telnet port to connect to. If set to None, uses the first
# default telnet port of the running server.
TELNET_PORT = None
# Setup actions tuple
# some convenient templates
DUMMY_NAME = "Dummy-%s"
DUMMY_PWD = "password-%s"
START_ROOM = "testing_room_start_%s"
ROOM_TEMPLATE = "testing_room_%s"
EXIT_TEMPLATE = "exit_%s"
OBJ_TEMPLATE = "testing_obj_%s"
TOBJ_TEMPLATE = "testing_button_%s"
TOBJ_TYPECLASS = "contrib.tutorial_examples.red_button.RedButton"
# action function definitions (pick and choose from
# these to build a client "usage profile"
# login/logout
def c_login(client):
"logins to the game"
# we always use a new client name
cname = DUMMY_NAME % client.gid
cpwd = DUMMY_PWD % client.gid
# set up for digging a first room (to move to and keep the
# login room clean)
roomname = ROOM_TEMPLATE % client.counter()
exitname1 = EXIT_TEMPLATE % client.counter()
exitname2 = EXIT_TEMPLATE % client.counter()
client.exits.extend([exitname1, exitname2])
cmds = (
"create %s %s" % (cname, cpwd),
"connect %s %s" % (cname, cpwd),
"@dig %s" % START_ROOM % client.gid,
"@teleport %s" % START_ROOM % client.gid,
"@dig %s = %s, %s" % (roomname, exitname1, exitname2),
)
return cmds
def c_login_nodig(client):
"logins, don't dig its own room"
cname = DUMMY_NAME % client.gid
cpwd = DUMMY_PWD % client.gid
cmds = ("create %s %s" % (cname, cpwd), "connect %s %s" % (cname, cpwd))
return cmds
def c_logout(client):
"logouts of the game"
return "@quit"
# random commands
def c_looks(client):
"looks at various objects"
cmds = ["look %s" % obj for obj in client.objs]
if not cmds:
cmds = ["look %s" % exi for exi in client.exits]
if not cmds:
cmds = "look"
return cmds
def c_examines(client):
"examines various objects"
cmds = ["examine %s" % obj for obj in client.objs]
if not cmds:
cmds = ["examine %s" % exi for exi in client.exits]
if not cmds:
cmds = "examine me"
return cmds
def c_idles(client):
"idles"
cmds = ("idle", "idle")
return cmds
def c_help(client):
"reads help files"
cmds = ("help", "help @teleport", "help look", "help @tunnel", "help @dig")
return cmds
def c_digs(client):
"digs a new room, storing exit names on client"
roomname = ROOM_TEMPLATE % client.counter()
exitname1 = EXIT_TEMPLATE % client.counter()
exitname2 = EXIT_TEMPLATE % client.counter()
client.exits.extend([exitname1, exitname2])
return "@dig/tel %s = %s, %s" % (roomname, exitname1, exitname2)
def c_creates_obj(client):
"creates normal objects, storing their name on client"
objname = OBJ_TEMPLATE % client.counter()
client.objs.append(objname)
cmds = (
"@create %s" % objname,
'@desc %s = "this is a test object' % objname,
"@set %s/testattr = this is a test attribute value." % objname,
"@set %s/testattr2 = this is a second test attribute." % objname,
)
return cmds
def c_creates_button(client):
"creates example button, storing name on client"
objname = TOBJ_TEMPLATE % client.counter()
client.objs.append(objname)
cmds = ("@create %s:%s" % (objname, TOBJ_TYPECLASS), "@desc %s = test red button!" % objname)
return cmds
def c_socialize(client):
"socializechats on channel"
cmds = (
"ooc Hello!",
"ooc Testing ...",
"ooc Testing ... times 2",
"say Yo!",
"emote stands looking around.",
)
return cmds
def c_moves(client):
"moves to a previously created room, using the stored exits"
cmds = client.exits # try all exits - finally one will work
return "look" if not cmds else cmds
def c_moves_n(client):
"move through north exit if available"
return "north"
def c_moves_s(client):
"move through south exit if available"
return "south"
# Action tuple (required)
#
# This is a tuple of client action functions. The first element is the
# function the client should use to log into the game and move to
# STARTROOM . The second element is the logout command, for cleanly
# exiting the mud. The following elements are 2-tuples of (probability,
# action_function). The probablities should normally sum up to 1,
# otherwise the system will normalize them.
#
# "normal builder" definitionj
# ACTIONS = ( c_login,
# c_logout,
# (0.5, c_looks),
#             (0.08, c_examines),
# (0.1, c_help),
# (0.01, c_digs),
#             (0.01, c_creates_obj),
# (0.3, c_moves))
# "heavy" builder definition
# ACTIONS = ( c_login,
# c_logout,
# (0.2, c_looks),
# (0.1, c_examines),
# (0.2, c_help),
# (0.1, c_digs),
# (0.1, c_creates_obj),
# #(0.01, c_creates_button),
# (0.2, c_moves))
# "passive account" definition
# ACTIONS = ( c_login,
# c_logout,
# (0.7, c_looks),
# #(0.1, c_examines),
# (0.3, c_help))
# #(0.1, c_digs),
# #(0.1, c_creates_obj),
# #(0.1, c_creates_button),
# #(0.4, c_moves))
# "inactive account" definition
# ACTIONS = (c_login_nodig,
# c_logout,
# (1.0, c_idles))
# "normal account" definition
ACTIONS = (c_login, c_logout, (0.01, c_digs)
|
zentralopensource/zentral
|
zentral/contrib/santa/migrations/0026_configuration_allow_unknown_shard.py
|
Python
|
apache-2.0
| 1,128
| 0.001773
|
# Generated by Django 2.2.24 on 2021-11-11 08:49
import django.core.validators
from django.db import migrations, models
from django.db.models import Count
def add_shard_to_no_rule_configurations(apps, schema_editor):
Configuration = apps.get_model("santa", "Configuration")
    for configuration in Configuration.objects.annotate(num_rules=Count('rule')).filter(num_rules=0):
configuration.allow_unknown_shard = 5
configuration.save()
class Migration(migrations.Migration):
dependencies = [
('santa', '0025_auto_20210921_1517'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='allow_unknown_shard',
field=models.IntegerField(
                default=100,
help_text="Restrict the reporting of 'Allow Unknown' events to a percentage (0-100) of hosts",
validators=[django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100)]
),
),
migrations.RunPython(add_shard_to_no_rule_configurations),
]
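# Suggested pattern (not part of the original migration): passing an explicit
# no-op reverse keeps the data migration reversible, e.g.
#
#   migrations.RunPython(add_shard_to_no_rule_configurations,
#                        migrations.RunPython.noop)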
|
mlell/tapas
|
scripts/src/indel.py
|
Python
|
mit
| 16,326
| 0.011889
|
#!/usr/bin/env python
import argparse
import csv
import random
import sys
import math
from math import inf
from textwrap import dedent
from editCIGAR import CIGAR
from textwrap import dedent
# Don't throw an error if output is piped into a program that doesn't
# read all of its input, like `head`.
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
description = dedent("""
    Insert insertions and deletions randomly into a sequence received
via standard input.
Expected input format: For every input line: A string of nucleotides
followed by a tab character, followed by a corresponding CIGAR
string. Example:
–––––––––––––––––––––––
seq cigar
GGTGACATAAAGGC 8M5I
TTCCGCAGGG 10M
CTCGTGGAGT 5M2D5M
....
–––––––––––––––––––––––
(white space stands for one tab character)
If no CIGAR strings are available for the nucleotides, then use the
parameter --cigar-new. In that case, only a nucleotide string per line is
expected. Every read is expected to have complete (mis)match to the
reference (CIGAR character M).
""")
def parse_arguments(argv):
p = argparse.ArgumentParser(description=description,
formatter_class= argparse.RawTextHelpFormatter)
for s,l in [('in','insert'),('del','deletion')]:
p.add_argument('--{}-prob'.format(s), default=0,
type = checkPositive(float), metavar='P', help=
"Per-base probability of a seqence {}".format(l))
p.add_argument('--{}-exp'.format(s), default=None, metavar = 'L',
type = float, help=dedent("""\
Length distribution of {}s shall be exponentially
distributed, with 50%% of reads longer than L""")\
.format(l))
p.add_argument('--cigar-new', default = False,
action='store_true', help=dedent("""\
Do not read CIGAR strings from standard input, but assume a
complete (mis)match (no indels, CIGAR character M) for every
nucleotide string."""))
p.add_argument('--col-seq', default = 'seq', type = str,
help = "Column name of the nucleotide strings")
p.add_argument('--col-cigar', default = 'cigar', type = str,
help = "Column name of the CIGAR strings")
# p.add_argument('--input-fmt', default = ['lines'],
# nargs='+', metavar='',
# help =dedent("""\
# Format of the file containing the CIGAR strings.
# Usage: --input-fmt lines
# --input-fmt tab COL-NUCL COL-CIGAR
# Choices:
# 'lines': One nucleotide and CIGAR string per input line
# (use --cigar-new to assume only M instead of giving
# CIGAR input)
#
# 'tab COL-NUCL COL-CIGAR': The file is in a tabular format.
# The first line contains the column names, the following
# files contain the content. Only the contents of the
# columns named COL-NUCL and COL-CIGAR are used. Columns
# are separated by a tab character (\\t), unless another
# character is specified by the --sep argument
# """))
# p.add_argument('--change-coords', nargs=2,
# help=dedent("""\
# If an output CIGAR string begins or ends with a deletion, change
# the true read coordinates instead. Else, such reads would not be
# assigned to their true position."""))
p.add_argument('--no-header',default = False, action = 'store_true',
help="""Do not expect a table header. Nucleotide strings
are expected as first input column and CIGAR strings as
second column. If no CIGAR strings are available, use
--cigar-new in addition to this option.""")
p.add_argument('--sep', default='\t', help=dedent("""\
Character separating the input columns if the input is
in tabular format (see --input-fmt). Common choices
are '\\t' (default), ',' or ';'."""))
p.add_argument('--seed', default = None, type=int,
help = dedent("""\
Set the random number generator seed to
this value. Calls with the same input files
and seed always produce the same result."""))
args = p.parse_args(argv)
if args.sep == '\\t':
args.sep = '\t'
if len(args.sep) != 1:
raise ValueError('--sep must be followed by only one character')
if args.in_exp is not None and (args.in_exp >= 1 or args.in_exp < 0):
raise ValueError('--in-exp must be >=0 and < 1')
if args.del_exp is not None and (args.del_exp >= 1 or args.del_exp < 0):
raise ValueError('--del-exp must be >=0 and < 1')
if args.in_prob < 0 or args.in_prob > 1:
raise ValueError('--in-prob must be >= 0 and <= 1')
if args.del_prob < 0 or args.del_prob > 1:
raise ValueError('--del-prob must be >= 0 and <= 1')
return args
def main(argv):
# --- Argument checks ----------------------------------------
args = parse_arguments(argv[1:])
if args.in_prob != 0:
if args.in_exp is None:
raise ValueError("--in-prob requires --in-exp")
if args.del_prob != 0:
if args.del_exp is None:
raise ValueError("--del-prob requires --del-exp")
if args.seed is not None:
random.seed(args.seed, version=2)
# --- Input parsing --------------------------------------------
input = sys.stdin
def safe_index(l, what):
try: return l.index(what)
except ValueError: return None
if not args.no_header:
header = next(input).rstrip().split(args.sep)
i_nucl, i_cigar = \
(safe_index(header, x) for x in \
(args.col_seq, args.col_cigar))#, cn_start, cn_stop))
if i_nucl is None:
raise ValueError(('The specified nucleotide column {} does '+
'not exist. Use the --col-seq parameter to set an '+
'existing column name.').format(args.col_seq))
if i_cigar is None and not args.cigar_new:
raise ValueError(('The specified CIGAR column {} does '+
'not exist. Use the --col-cigar parameter to set an '+
'existing column name or use the --cigar-new parameter '+
'to create new CIGAR strings.').format(args.col_cigar))
#i_rest = [i for i in range(0,len(header)) if i not in (i_nucl, i_cigar)]
else:
i_nucl, i_cigar = 0, 1
#i_rest = []
rows = (s.rstrip() for s in input)
fields = (s.split(args.sep) for s in rows if s != '')
if args.cigar_new:
if not args.no_header:
if not args.col_cigar in header:
header = header + [args.col_cigar]
else:
raise ValueError((
"The column name {} for the new CIGAR column "+
"already exists in the input. Choose a new column name "+
"using the --col-cigar option or omit the --cigar-new "+
"option if CIGAR strings exist already in the input.")
.format(args.col_cigar))
step1 = ( r + [addCIGAR(r[i_nucl])] for r in fields )
i_cigar = -1 # The last element of the row
else:
step1 = fields
step3 = mutatorExpLen(step1, args.in_prob
, args.in_exp, args.del_prob, args.del_exp, (i_nucl,i_cigar))
if not args.no_header:
print(args.sep.join(header))
for x in step3:
print(args.sep.join(x))
def splitter(tuples, idxs):
"""idxs: list of tuples of indices (integer). Returns a tuple with one
element for each element i in `idxs`. Return tuple i contains all tuples[j]
for all j in idxs[i].
    >>> l = [(1,2,3), (4,5,6), (7,8,9)]
    >>> list(splitter(l, [(0,), (1, 2)]))
    [((1,), (2, 3)), ((4,), (5, 6)), ((7,), (8, 9))]
"""
for n in tuples:
yield tuple(tuple(n[j] for j in idx) for idx in idxs)
def inputNormalizer(strings, sep):
"""Make sure no invalid data format (too many columns)
is specified and remove newlines"""
iLine = 0
ncols = None
while True:
s = next(strings)
iLine += 1
s.rstri
|
amarant/servo
|
python/servo/testing_commands.py
|
Python
|
mpl-2.0
| 31,871
| 0.002134
|
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import argparse
import re
import sys
import os
import os.path as path
import copy
from collections import OrderedDict
from time import time
from mach.registrar import Registrar
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, call, check_call, host_triple
from wptrunner import wptcommandline
from update import updatecommandline
from servo_tidy import tidy
from servo_tidy_tests import test_tidy
SCRIPT_PATH = os.path.split(__file__)[0]
PROJECT_TOPLEVEL_PATH = os.path.abspath(os.path.join(SCRIPT_PATH, "..", ".."))
WEB_PLATFORM_TESTS_PATH = os.path.join("tests", "wpt", "web-platform-tests")
SERVO_TESTS_PATH = os.path.join("tests", "wpt", "mozilla", "tests")
TEST_SUITES = OrderedDict([
("tidy", {"kwargs": {"all_files": False, "no_progress": False, "self_test": False},
"include_arg": "include"}),
("wpt", {"kwargs": {"release": False},
"paths": [path.abspath(WEB_PLATFORM_TESTS_PATH),
path.abspath(SERVO_TESTS_PATH)],
"include_arg": "include"}),
("css", {"kwargs": {"release": False},
"paths": [path.abspath(path.join("tests", "wpt", "css-tests"))],
"include_arg": "include"}),
("unit", {"kwargs": {},
"paths": [path.abspath(path.join("tests", "unit"))],
"include_arg": "test_name"}),
("compiletest", {"kwargs": {"release": False},
"paths": [path.abspath(path.join("tests", "compiletest"))],
"include_arg": "test_name"})
])
TEST_SUITES_BY_PREFIX = {path: k for k, v in TEST_SUITES.iteritems() if "paths" in v for path in v["paths"]}
def create_parser_wpt():
parser = wptcommandline.create_parser()
parser.add_argument('--release', default=False, action="store_true",
help="Run with a release build of servo")
parser.add_argument('--chaos', default=False, action="store_true",
help="Run under chaos mode in rr until a failure is captured")
parser.add_argument('--pref', default=[], action="append", dest="prefs",
help="Pass preferences to servo")
return parser
@CommandProvider
class MachCommands(CommandBase):
DEFAULT_RENDER_MODE = "cpu"
HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"
def __init__(self, context):
CommandBase.__init__(self, context)
if not hasattr(self.context, "built_tests"):
self.context.built_tests = False
@Command('test',
description='Run specified Servo tests',
category='testing')
@CommandArgument('params', default=None, nargs="...",
help="Optionally select test based on "
"test file directory")
@CommandArgument('--render-mode', '-rm', default=DEFAULT_RENDER_MODE,
help="The render mode to be used on all tests. " +
HELP_RENDER_MODE)
@CommandArgument('--release', default=False, action="store_true",
help="Run with a release build of servo")
@CommandArgument('--tidy-all', default=False, action="store_true",
help="Check all files, and run the WPT lint in tidy, "
"even if unchanged")
@CommandArgument('--no-progress', default=False, action="store_true",
help="Don't show progress for tidy")
@CommandArgument('--self-test', default=False, action="store_true",
help="Run unit tests for tidy")
@CommandArgument('--all', default=False, action="store_true", dest="all_suites",
help="Run all test suites")
def test(self, params, render_mode=DEFAULT_RENDER_MODE, release=False, tidy_all=False,
no_progress=False, self_test=False, all_suites=False):
suites = copy.deepcopy(TEST_SUITES)
suites["tidy"]["kwargs"] = {"all_files": tidy_all, "no_progress": no_progress, "self_test": self_test}
suites["wpt"]["kwargs"] = {"release": release}
suites["css"]["kwargs"] = {"release": release}
suites["unit"]["kwargs"] = {}
suites["compiletest"]["kwargs"] = {"release": release}
selected_suites = OrderedDict()
if params is None:
if all_suites:
params = suites.keys()
else:
print("Specify a test path or suite name, or pass --all to run all test suites.\n\nAvailable suites:")
for s in suites:
print(" %s" % s)
return 1
for arg in params:
found = False
if arg in suites and arg not in selected_suites:
selected_suites[arg] = []
found = True
else:
suite = self.suite_for_path(arg)
if suite is not None:
if suite not in selected_suites:
selected_suites[suite] = []
selected_suites[suite].append(arg)
found = True
break
if not found:
print("%s is not a valid test path or suite name" % arg)
return 1
test_start = time()
for suite, tests in selected_suites.iteritems():
props = suites[suite]
kwargs = props.get("kwargs", {})
if tests:
kwargs[props["include_arg"]] = tests
Registrar.dispatch("test-%s" % suite, context=self.context, **kwargs)
elapsed = time() - test_start
print("Tests completed in %0.2fs" % elapsed)
# Helper to determine which test suite owns the path
def suite_for_path(self, path_arg):
if os.path.exists(path.abspath(path_arg)):
            abs_path = path.abspath(path_arg)
for prefix, suite in TEST_SUITES_BY_PREFIX.iteritems():
if abs_path.startswith(prefix):
return suite
return None
@Command('test-geckolib',
             description='Test geckolib sanity checks',
category='testing')
def test_geckolib(self):
self.ensure_bootstrapped()
env = self.build_env()
env["RUST_BACKTRACE"] = "1"
return call(["cargo", "test"], env=env, cwd=path.join("ports", "geckolib"))
@Command('test-unit',
description='Run unit tests',
category='testing')
@CommandArgument('--package', '-p', default=None, help="Specific package to test")
@CommandArgument('test_name', nargs=argparse.REMAINDER,
help="Only run tests that match this pattern or file path")
def test_unit(self, test_name=None, package=None):
if test_name is None:
test_name = []
self.ensure_bootstrapped()
if package:
packages = {package}
else:
packages = set()
test_patterns = []
for test in test_name:
# add package if 'tests/unit/<package>'
match = re.search("tests/unit/(\\w+)/?$", test)
if match:
packages.add(match.group(1))
# add package & test if '<package>/<test>', 'tests/unit/<package>/<test>.rs', or similar
elif re.search("\\w/\\w", test):
tokens = test.split("/")
packages.add(tokens[-2])
test_prefix = tokens[-1]
if test_prefix.endswith(".rs"):
test_prefix = test_prefix[:-3]
test_prefix += "::"
test_patterns.append(test_prefix)
# add te
|
Ajoo/pySystems
|
pysystems/ident/systems.py
|
Python
|
mit
| 549
| 0.009107
|
from .. import DStateSpace
def extension_method(cls):
    def decorator(func):
        setattr(cls, func.__name__, func)
return func
return decorator
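# Usage sketch (the method body below is illustrative only):
#
#   @extension_method(DStateSpace)
#   def describe(self):
#       return "discrete state-space system"
#
# After this runs, every DStateSpace instance gains a describe() method.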
def extension_class(name, bases, namespace):
assert len(bases) == 1, "Exactly one base class required"
base = bases[0]
for name, value in namespace.iteritems():
if name != "__metaclass__":
setattr(base, name, value)
return base
class IdentDStateSpace(DStateSpace):
    __metaclass__ = extension_class
def fit(self, u, y):
pass
|
edx-solutions/edx-platform
|
cms/lib/xblock/tagging/migrations/0002_auto_20170116_1541.py
|
Python
|
agpl-3.0
| 571
| 0.003503
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tagging', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='tagavailablevalues',
options={'ordering': ('id',), 'verbose_name': 'available tag value'},
),
migrations.AlterModelOptions(
name='tagcategories',
options={'ordering': ('title',), 'verbose_name': 'tag category', 'verbose_name_plural': 'tag categories'},
),
|
]
|
allenai/allennlp
|
allennlp/nn/chu_liu_edmonds.py
|
Python
|
apache-2.0
| 10,283
| 0.000486
|
from typing import List, Set, Tuple, Dict
import numpy
from allennlp.common.checks import ConfigurationError
def decode_mst(
energy: numpy.ndarray, length: int, has_labels: bool = True
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
# Parameters
energy : `numpy.ndarray`, required.
        A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is `False`,
the tensor should have shape (timesteps, timesteps) instead.
length : `int`, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : `bool`, optional, (default = `True`)
Whether the graph has labels or not.
"""
if has_labels and energy.ndim != 3:
raise ConfigurationError("The dimension of the energy array is not equal to 3.")
elif not has_labels and energy.ndim != 2:
raise ConfigurationError("The dimension of the energy array is not equal to 2.")
input_shape = energy.shape
max_length = input_shape[-1]
# Our energy matrix might have been batched -
# here we clip it to contain only non padded tokens.
if has_labels:
energy = energy[:, :length, :length]
# get best label for each edge.
label_id_matrix = energy.argmax(axis=0)
energy = energy.max(axis=0)
else:
energy = energy[:length, :length]
label_id_matrix = None
# get original score matrix
original_score_matrix = energy
# initialize score matrix to original score matrix
score_matrix = numpy.array(original_score_matrix, copy=True)
old_input = numpy.zeros([length, length], dtype=numpy.int32)
old_output = numpy.zeros([length, length], dtype=numpy.int32)
current_nodes = [True for _ in range(length)]
representatives: List[Set[int]] = []
for node1 in range(length):
original_score_matrix[node1, node1] = 0.0
score_matrix[node1, node1] = 0.0
representatives.append({node1})
for node2 in range(node1 + 1, length):
old_input[node1, node2] = node1
old_output[node1, node2] = node2
old_input[node2, node1] = node2
old_output[node2, node1] = node1
final_edges: Dict[int, int] = {}
# The main algorithm operates inplace.
chu_liu_edmonds(
length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
)
heads = numpy.zeros([max_length], numpy.int32)
if has_labels:
head_type = numpy.ones([max_length], numpy.int32)
else:
head_type = None
for child, parent in final_edges.items():
heads[child] = parent
if has_labels:
head_type[child] = label_id_matrix[parent, child]
return heads, head_type
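# A minimal usage sketch (the scores are illustrative, not from this file):
# energy[i, j] scores the edge from head i to child j; node 0 acts as the root.
#
#   energy = numpy.array([[0.0, 0.9, 0.1],
#                         [0.0, 0.0, 0.8],
#                         [0.0, 0.3, 0.0]])
#   heads, _ = decode_mst(energy, length=3, has_labels=False)
#   # heads[1] == 0 and heads[2] == 1: the maximum spanning arborescence.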
def chu_liu_edmonds(
length: int,
score_matrix: numpy.ndarray,
current_nodes: List[bool],
final_edges: Dict[int, int],
old_input: numpy.ndarray,
old_output: numpy.ndarray,
representatives: List[Set[int]],
):
"""
Applies the chu-liu-edmonds algorithm recursively
to a graph with edge weights defined by score_matrix.
Note that this function operates in place, so variables
will be modified.
# Parameters
length : `int`, required.
The number of nodes.
score_matrix : `numpy.ndarray`, required.
The score matrix representing the scores for pairs
of nodes.
current_nodes : `List[bool]`, required.
The nodes which are representatives in the graph.
        A representative at its most basic represents a node,
but as the algorithm progresses, individual nodes will
represent collapsed cycles in the graph.
final_edges : `Dict[int, int]`, required.
An empty dictionary which will be populated with the
nodes which are connected in the maximum spanning tree.
old_input : `numpy.ndarray`, required.
old_output : `numpy.ndarray`, required.
representatives : `List[Set[int]]`, required.
A list containing the nodes that a particular node
is representing at this iteration in the graph.
# Returns
Nothing - all variables are modified in place.
"""
# Set the initial graph to be the greedy best one.
parents = [-1]
for node1 in range(1, length):
parents.append(0)
if current_nodes[node1]:
max_score = score_matrix[0, node1]
for node2 in range(1, length):
if node2 == node1 or not current_nodes[node2]:
continue
new_score = score_matrix[node2, node1]
if new_score > max_score:
max_score = new_score
parents[node1] = node2
# Check if this solution has a cycle.
has_cycle, cycle = _find_cycle(parents, length, current_nodes)
# If there are no cycles, find all edges and return.
if not has_cycle:
final_edges[0] = -1
for node in range(1, length):
if not current_nodes[node]:
continue
parent = old_input[parents[node], node]
child = old_output[parents[node], node]
final_edges[child] = parent
return
# Otherwise, we have a cycle so we need to remove an edge.
# From here until the recursive call is the contraction stage of the algorithm.
cycle_weight = 0.0
# Find the weight of the cycle.
index = 0
for node in cycle:
index += 1
cycle_weight += score_matrix[parents[node], node]
# For each node in the graph, find the maximum weight incoming
# and outgoing edge into the cycle.
cycle_representative = cycle[0]
for node in range(length):
if not current_nodes[node] or node in cycle:
continue
in_edge_weight = float("-inf")
in_edge = -1
out_edge_weight = float("-inf")
out_edge = -1
for node_in_cycle in cycle:
if score_matrix[node_in_cycle, node] > in_edge_weight:
in_edge_weight = score_matrix[node_in_cycle, node]
in_edge = node_in_cycle
# Add the new edge score to the cycle weight
# and subtract the edge we're considering removing.
score = (
cycle_weight
+ score_matrix[node, node_in_cycle]
- score_matrix[parents[node_in_cycle], node_in_cycle]
)
if score > out_edge_weight:
out_edge_weight = score
out_edge = node_in_cycle
score_matrix[cycle_representative, node] = in_edge_weight
old_input[cycle_representative, node] = old_input[in_edge, node]
old_output[cycle_representative, node] = old_output[in_edge, node]
score_matrix[node, cycle_representative] = out_edge_weight
old_output[node, cycle_representative] = old_output[node, out_edge]
old_input[node, cycle_representative] = old_input[node, out_edge]
# For the next recursive iteration, we want to consider the cycle as a
# single node. Here we collapse the cycle into the first node in the
    # cycle (first node is arbitrary), and set all the other nodes to not be
# considered in the next iteration. We also keep track of which
# representatives we are considering this iteration because we need
# them below to check if we're done.
considered_representatives: List[Set[int]] = []
for i, node_in_cycle in enumerate(cycle):
considered_representatives.append(set())
if i > 0:
# We need to consider at least one
# node in the cycle, arbitrarily choose
# the first.
current_nodes[node_in_cycle] = False
for node in representatives[node_in_cycle]:
considered_representatives[i].add(node)
if i > 0:
representatives[cycle_representative].add(node)
chu_liu_edmonds(
length
|
joaduo/mepinta
|
core/python_core/common/type_checking/isiterable.py
|
Python
|
gpl-3.0
| 972
| 0
|
# -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
def isiterable(instance):
    try:
        iter(instance)
        return True
    except TypeError:
        return False
if __name__ == "__main__":
    print(isiterable([]))
    print(isiterable("foo"))
    print(isiterable(10))
|
JPWKU/unix-agent
|
src/dcm/agent/messaging/utils.py
|
Python
|
apache-2.0
| 2,118
| 0
|
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
import uuid
import dcm.agent.utils as agent_utils
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
_g_message_uuid = str(uuid.uuid4()).split("-")[0]
_g_message_id_count = 0
_g_guid_lock = threading.RLock()
def new_message_id():
# note: using uuid here caused deadlock in tests
global _g_message_id_count
global _g_message_uuid
_g_guid_lock.acquire()
try:
_g_message_id_count = _g_message_id_count + 1
finally:
_g_guid_lock.release()
return _g_message_uuid + str(_g_message_id_count)
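# A minimal usage sketch (the prefix is illustrative; it is fixed per process):
#
#   first = new_message_id()   # e.g. "3f2a1b7" + "1"
#   second = new_message_id()  # same prefix, counter incremented to "2"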
class MessageTimer(object):
def __init__(self, timeout, callback, message_doc):
self._send_doc = message_doc
self._timeout = timeout
self._cb = callback
self._timer = None
self._lock = threading.RLock()
self.message_id = message_doc['message_id']
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
@agent_utils.class_method_sync
def send(self, conn):
_g_logger.info("Resending reply to %s" % self._send_doc["request_id"])
self._send_doc['entity'] = "timer"
conn.send(self._send_doc)
self._timer = dcm_events.register_callback(
self._cb, args=[self], delay=self._timeout)
@agent_utils.class_method_sync
def cancel(self):
if self._timer is None:
return
dcm_events.cancel_callback(self._timer)
self._timer = None
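# A minimal usage sketch (conn is assumed to be any object with a send(doc)
# method; on_timeout receives the timer itself after the delay):
#
#   doc = {"message_id": new_message_id(), "request_id": "req-1"}
#   timer = MessageTimer(timeout=5.0, callback=on_timeout, message_doc=doc)
#   timer.send(conn)   # sends doc and schedules on_timeout(timer) in 5 seconds
#   timer.cancel()     # cancels the pending callback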
|
ageron/tensorflow
|
tensorflow/python/keras/layers/core_test.py
|
Python
|
apache-2.0
| 11,584
| 0.004575
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
def test_dropout(self):
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_spatial_dropout_1d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
def test_spatial_dropout_2d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
def test_spatial_dropout_3d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
def test_lambda(self):
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([math_ops.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
def test_lambda_multiple_inputs(self):
ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
x1 = np.ones([3, 2], np.float32)
x2 = np.ones([3, 5], np.float32)
out = ld([x1, x2])
self.assertAllEqual(out.shape, [3, 2])
def test_lambda_output_shape(self):
l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual((1, 1), l.get_config()['output_shape'])
def test_lambda_output_shape_function(self):
def get_output_shape(input_shape):
return 1 * input_shape
    l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
    l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual('lambda', l.get_config()['output_shape_type'])
def test_lambda_output_shape_autocalculate_multiple_inputs(self):
def lambda_fn(x):
return math_ops.matmul(x[0], x[1])
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual((10, 20), output_shape)
def test_lambda_output_shape_list_multiple_outputs(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_tuple_with_none(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
output_shape = l.compute_output_shape((5, 10, 20))
self.assertAllEqual([5, None, 10], output_shape.as_list())
def test_lambda_output_shape_function_multiple_outputs(self):
def lambda_fn(x):
return x
def output_shape_fn(input_shape):
return input_shape
l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_config_serialization(self):
# Test serialization with output_shape and output_shape_type
layer = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
layer(keras.backend.variable(np.ones((1, 1))))
config = layer.get_config()
layer = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
layer = keras.layers.Lambda.from_config(config)
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
def test_masking(self):
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_keras_mask(self):
x = np.ones((10, 10))
y = keras.layers.Masking(1.)(x)
self.assertTrue(hasattr(y, '_keras_mask'))
self.assertTrue(y._keras_mask is not None)
self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
def test_activation(self):
# with string argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
# Te
|
zyrikby/BBoxTester
|
BBoxTester/main_intents_strategy.py
|
Python
|
apache-2.0
| 5,189
| 0.007901
|
'''
Created on Nov 26, 2014
@author: Yury Zhauniarovich <y.zhalnerovich{at}gmail.com>
'''
import os, time
from interfaces.adb_interface import AdbInterface
from bboxcoverage import BBoxCoverage
from running_strategies import IntentInvocationStrategy
import smtplib
import email.utils
from email.mime.text import MIMEText
APK_DIR_SOURCES = ["", ""]
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
SENDER = ""
PASSWORD = ""
TO_EMAIL = ""
def sendMessage(subj, email_message):
msg = MIMEText(email_message)
msg['To'] = email.utils.formataddr(('Recipient', TO_EMAIL))
msg['From'] = email.utils.formataddr(('Author', SENDER))
msg['Subject'] = subj
server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
try:
server.set_debuglevel(True)
# identify ourselves, prompting server for supported features
server.ehlo()
# If we can encrypt this session, do it
if server.has_extn('STARTTLS'):
server.starttls()
server.ehlo() # re-identify ourselves over TLS connection
server.login(SENDER, PASSWORD)
server.sendmail(SENDER, [TO_EMAIL], msg.as_string())
finally:
server.quit()
def getExecutionDevice():
'''
    This method allows a user to select a device that is used for further
    analysis.
'''
dev_list = AdbInterface.getDeviceSerialsList()
devNum = len(dev_list)
if devNum <= 0:
print "No device has been detected! Connect your device and restart the application!"
return
if devNum == 1:
return dev_list[0]
choice = None
if devNum > 1:
print "Select the device to use for analysis:\n"
for i in xrange(0, devNum):
print "%d. %s\n" % ((i + 1), dev_list[i])
while not choice:
try:
choice = int(raw_input())
if choice not in range(1, devNum+1):
choice = None
print 'Invalid choice! Choose right number!'
except ValueError:
print 'Invalid Number! Choose right number!'
return dev_list[choice-1]
def getSubdirs(rootDir):
return [os.path.join(rootDir, name) for name in os.listdir(rootDir)
if os.path.isdir(os.path.join(rootDir, name))]
def getInstrApkInFolder(folder):
for f in os.listdir(folder):
if f.endswith("_aligned.apk"):
filepath = os.path.join(folder, f)
return filepath
return None
def runMainIntentsStrategy(adb, androidManifest, delay=10):
automaticTriggeringStrategy = IntentInvocationStrategy(adbDevice=adb, pathToAndroidManifest=androidManifest)
automaticTriggeringStrategy.run(delay=delay)
#main part
adb = AdbInterface()
device = getExecutionDevice()
if not device:
exit(1)
adb.setTargetSerial(device)
bboxcoverage = BBoxCoverage()
for apk_dir_source in APK_DIR_SOURCES:
print "\n\nStarting experiment for directory: [%s]" % apk_dir_source
result_directories = getSubdirs(apk_dir_source)
for directory in result_directories:
apk_file = getInstrApkInFolder(directory)
if apk_file:
print "Starting experiment for apk: [%s]" % apk_file
try:
bboxcoverage.initAlreadyInstrApkEnv(pathToInstrApk=apk_file, resultsDir=directory)
except:
print "Exception while initialization!"
continue
try:
bboxcoverage.installApkOnDevice()
except:
print "Exception while installation apk on device!"
bboxcoverage.uninstallPackage()
try:
bboxcoverage.installApkOnDevice()
except:
continue
package_name = bboxcoverage.getPackageName()
params = {}
params["strategy"] = "main_intents"
params["package_name"] = package_name
params["main_activity"] = bboxcoverage.androidManifest.getMainActivity()
try:
bboxcoverage.startTesting()
except:
print "Exception while startTesting!"
bboxcoverage.uninstallPackage()
continue
try:
runMainIntentsStrategy(adb=adb, androidManifest=bboxcoverage.androidManifestFile, delay=10)
except:
print "Exception while running strategy!"
bboxcoverage.uninstallPackage()
continue
try:
bboxcoverage.stopTesting("main_intents", paramsToWrite=params)
except:
print "Exception while running strategy!"
bboxcoverage.uninstallPackage()
continue
time.sleep(3)
bboxcoverage.uninstallPackage()
time.sleep(5)
sendMessage("[BBoxTester]", "Experiments done for directory [%s]!" % apk_dir_source)
|
google/jax
|
jax/_src/scipy/stats/geom.py
|
Python
|
apache-2.0
| 1,244
| 0.002412
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.stats as osp_stats
from jax import lax
from jax._src.lax.lax import _const as _lax_const
from jax._src.numpy import lax_numpy as jnp
from jax._src.numpy.util import _wraps
from jax.scipy.special import xlog1py
@_wraps(osp_stats.geom.logpmf, update_doc=False)
def logpmf(k, p, loc=0):
k, p, loc = jnp._promote_args_inexact("geom.logpmf", k, p, loc)
  zero = _lax_const(k, 0)
  one = _lax_const(k, 1)
x = lax.sub(k, loc)
log_probs = xlog1py(lax.sub(x, one), -p) + lax.log(p)
return jnp.where(lax.le(x, zero), -jnp.inf, log_probs)
@_wraps(osp_stats.geom.pmf, update_doc=False)
def pmf(k, p, loc=0):
return jnp.exp(logpmf(k, p, loc))
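# A minimal usage sketch (the numbers are illustrative, not from this file):
#
#   pmf(2, 0.5)     # == 0.25: first success on the second trial
#   logpmf(1, 0.5)  # == log(0.5)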
|
earonesty/bitcoin
|
test/functional/keypool.py
|
Python
|
mit
| 3,529
| 0.002834
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class KeyPoolTest(BitcoinTestFramework):
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
wallet_info_old = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
bitcoind_processes[0].wait()
# Restart node 0
nodes[0] = start_node(0, self.options.tmpdir)
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].validateaddress(addr)
wallet_info = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
        assert_raises_jsonrpc(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 6)
assert_equal(wi['keypoolsize'], 6)
# drain the internal keys
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
assert_raises_jsonrpc(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
# drain the external keys
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
assert(len(addr) == 6)
# the next one should fail
assert_raises_jsonrpc(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain them by mining
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
assert_raises_jsonrpc(-12, "Keypool ran out", nodes[0].generate, 1)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
if __name__ == '__main__':
KeyPoolTest().main()
|
grehx/spark-tk
|
regression-tests/sparktkregtests/testcases/dicom/take_dicom_test.py
|
Python
|
apache-2.0
| 3,835
| 0
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.inspect() functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
from lxml import etree
class TakeDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(TakeDicomTest, self).setUp()
self.dataset = self.get_file("di
|
com_uncompresse
|
d")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = self.get_local_dataset("dicom_xml/")
self.image_directory = self.get_local_dataset("dicom_uncompressed/")
self.count = self.dicom.metadata.count()
def test_metadata_imagedata_row_count_same(self):
"""test metadata pixeldata row count"""
metadata_result = self.dicom.metadata.get_inspect(
self.dicom.metadata.count())
image_result = self.dicom.pixeldata.get_inspect(
self.dicom.pixeldata.count())
self.assertEqual(len(metadata_result.rows), len(image_result.rows))
def test_metadata_content_take_dcm_basic(self):
"""content test of dicom metadata import"""
# here we will get the files so we can generate the expected result
files = []
for filename in os.listdir(self.xml_directory):
if filename.endswith(".xml"):
with open(self.xml_directory + str(filename), 'rb') as xmlfile:
contents = xmlfile.read()
xml = etree.fromstring(contents)
bulk_data = xml.xpath("//BulkData")[0]
bulk_data.getparent().remove(bulk_data)
files.append(etree.tostring(xml))
        # the BulkData location element of the metadata xml will be different
        # since the dicom may load the data from a different location than
        # where we loaded our files. We will remove this element from the
        # metadata before we compare
metadata_take = self.dicom.metadata.take(self.count)
for dcm_file in metadata_take:
dcm_file = dcm_file[1].encode("ascii", "ignore")
dcm_xml_root = etree.fromstring(dcm_file)
dcm_bulk_data = dcm_xml_root.xpath("//BulkData")[0]
dcm_bulk_data.getparent().remove(dcm_bulk_data)
self.assertTrue(etree.tostring(dcm_xml_root) in files)
def test_image_content_take_dcm_basic(self):
"""content test of image data for dicom"""
# load the files so we can compare with the dicom result
files = []
for filename in os.listdir(self.image_directory):
pixel_data = dicom.read_file(
self.image_directory + filename).pixel_array
files.append(pixel_data)
# iterate through the data in the files and in the dicom frame
# and ensure that they match
image_inspect = self.dicom.pixeldata.take(self.count)
for dcm_image in image_inspect:
result = any(numpy.array_equal(
dcm_image[1], file_image) for file_image in files)
self.assertTrue(result)
if __name__ == "__main__":
unittest.main()
|
antoinevg/survival
|
widgets/texteditor.py
|
Python
|
gpl-2.0
| 7,247
| 0.019594
|
import gtk
import pango
import math
from core.world import TheWorld
class TextEditor(object):
def __init__(self, text):
self.__text = text
self.cursorindex = 0
self.padding = 10.0
self.width = 0.0
self.height = 0.0
self.pixel_width = 0.0
self.pixel_height = 0.0
# create text layout
self.layout = pango.Layout(TheWorld.pango_context)
fontDescription = pango.FontDescription("Monospace 8")
self.layout.set_font_description(fontDescription)
#layout.set_markup(self.text)
self.layout.set_text(text)
# calc text metrics
self.recalc_text_size()
# -- properties
def __get_text(self):
return self.__text
def __set_text(self, text):
self.__text = text
self.layout.set_text(self.__text)
self.recalc_text_size() # recalc text size
text = property(__get_text, __set_text)
def recalc_text_size(self):
(self.pixel_width, self.pixel_height) = self.layout.get_pixel_size() # bogus when called from init() !?
self.width = self.pixel_width / float(TheWorld.width) #+ self.padding * 2
self.height = self.pixel_height / float(TheWorld.height) #+ self.padding
def draw(self, context, x, y):
# figure out scale factor
        # TODO - Text should be independent of scale factor
scale_x = 1.0 / self.pixel_width
scale_y = 1.0 / self.pixel_height
# render the text
context.save()
#context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
context.move_to(0.0, 0.0)
context.scale(scale_x, scale_y)
# draw a background for the text
self.draw_background(context, 0.0, 0.0, self.pixel_width, self.pixel_height, 10) # ve vant square rounded corners :-)
context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
context.show_layout(self.layout)
context.restore()
# render cursor
self.draw_cursor(context)
def draw_cursor(self, context):
(strong, weak) = self.layout.get_cursor_pos(self.cursorindex)
(startx, starty, curx, cury) = strong
startx /= pango.SCALE * float(TheWorld.width)
starty /= pango.SCALE * float(TheWorld.height)
curx /= pango.SCALE * float(TheWorld.width)
cury /= pango.SCALE * float(TheWorld.height)
context.set_line_width(0.02)
context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
context.move_to(0.001 + (startx / self.width), starty / self.height)
context.line_to(0.001 + (startx / self.width), (starty + cury) / self.height)
context.stroke()
def draw_background(self, context, x, y, w, h, r):
x -= self.padding
y -= self.padding
w += self.padding * 2.0
h += self.padding * 2.0
# rounded box
context.move_to(x + r, y) # top left
context.line_to(x + w - r, y) # top right
context.arc(x + w - r, y + r, r, math.pi + math.pi / 2.0, 0.0)
context.line_to(x + w, y + h - r) # bottom right
context.arc(x + w - r, y + h - r, r, 0, math.pi / 2.0)
context.line_to(x + r, y + h) # bottom left
context.arc(x + r, y + h - r, r, math.pi / 2.0, math.pi)
context.line_to(x, y + r) # top left
context.arc(x + r, y + r, r, math.pi, math.pi + math.pi / 2.0)
context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
context.fill_preserve()
context.set_line_width(4.0)
context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
context.stroke()
# thought bubble
context.arc(x + w / 1.5, y + h * 1.1, self.pixel_height / 10.0, 0.0, math.pi * 2.0)
context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
context.fill_preserve()
context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
context.stroke()
context.arc(x + w / 1.7, y + h * 1.2, self.pixel_height / 20.0, 0.0, math.pi * 2.0)
context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
context.fill_preserve()
context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
context.stroke()
context.arc(x + w / 1.9, y + h * 1.3, self.pixel_height / 30.0, 0.0, math.pi * 2.0)
context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
context.fill_preserve()
context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
context.stroke()
# -- key handling ---------------------------------------------------------
def do_key_press_event(self, event):
try :
{ gtk.keysyms.BackSpace : self.do_key_press_backspace,
gtk.keysyms.Delete : self.do_key_press_delete,
gtk.keysyms.Home : self.do_key_press_home,
gtk.keysyms.End : self.do_key_press_end,
gtk.keysyms.Left : self.do_key_press_left,
gtk.keysyms.Right : self.do_key_press_right,
gtk.keysyms.Up : self.do_key_press_up,
gtk.keysyms.Down : self.do_key_press_down } [event.keyval]()
except:
pass
if event.string:
left = self.text[ : self.cursorindex]
right = self.text[self.cursorindex : ]
if event.string == "\r":
self.text = left + "\n" + right
else:
self.text = left + event.string + right
self.cursorindex += 1
def do_key_press_backspace(self):
left = self.text[ : self.cursorindex - 1]
right = self.text[self.cursorindex : ]
self.text = left + right
if self.cursorindex > 0:
self.cursorindex -= 1
def do_key_press_delete(self):
left = self.text[ : self.cursorindex]
right = self.text[self.cursorindex + 1 : ]
self.text = left + right
def do_key_press_home(self):
lines = self.text.splitlines ()
loc = 0
line = 0
for i in lines:
loc += len(i) + 1
if loc > self.cursorindex:
self.cursorindex = loc - len(i) - 1
return
line += 1
def do_key_press_end(self):
lines = self.text.splitlines()
loc = 0
line = 0
for i in lines:
loc += len(i) + 1
if loc > self.cursorindex:
self.cursorindex = loc - 1
return
line += 1
def do_key_press_left(self):
if self.cursorindex > 0:
self.cursorindex -= 1
def do_key_press_right(self):
if self.cursorindex < len(self.text):
self.cursorindex += 1
def do_key_press_up(self):
lines = self.text.splitlines()
if len(lines) == 1:
return
loc = 0
line = 0
for i in lines:
loc += len(i) + 1
if loc > self.cursorindex:
loc -= len(i) + 1
line -= 1
break
line += 1
if line == -1:
return
elif line >= len(lines):
self.cursorindex -= len(lines[-1]) + 1
return
dist = self.cursorindex - loc -1
self.cursorindex = loc
if dist < len(lines[line]):
            self.cursorindex -= (len(lines[line]) - dist)
else:
self.cursorindex -= 1
def do_key_press_down(self):
lines = self.text.splitlines()
if len(lines) == 1:
return
loc = 0
line = 0
for i in lines:
loc += len(i) + 1
if loc > self.cursorindex:
break
line += 1
if line >= len(lines) - 1:
return
dist = self.cursorindex - (loc - len(lines[line])) + 1
self.cursorindex = loc
if dist > len (lines[line + 1]):
            self.cursorindex += len(lines[line + 1])
else:
self.cursorindex += dist
|
willybh11/python
|
tetris/tetris.py
|
Python
|
gpl-3.0
| 48
| 0.083333
|
def main():
pass
def generateBlock():
    pass
| |
zferentz/maproxy
|
maproxy/session.py
|
Python
|
apache-2.0
| 16,890
| 0.013914
|
#!/usr/bin/env python
import logging
import socket
import tornado
import tornado.iostream
import maproxy.proxyserver
class Session(object):
"""
    The Session class is the heart of the system.
- We create the session when a client connects to the server (proxy). this connection is "c2p"
- We create a connection to the server (p2s)
    - Each connection (c2p,p2s) has a state (Session.State), which can be CLOSED, CONNECTING, or CONNECTED
- Initially, when c2p is created we :
- create the p-s connection
- start read from c2p
- Completion Routings:
- on_XXX_done_read:
When we get data from one side, we initiate a "start_write" to the other side .
Exception: if the target is not connected yet, we queue the data so we can send it later
- on_p2s_connected:
When p2s is connected ,we start read from the server .
if queued data is available (data that was sent from the c2p) we initiate a "start_write" immediately
- on_XXX_done_write:
When we're done "sending" data , we check if there's more data to send in the queue.
if there is - we initiate another "start_write" with the queued data
- on_XXX_close:
When one side closes the connection, we either initiate a "start_close" on the other side, or (if already closed) - remove the session
    - I/O routines:
        - XXX_start_read: simply start read from the socket (we assume and validate that only one read goes at a time)
        - XXX_start_write: if currently writing, add data to queue. if not writing - perform io_write...
"""
class LoggerOptions:
"""
Logging options - which messages/notifications we would like to log...
The logging is for development&maintenance. In production set all to False
"""
        # Log characteristics
LOG_SESSION_ID=True # for each log, add the session-id
# Log different operations
LOG_NEW_SESSION_OP=False
LOG_READ_OP=False
LOG_WRITE_OP=False
LOG_CLOSE_OP=False
LOG_CONNECT_OP=False
LOG_REMOVE_SESSION=False
class State:
"""
Each socket has a state.
We will use the state to identify whether the connection is open or closed
"""
CLOSED,CONNECTING,CONNECTED=range(3)
def __init__(self):
pass
#def new_connection(self,stream : tornado.iostream.IOStream ,address,proxy):
def new_connection(self,stream ,address,proxy):
# First,validation
assert isinstance(proxy,maproxy.proxyserver.ProxyServer)
assert isinstance(stream,tornado.iostream.IOStream)
# Logging
self.logger_nesting_level=0 # logger_nesting_level is the current "nesting level"
if Session.LoggerOptions.LOG_NEW_SESSION_OP:
self.log("New Session")
# Remember our "parent" ProxyServer
self.proxy=proxy
# R/W flags for each socket
# Using the flags, we can tell if we're waiting for I/O completion
# NOTE: the "c2p" and "p2s" prefixes are NOT the direction of the IO,
# they represent the SOCKETS :
# c2p means the socket from the client to the proxy
# p2s means the socket from the proxy to the server
self.c2p_reading=False # whether we're reading from the client
self.c2p_writing=False # whether we're writing to the client
self.p2s_writing=False # whether we're writing to the server
self.p2s_reading=False # whether we're reading from the server
# Init the Client->Proxy stream
self.c2p_stream=stream
self.c2p_address=address
# Client->Proxy is connected
self.c2p_state=Session.State.CONNECTED
# Here we will put incoming data while we're still waiting for the target-server's connection
self.c2s_queued_data=[] # Data that was read from the Client, and needs to be sent to the Server
self.s2c_queued_data=[] # Data that was read from the Server , and needs to be sent to the client
# send data immediately to the client ... (Disable Nagle TCP algorithm)
self.c2p_stream.set_nodelay(True)
        # Let us know when the client disconnects (callback on_c2p_close)
self.c2p_stream.set_close_callback( self.on_c2p_close)
# Create the Proxy->Server socket and stream
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
if self.proxy.server_ssl_options is not None:
# if the "server_ssl_options" where specified, it means that when we connect, we need to wrap with SSL
# so we need to use the SSLIOStream stream
self.p2s_stream = tornado.iostream.SSLIOStream(s,ssl_options=self.proxy.server_ssl_options)
else:
# use the standard IOStream stream
self.p2s_stream = tornado.iostream.IOStream(s)
# send data immediately to the server... (Disable Nagle TCP algorithm)
self.p2s_stream.set_nodelay(True)
        # Let us know when the server disconnects (callback on_p2s_close)
self.p2s_stream.set_close_callback( self.on_p2s_close )
# P->S state is "connecting"
        self.p2s_state=Session.State.CONNECTING
self.p2s_stream.connect(( proxy.target_server, proxy.target_port), self.on_p2s_done_connect )
        # We can actually start reading immediately from the C->P socket
self.c2p_start_read()
# Each member-function can call this method to log data (currently to screen)
def log(self,msg):
prefix=str(id(self))+":" if Session.LoggerOptions.LOG_SESSION_ID else ""
prefix+=self.logger_nesting_level*" "*4
logging.debug(prefix + msg)
# Logging decorator (enter/exit)
def logger(enabled=True):
"""
        We use this decorator to wrap functions and log the input/output of each function
Since this decorator accepts a parameter, it must return an "inner" decorator....(Python stuff)
"""
def inner_decorator(func):
def log_wrapper(self,*args,**kwargs):
msg="%s (%s,%s)" % (func.__name__,args,kwargs)
self.log(msg)
self.logger_nesting_level+=1
r=func(self,*args,**kwargs)
self.logger_nesting_level-=1
self.log("%s -> %s" % (msg,str(r)) )
return r
return log_wrapper if enabled else func
return inner_decorator
################
## Start Read ##
################
@logger(LoggerOptions.LOG_READ_OP)
def c2p_start_read(self):
"""
Start read from client
"""
assert( not self.c2p_reading)
self.c2p_reading=True
try:
self.c2p_stream.read_until_close(lambda x: None,self.on_c2p_done_read)
except tornado.iostream.StreamClosedError:
self.c2p_reading=False
@logger(LoggerOptions.LOG_READ_OP)
def p2s_start_read(self):
"""
Start read from server
"""
assert( not self.p2s_reading)
self.p2s_reading=True
try:
self.p2s_stream.read_until_close(lambda x:None,self.on_p2s_done_read)
except tornado.iostream.StreamClosedError:
self.p2s_reading=False
##############################
## Read Completion Routines ##
##############################
@logger(LoggerOptions.LOG_READ_OP)
def on_c2p_done_read(self,data):
# # We got data from the client (C->P ) . Send data to the server
assert(self.c2p_reading)
assert(data)
self.p2s_start_write(data)
@logger(LoggerOptions.LOG_READ_OP)
def on_p2s_done_read(self,data):
# got data from S
|
mick-d/nipype_source
|
nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py
|
Python
|
bsd-3-clause
| 1,344
| 0.022321
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.filtering.extractskeleton import ExtractSkeleton
def test_ExtractSkeleton_inputs():
    input_map = dict(InputImageFileName=dict(argstr='%s',
position=-2,
),
OutputImageFileName=dict(argstr='%s',
hash_files=False,
position=-1,
),
args=dict(argstr='%s',
),
dontPrune=dict(argstr='--dontPrune ',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
numPoints=dict(argstr='--numPoints %d',
),
pointsFile=dict(argstr='--pointsFile %s',
),
terminal_output=dict(mandatory=True,
nohash=True,
),
type=dict(argstr='--type %s',
),
)
inputs = ExtractSkeleton.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ExtractSkeleton_outputs():
output_map = dict(OutputImageFileName=dict(position=-1,
),
)
outputs = ExtractSkeleton.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
ekamioka/gpss-research
|
experiments/synth/synth.py
|
Python
|
mit
| 1,731
| 0.010976
|
Experiment(description='Synthetic data sets of interest',
data_dir='../data/synth/',
max_depth=9,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=300,
verbose=False,
make_predictions=False,
skip_complete=False,
results_dir='../results/synth/',
iters=250,
base_kernels='SE,Per,Lin,Const,Noise',
random_seed=1,
period_heuristic=5,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=2,
additive_form=True,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='pl2',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
                             ('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
                             ('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('None',), {'A': 'kernel'})])
|
ta2-1/pootle
|
tests/pootle_fs/receivers.py
|
Python
|
gpl-3.0
| 1,046
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle_format.models import Format
@pytest.mark.django_db
def test_fs_filetypes_changed_receiver(project_fs, project1, po2):
project1.filetype_tool.add_filetype(po2)
assert not project1.revisions.filter(key="pootle.fs.sync").count()
project1.treestyle = "pootle_fs"
project1.save()
xliff = Format.objects.get(name="xliff")
project1.filetypes.add(xliff)
# still not configured
assert not project1.revisions.filter(key="pootle.fs.sync").count()
sync_start = project_fs.project.revisions.filter(
key="pootle.fs.sync").first()
project_fs.project.filetype_tool.add_filetype(po2)
assert (
sync_start
!= project_fs.project.revisions.filter(
key="pootle.fs.sync").first())
|
alphagov/notifications-api
|
migrations/versions/0212_remove_caseworking.py
|
Python
|
mit
| 586
| 0.005119
|
"""
Revision ID: 0212_remove_caseworking
Revises: 0211_email_branding_update
Create Date: 2018-07-31 18:00:20.457755
"""
from alembic import op
revision = '0212_remove_caseworking'
down_revision = '0211_email_branding_update'
PERMISSION_NAME = "caseworking"
def upgrade():
op.execute("delete from service_permissions where permission = '{}'".format(PERMISSION_NAME))
op.execute("delete from service_permission_types where name = '{}'".format(PERMISSION_NAME))
def downgrade():
op.execute("insert into s
|
ervice_permission_types values('{}')".format(PERMISSION_NAME))
|
google-research/google-research
|
kws_streaming/layers/lstm.py
|
Python
|
apache-2.0
| 10,610
| 0.005467
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LSTM layer."""
from kws_streaming.layers import modes
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
class LSTM(tf.keras.layers.Layer):
"""LSTM with support of streaming inference with internal/external state.
In training mode we use LSTM.
It receives input data [batch, time, feature] and
returns [batch, time, units] if return_sequences==True or
returns [batch, 1, units] if return_sequences==False
In inference mode we use LSTMCell
In streaming mode with internal state
it receives input data [batch, 1, feature]
In streaming mode with internal state it returns: [batch, 1, units]
In streaming mode with external state it receives input data with states:
[batch, 1, feature] + state1[batch, units] + state2[batch, units]
In streaming mode with external state it returns:
(output[batch, 1, units], state1[batch, units], state2[batch, units])
We use layer and parameter description from:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM
https://www.tensorflow.org/api_docs/python/tf/compat/v1/nn/rnn_cell/LSTMCell
https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN
Attributes:
units: dimensionality of the output space.
mode: Training or inference modes: non streaming, streaming.
inference_batch_size: batch size for inference mode
    return_sequences: Whether to return the last output in the output sequence,
      or the full sequence.
use_peepholes: True to enable diagonal/peephole connections
num_proj: The output dimensionality for the projection matrices. If None, no
projection is performed. It will be used only if use_peepholes is True.
unroll: If True, the network will be unrolled, else a symbolic loop will be
used. For any inference mode it will be set True inside.
stateful: If True, the last state for each sample at index i in a batch will
be used as initial state for the sample of index i in the following batch.
      If the model will be in streaming mode then it is better to train it with
      stateful=True. This flag is about stateful training and is applied during
      training only.
"""
def __init__(self,
units=64,
mode=modes.Modes.TRAINING,
inference_batch_size=1,
return_sequences=False,
use_peepholes=False,
num_proj=128,
unroll=False,
stateful=False,
name='LSTM',
**kwargs):
super(LSTM, self).__init__(**kwargs)
self.mode = mode
self.inference_batch_size = inference_batch_size
self.units = units
self.return_sequences = return_sequences
self.num_proj = num_proj
self.use_peepholes = use_peepholes
self.stateful = stateful
if mode != modes.Modes.TRAINING: # in any inference mode
# let's unroll lstm, so there is no symbolic loops / control flow
unroll = True
self.unroll = unroll
if self.mode in (modes.Modes.TRAINING, modes.Modes.NON_STREAM_INFERENCE):
if use_peepholes:
self.lstm_cell = tf1.nn.rnn_cell.LSTMCell(
num_units=units, use_peepholes=True, num_proj=num_proj, name='cell')
self.lstm = tf.keras.layers.RNN(
cell=self.lstm_cell,
return_sequences=return_sequences,
unroll=self.unroll,
stateful=self.stateful)
else:
self.lstm = tf.keras.layers.LSTM(
units=units,
return_sequences=return_sequences,
name='cell',
unroll=self.unroll,
stateful=self.stateful)
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
      # create state variable for stateful streamable inference
self.input_state1 = self.add_weight(
name='input_state1',
shape=[inference_batch_size, units],
trainable=False,
initializer=tf.zeros_initializer)
if use_peepholes:
# second state in peepholes LSTM has different dimensions with
# the first state due to projection layer with dim num_proj
self.input_state2 = self.add_weight(
name='input_state2',
shape=[inference_batch_size, num_proj],
trainable=False,
initializer=tf.zeros_initializer)
self.lstm_cell = tf1.nn.rnn_cell.LSTMCell(
num_units=units, use_peepholes=True, num_proj=num_proj, name='cell')
else:
# second state in the standard LSTM has the same dimensions with
# the first state
self.input_state2 = self.add_weight(
name='input_state2',
shape=[inference_batch_size, units],
trainable=False,
initializer=tf.zeros_initializer)
self.lstm_cell = tf.keras.layers.LSTMCell(units=units, name='cell')
self.lstm = None
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
      # in streaming mode with external state,
      # state becomes input/output placeholders
self.input_state1 = tf.keras.layers.Input(
shape=(units,),
batch_size=inference_batch_size,
name=self.name + 'input_state1')
if use_peepholes:
self.input_state2 = tf.keras.layers.Input(
shape=(num_proj,),
batch_size=inference_batch_size,
name=self.name + 'input_state2')
self.lstm_cell = tf1.nn.rnn_cell.LSTMCell(
num_units=units, use_peepholes=True, num_proj=num_proj)
else:
self.input_state2 = tf.keras.layers.Input(
shape=(units,),
batch_size=inference_batch_size,
name=self.name + 'input_state2')
self.lstm_cell = tf.keras.layers.LSTMCell(units=units, name='cell')
self.lstm = None
      self.output_state1 = None
self.output_state2 = None
def call(self, inputs):
    if inputs.shape.rank != 3:  # [batch, time, feature]
raise ValueError('inputs.shape.rank:%d must be 3' % inputs.shape.rank)
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
# run streamable inference on input [batch, 1, features]
# returns output [batch, 1, units]
return self._streaming_internal_state(inputs)
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
# in streaming mode with external state
# in addition to output we return output state
output, self.output_state1, self.output_state2 = self._streaming_external_state(
inputs, self.input_state1, self.input_state2)
return output
elif self.mode in (modes.Modes.TRAINING, modes.Modes.NON_STREAM_INFERENCE):
# run non streamable training or non streamable inference
# on input [batch, time, features], returns [batch, time, units]
return self._non_streaming(inputs)
else:
raise ValueError(f'Encountered unexpected mode `{self.mode}`.')
def get_config(self):
config = {
'mode': self.mode,
'inference_batch_size': self.inference_batch_size,
'units': self.units,
'return_sequences': self.return_sequences,
'unroll': self.unroll,
'num_proj': self.num_proj,
'use_peepholes': self.use_peepholes,
'stateful': self.stateful,
}
base_config = super(LSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_input_state(self):
# input state is used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
      return [self.input_state1, self.input_state2]
|
Eigenlabs/EigenD
|
tools/packages/SCons/Tool/sunlink.py
|
Python
|
gpl-3.0
| 2,437
| 0.002052
|
"""SCons.Tool.sunlink
Tool-specific initialization for the Sun Solaris (Forte) linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunlink.py 4577 2009/12/27 19:43:56 scons"
import os
import os.path
import SCons.Util
import link
ccLinker = None
# search for the acc compiler and linker front end
try:
dirs = os.listdir('/opt')
except (IOError, OSError):
    # Not being able to read the directory because it doesn't exist
    # (IOError) or isn't readable (OSError) is okay.
dirs = []
for d in dirs:
linker = '/opt/' + d + '/bin/CC'
if os.path.exists(linker):
ccLinker = linker
break
def generate(env):
"""Add Builders and construction variables for Forte to an Environment."""
link.generate(env)
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env.Append(LINKFLAGS=['$__RPATH'])
env['RPATHPREFIX'] = '-R'
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
return ccLinker
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
SujaySKumar/bedrock
|
bedrock/mozorg/forms.py
|
Python
|
mpl-2.0
| 30,635
| 0.00062
|
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from operator import itemgetter
import re
from datetime import datetime
from random import randrange
from django import forms
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.forms import widgets
from django.utils.safestring import mark_safe
from localflavor.us.forms import USStateField
from localflavor.us.us_states import STATE_CHOICES
import basket
from basket.base import request
from lib.l10n_utils.dotlang import _
from lib.l10n_utils.dotlang import _lazy
from product_details import product_details
from .email_contribute import INTEREST_CHOICES
FORMATS = (('H', _lazy('HTML')), ('T', _lazy('Text')))
LANGS_TO_STRIP = ['en-US', 'es']
PARENTHETIC_RE = re.compile(r' \([^)]+\)$')
LANG_FILES = ['firefox/partners/index', 'mozorg/contribute', 'mozorg/contribute/index']
def strip_parenthetical(lang_name):
"""
Remove the parenthetical from the end of the language name string.
"""
return PARENTHETIC_RE.sub('', lang_name, 1)
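# Illustrative behaviour (added example): only a single trailing parenthetical
# is removed, since the pattern is anchored to the end of the string:
#   strip_parenthetical(u'Deutsch (Deutschland)')  # -> u'Deutsch'
#   strip_parenthetical(u'A (b) (c)')              # -> u'A (b)'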
class SideRadios(widgets.RadioFieldRenderer):
"""Render radio buttons as labels"""
def render(self):
radios = [unicode(w) for idx, w in enumerate(self)]
return mark_safe(''.join(radios))
class PrivacyWidget(widgets.CheckboxInput):
"""Render a checkbox with privacy text. Lots of pages need this so
it should be standardized"""
def render(self, name, value, attrs=None):
attrs['required'] = 'required'
input_txt = super(PrivacyWidget, self).render(name, value, attrs)
policy_txt = _(u'I’m okay with Mozilla handling my info as explained '
u'in <a href="%s">this Privacy Policy</a>')
return mark_safe(
'<label for="%s" class="privacy-check-label">'
'%s '
'<span class="title">%s</span></label>'
% (attrs['id'], input_txt,
policy_txt % reverse('privacy'))
)
class HoneyPotWidget(widgets.TextInput):
"""Render a text field to (hopefully) trick bots. Will be used on many pages."""
def render(self, name, value, attrs=None):
honeypot_txt = _(u'Leave this field empty.')
# semi-randomized in case we have more than one per page.
# this is maybe/probably overthought
honeypot_id = 'office-fax-' + str(randrange(1001)) + '-' + str(datetime.now().strftime("%Y%m%d%H%M%S%f"))
return mark_safe(
'<div class="super-priority-field">'
'<label for="%s">%s</label>'
'<input type="text" name="office_fax" id="%s">'
'</div>' % (honeypot_id, honeypot_txt, honeypot_id))
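# Hypothetical companion check (added sketch, not part of this file): a form
# rendering HoneyPotWidget would typically reject any submission where the
# decoy 'office_fax' field was filled in, e.g.:
#   def clean_office_fax(self):
#       if self.cleaned_data.get('office_fax'):
#           raise forms.ValidationError('Your submission could not be processed.')
#       return ''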
class URLInput(widgets.TextInput):
input_type = 'url'
class EmailInput(widgets.TextInput):
input_type = 'email'
class DateInput(widgets.DateInput):
input_type = 'date'
class TimeInput(widgets.TimeInput):
input_type = 'time'
class TelInput(widgets.TextInput):
input_type = 'tel'
class NumberInput(widgets.TextInput):
input_type = 'number'
class L10nSelect(forms.Select):
def render_option(self, selected_choices, option_value, option_label):
if option_value == '':
option_label = u'-- {0} --'.format(_('select'))
return super(L10nSelect, self).render_option(selected_choices, option_value, option_label)
class ContributeSignupForm(forms.Form):
required_attr = {'required': 'required'}
empty_choice = ('', '')
category_choices = (
('coding', _lazy('Coding')),
('testing', _lazy('Testing')),
('writing', _lazy('Writing')),
('teaching', _lazy('Teaching')),
('helping', _lazy('Helping')),
('translating', _lazy('Translating')),
('activism', _lazy('Activism')),
('dontknow', _lazy(u'I don’t know')),
)
coding_choices = (
empty_choice,
('coding-firefox', _lazy('Firefox')),
('coding-firefoxos', _lazy('Firefox OS')),
('coding-websites', _lazy('Websites')),
('coding-addons', _lazy('Firefox add-ons')),
('coding-marketplace', _lazy('HTML5 apps')),
('coding-webcompat', _lazy('Diagnosing Web compatibility issues')),
('coding-cloud', _lazy('Online services')),
)
testing_choices = (
empty_choice,
('testing-firefox', _lazy('Firefox and Firefox OS')),
('testing-addons', _lazy('Firefox add-ons')),
('testing-marketplace', _lazy('HTML5 apps')),
('testing-websites', _lazy('Websites')),
('testing-webcompat', _lazy('Web compatibility')),
)
translating_choices = (
empty_choice,
('translating-products', _lazy('Products')),
('translating-websites', _lazy('Websites')),
('translating-tools', _lazy(u'I’d like to work on localization tools')),
)
writing_choices = (
empty_choice,
('writing-journalism', _lazy('Journalism')),
('writing-techusers', _lazy('Technical docs for users')),
('writing-techdevs', _lazy('Technical docs for developers')),
('writing-addons', _lazy('Technical docs for Firefox add-ons')),
('writing-marketplace', _lazy('Technical docs for HTML5 apps')),
)
teaching_choices = (
empty_choice,
('teaching-webmaker', _lazy('Teach the Web (Webmaker)')),
('teaching-fellowships', _lazy('Open News fellowships')),
('teaching-hive', _lazy('Hive - Community networks of educators/mentors')),
('teaching-science', _lazy('Open Web science research')),
)
email = forms.EmailField(widget=EmailInput(attrs=required_attr))
privacy = forms.BooleanField(widget=PrivacyWidget)
category = forms.ChoiceField(choices=category_choices,
widget=forms.RadioSelect(attrs=required_attr))
area_coding = forms.ChoiceField(choices=coding_choices, required=False, widget=L10nSelect)
area_testing = forms.ChoiceField(choices=testing_choices, required=False, widget=L10nSelect)
    area_translating = forms.ChoiceField(choices=translating_choices, required=False,
                                         widget=L10nSelect)
    area_writing = forms.ChoiceField(choices=writing_choices, required=False, widget=L10nSelect)
    area_teaching = forms.ChoiceField(choices=teaching_choices, required=False, widget=L10nSelect)
name = forms.CharField(widget=forms.TextInput(attrs=required_attr))
message = forms.CharField(widget=forms.Textarea, required=False)
newsletter = forms.BooleanField(required=False)
format = forms.ChoiceField(widget=forms.RadioSelect(attrs=required_attr), choices=(
('H', _lazy('HTML')),
('T', _lazy('Text')),
))
def __init__(self, locale, *args, **kwargs):
regions = product_details.get_regions(locale)
regions = sorted(regions.iteritems(), key=itemgetter(1))
regions.insert(0, self.empty_choice)
super(ContributeSignupForm, self).__init__(*args, **kwargs)
self.locale = locale
self.fields['country'] = forms.ChoiceField(choices=regions,
widget=L10nSelect(attrs={'required': 'required'}))
def clean(self):
cleaned_data = super(ContributeSignupForm, self).clean()
category = cleaned_data.get('category')
# only bother if category was supplied
if category:
area_name = 'area_' + category
if area_name in cleaned_data and not cleaned_data[area_name]:
required_message = self.fields[area_name].error_messages['required']
self._errors[area_name] = self.error_class([required_message])
del cleaned_data[area_name]
return cleaned_data
class ContributeForm(forms.Form):
email = forms.EmailField(widget=EmailInput(attrs={'required': 'required'}))
privacy = forms.BooleanField(widget=PrivacyWidget)
newsletter = forms.BooleanField(required=False)
interest = forms.ChoiceField(
choices=INTEREST_CHOICES,
widget=forms.
|
rj3d/PiTempController
|
config_reader.py
|
Python
|
gpl-2.0
| 1,798
| 0.05228
|
class IoConfig:
def __init__(self, i_path, mode, o_pin=None, set_temp=None, buffer_temp=None, direction=None):
self.i_path = i_path
self.mode = mode
self.o_pin = o_pin
self.set_temp = set_temp
self.buffer_temp = buffer_temp
self.on_fn = self.get_on_fn(direction)
self.off_fn = self.get_off_fn(direction)
def get_on_fn(self, direction):
if direction == '-':
return lambda t: self.set_temp < t
elif direction == '+':
return lambda t: self.set_temp > t
return None
def get_off_fn(self, direction):
if direction == '-':
return lambda t: self.set_temp - self.buffer_temp >= t
elif direction == '+':
return lambda t: self.set_temp + self.buffer_temp <= t
return None
def __repr__(self):
return 'IoConfig()'
    def __str__(self):
str_lst = ['Input path:', str(self.i_path),
'Mode:', str(self.mode),
'Output pin:', str(self.o_pin),
'Set temp:', str(self.set_temp),
'Buffer temp:', str(self.buffer_temp)
]
return '\t'.join(str_lst)
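# Minimal usage sketch (added; sensor path and values are hypothetical): with
# direction '-' the pair of functions implements cooling with hysteresis.
#   io = IoConfig('/sys/bus/w1/devices/x/w1_slave', 'CONTROLLER', 4, 20.0, 2.0, '-')
#   io.on_fn(21.0)    # True: temperature is above set_temp, switch output on
#   io.off_fn(17.5)   # True: cooled below set_temp - buffer_temp, switch off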
class Config:
def __init__(self, config_path, temp_path = '/sys/bus/w1/devices/%(TEMP)s/w1_slave'):
f = open(config_path)
self.log_path = f.readline().strip()
self.io = []
for line in f:
splat = line.strip().split()
if len(splat) == 1:
self.io.append( IoConfig( temp_path % { "TEMP" :splat[0] },
'MONITOR'
) )
else:
self.io.append( IoConfig( temp_path % { "TEMP" :splat[0] },
'CONTROLLER',
int(splat[1]),
float(splat[2]),
float(splat[3]),
splat[4]
) )
f.close()
def __repr__(self):
return 'Config()'
def __str__(self):
str_lst = [ 'Log path:',
self.log_path,
'IO Configs:',
'\n'.join([str(c) for c in self.io])
]
return '\n'.join(str_lst)
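# Example config file accepted by the parser above (hypothetical values): the
# first line is the log path; one-token lines are MONITOR sensors, five-token
# lines are CONTROLLER entries (sensor, pin, set_temp, buffer_temp, direction).
#   /var/log/temps.log
#   28-000004b541e9
#   28-000004b541ea 4 20.0 2.0 -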
if __name__ == "__main__":
import sys
config = Config(sys.argv[1])
print config
|
billiob/papyon
|
papyon/gnet/message/SOAP.py
|
Python
|
gpl-2.0
| 5,704
| 0.003857
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Ali Sabil <ali.sabil@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""SOAP Messages structures."""
import papyon.util.element_tree as ElementTree
import papyon.util.string_io as StringIO
__all__=['SOAPRequest', 'SOAPResponse']
class NameSpace:
SOAP_ENVELOPE = "http://schemas.xmlsoap.org/soap/envelope/"
SOAP_ENCODING = "http://schemas.xmlsoap.org/soap/encoding/"
XML_SCHEMA = "http://www.w3.org/1999/XMLSchema"
XML_SCHEMA_INSTANCE = "http://www.w3.org/1999/XMLSchema-instance"
class Encoding:
SOAP = "http://schemas.xmlsoap.org/soap/encoding/"
class _SOAPSection:
ENVELOPE = "{" + NameSpace.SOAP_ENVELOPE + "}Envelope"
HEADER = "{" + NameSpace.SOAP_ENVELOPE + "}Header"
BODY = "{" + NameSpace.SOAP_ENVELOPE + "}Body"
class _SOAPElement(object):
def __init__(self, element):
self.element = element
def append(self, tag, namespace=None, type=None, attrib={}, value=None, **kwargs):
if namespace is not None:
tag = "{" + namespace + "}" + tag
if type:
if isinstance(type, str):
type = ElementTree.QName(type, NameSpace.XML_SCHEMA)
else:
type = ElementTree.QName(type[1], type[0])
attrib["{" + NameSpace.XML_SCHEMA_INSTANCE + "}type"] = type
child = ElementTree.SubElement(self.element, tag, attrib, **kwargs)
child.text = value
return _SOAPElement(child)
def __str__(self):
return ElementTree.tostring(self.element, "utf-8")
class SOAPRequest(object):
"""Abstracts a SOAP Request to be sent to the server"""
def __init__(self, method, namespace=None, encoding_style=Encoding.SOAP, **attr):
"""Initializer
@param method: the method to be called
@type method: string
@param namespace: the namespace that the method belongs to
@type namespace: URI
@param encoding_style: the encoding style for this method
@type encoding_style: URI
@param attr: attributes to be attached to the method"""
self.header = ElementTree.Element(_SOAPSection.HEADER)
if namespace is not None:
method = "{" + namespace + "}" + method
self.method = ElementTree.Element(method)
if encoding_style is not None:
self.method.set("{" + NameSpace.SOAP_ENVELOPE + "}encodingStyle", encoding_style)
for attr_key, attr_value in attr.iteritems():
self.method.set(attr_key, attr_value)
def add_argument(self, name, namespace=None, type=None, attrib=None, value=None, **kwargs):
if namespace is not None:
name = "{" + namespace + "}" + name
return self._add_element(self.method, name, type, attrib, value, **kwargs)
def add_header(self, name, namespace=None, attrib=None, value=None, **kwargs):
if namespace is not None:
name = "{" + namespace + "}" + name
return self._add_element(self.header, name, None, attrib, value, **kwargs)
def _add_element(self, parent, name, type=None, attributes=None, value=None, **kwargs):
elem = ElementTree.SubElement(parent, name)
if attributes is None:
attributes = {}
attributes.update(kwargs)
if type:
type = self._qname(type, NameSpace.XML_SCHEMA)
elem.set("{" + NameSpace.XML_SCHEMA_INSTANCE + "}type", type)
for attr_key, attr_value in attributes.iteritems():
elem.set(attr_key, attr_value)
elem.text = value
return _SOAPElement(elem)
def _qname(self, name, default_ns):
if name[0] != "{":
            return ElementTree.QName(default_ns, name)
return ElementTree.QName(name)
def __str__(self):
envelope = ElementTree.Element(_SOAPSection.ENVELOPE)
if len(self.header) > 0:
envelope.append(self.header)
body = ElementTree.SubElement(envelope, _SOAPSection.BODY)
body.append(self.method)
return "<?xml version=\"1.0\" encoding=\"utf-8\"?>" +\
ElementTree.tostring(envelope, "utf-8")
def __repr__(self):
return "<SOAP request %s>" % self.method.tag
class SOAPResponse(object):
def __init__(self, data):
self.tree = self._parse(data)
self.header = self.tree.find(_SOAPSection.HEADER)
self.body = self.tree.find(_SOAPSection.BODY)
def find(self, path):
return self.tree.find(path)
def _parse(self, data):
events = ("start", "end", "start-ns", "end-ns")
ns = []
data = StringIO.StringIO(data)
context = ElementTree.iterparse(data, events=events)
for event, elem in context:
if event == "start-ns":
ns.append(elem)
elif event == "end-ns":
ns.pop()
elif event == "start":
elem.set("(xmlns)", tuple(ns))
data.close()
return context.root
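# Minimal usage sketch (added; the method name and namespace are invented for
# illustration, only the SOAPRequest API above is real):
#   request = SOAPRequest("GetStatus", namespace="http://example.com/ns")
#   request.add_argument("userId", type="string", value="1234")
#   xml = str(request)  # serializes the full <?xml ...?> SOAP envelope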
|
mypaint/mypaint
|
gui/device.py
|
Python
|
gpl-2.0
| 22,624
| 0.000575
|
# This file is part of MyPaint.
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2019 by the MyPaint Development Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Device specific settings and configuration"""
## Imports
from __future__ import division, print_function
import logging
import collections
import re
from lib.gettext import C_
from lib.gibindings import Gtk
from lib.gibindings import Gdk
from lib.gibindings import Pango
from lib.observable import event
import gui.application
import gui.mode
logger = logging.getLogger(__name__)
## Device prefs
# The per-device settings are stored in the prefs in a sub-dict whose
# string keys are formed from the device name and enough extra
# information to (hopefully) identify the device uniquely. Names are not
# unique, and IDs vary according to the order in which you plug devices
# in. So for now, our unique strings use a combination of the device's
# name, its source as presented by GDK, and the number of axes.
_PREFS_ROOT = "input.devices"
_PREFS_DEVICE_SUBKEY_FMT = "{name}:{source}:{num_axes}"
## Device type strings
_DEVICE_TYPE_STRING = {
Gdk.InputSource.CURSOR: C_(
"prefs: device's type label",
"Cursor/puck",
),
Gdk.InputSource.ERASER: C_(
"prefs: device's type label",
"Eraser",
),
Gdk.InputSource.KEYBOARD: C_(
"prefs: device's type label",
"Keyboard",
),
Gdk.InputSource.MOUSE: C_(
"prefs: device's type label",
"Mouse",
),
Gdk.InputSource.PEN: C_(
"prefs: device's type label",
"Pen",
),
Gdk.InputSource.TOUCHPAD: C_(
"prefs: device's type label",
"Touchpad",
),
Gdk.InputSource.TOUCHSCREEN: C_(
"prefs: device's type label",
"Touchscreen",
),
}
## Settings consts and classes
class AllowedUsage:
"""Consts describing how a device may interact with the canvas"""
ANY = "any" #: Device can be used for any tasks.
NOPAINT = "nopaint" #: No direct painting, but can manipulate objects.
NAVONLY = "navonly" #: Device can only be used for navigation.
IGNORED = "ignored" #: Device cannot interact with the canvas at all.
VALUES = (ANY, IGNORED, NOPAINT, NAVONLY)
DISPLAY_STRING = {
IGNORED: C_(
"device settings: allowed usage",
u"Ignore",
),
ANY: C_(
"device settings: allowed usage",
u"Any Task",
),
NOPAINT: C_(
"device settings: allowed usage",
u"Non-painting tasks",
),
NAVONLY: C_(
"device settings: allowed usage",
u"Navigation only",
),
}
BEHAVIOR_MASK = {
ANY: gui.mode.Behavior.ALL,
IGNORED: gui.mode.Behavior.NONE,
NOPAINT: gui.mode.Behavior.NON_PAINTING,
NAVONLY: gui.mode.Behavior.CHANGE_VIEW,
}
class ScrollAction:
"""Consts describing how a device's scroll events should be used.
The user can assign one of these values to a device to configure
whether they'd prefer panning or scrolling for unmodified scroll
events. This setting can be queried via the device monitor.
"""
ZOOM = "zoom" #: Alter the canvas scaling
PAN = "pan" #: Pan across the canvas
VALUES = (ZOOM, PAN)
DISPLAY_STRING = {
ZOOM: C_("device settings: unmodified scroll action", u"Zoom"),
PAN: C_("device settings: unmodified scroll action", u"Pan"),
}
class Settings (object):
"""A device's settings"""
DEFAULT_USAGE = AllowedUsage.VALUES[0]
DEFAULT_SCROLL = ScrollAction.VALUES[0]
def __init__(self, prefs, usage=DEFAULT_USAGE, scroll=DEFAULT_SCROLL):
super(Settings, self).__init__()
self._usage = self.DEFAULT_USAGE
self._update_usage_mask()
self._scroll = self.DEFAULT_SCROLL
self._prefs = prefs
self._load_from_prefs()
@property
def usage(self):
return self._usage
@usage.setter
def usage(self, value):
if value not in AllowedUsage.VALUES:
raise ValueError("Unrecognized usage value")
self._usage = value
self._update_usage_mask()
self._save_to_prefs()
@property
def usage_mask(self):
return self._usage_mask
@property
def scroll(self):
return self._scroll
@scroll.setter
def scroll(self, value):
if value not in ScrollAction.VALUES:
raise ValueError("Unrecognized scroll value")
self._scroll = value
self._save_to_prefs()
def _load_from_prefs(self):
usage = self._prefs.get("usage", self.DEFAULT_USAGE)
if usage not in AllowedUsage.VALUES:
usage = self.DEFAULT_USAGE
self._usage = usage
scroll = self._prefs.get("scroll", self.DEFAULT_SCROLL)
if scroll not in ScrollAction.VALUES:
scroll = self.DEFAULT_SCROLL
self._scroll = scroll
self._update_usage_mask()
def _save_to_prefs(self):
self._prefs.update({
"usage": self._usage,
"scroll": self._scroll,
})
def _update_usage_mask(self):
self._usage_mask = AllowedUsage.BEHAVIOR_MASK[self._usage]
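# Usage sketch (added; the empty prefs dict is a stand-in for the app
# preferences sub-dict):
#   settings = Settings({})
#   settings.usage = AllowedUsage.NAVONLY   # validated and saved to prefs
#   settings.usage_mask == gui.mode.Behavior.CHANGE_VIEW  # True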
## Main class defs
class Monitor (object):
"""Monitors device use & plugging, and manages their configuration
An instance resides in the main application. It is responsible for
monitoring known devices, determining their characteristics, and
storing their settings. Per-device settings are stored in the main
application preferences.
"""
def __init__(self, app):
"""Initializes, assigning initial input device uses
:param app: the owning Application instance.
:type app: gui.application.Application
"""
super(Monitor, self).__init__()
self._app = app
if app is not None:
self._prefs = app.preferences
else:
self._prefs = {}
if _PREFS_ROOT not in self._prefs:
self._prefs[_PREFS_ROOT] = {}
# Transient device information
self._device_settings = collections.OrderedDict() # {dev: settings}
self._last_event_device = None
self._last_pen_device = None
disp = Gdk.Display.get_default()
mgr = disp.get_device_manager()
mgr.connect("device-added", self._device_added_cb)
mgr.connect("device-removed",
|
self._device_removed_cb)
self
|
._device_manager = mgr
for physical_device in mgr.list_devices(Gdk.DeviceType.SLAVE):
self._init_device_settings(physical_device)
## Devices list
def get_device_settings(self, device):
"""Gets the settings for a device
:param Gdk.Device device: a physical ("slave") device
:returns: A settings object which can be manipulated, or None
:rtype: Settings
Changes to the returned object made via its API are saved to the
user preferences immediately.
If the device is a keyboard, or is otherwise unsuitable as a
pointing device, None is returned instead. The caller needs to
check this case.
"""
return (self._device_settings.get(device)
or self._init_device_settings(device))
def _init_device_settings(self, device):
"""Ensures that the device settings are loaded for a device"""
source = device.get_source()
if source == Gdk.InputSource.KEYBOARD:
return
num_axes = device.get_n_axes()
if num_axes < 2:
return
settings = self._device_settings.get(device)
if not settings:
try:
vendor_id = device.get_vendor_id()
product_id = device.get_product_id()
except AttributeError:
# New in GDK 3.16
vendor_id = "?"
product_id = "?"
logger.info(
|
andrmuel/gr-dab
|
python/qa/qa_measure_processing_rate.py
|
Python
|
gpl-3.0
| 1,233
| 0.030819
|
#!/usr/bin/env python
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import grdab
class qa_measure_processing_rate(gr_unittest.TestCase):
"""
@brief QA for measure processing rate sink.
This class implements a test bench to verify the corresponding C++ class.
"""
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_measure_processing_rate(self):
src = blocks.null_source(gr.sizeof_gr_complex)
throttle = blocks.throttle(gr.sizeof_gr_complex, 1000000)
head = blocks.head(gr.sizeof_gr_complex, 200000)
        sink = grdab.measure_processing_rate(gr.sizeof_gr_complex,100000)
self.tb.connect(src, throttle, head, sink)
self.tb.run()
        rate = sink.processing_rate()
assert(rate > 900000 and rate < 1100000)
def test_002_measure_processing_rate(self):
src = blocks.null_source(gr.sizeof_char)
throttle = blocks.throttle(gr.sizeof_char, 10000000)
head = blocks.head(gr.sizeof_char, 1000000)
sink = grdab.measure_processing_rate(gr.sizeof_char,1000000)
self.tb.connect(src, throttle, head, sink)
self.tb.run()
rate = sink.processing_rate()
assert(rate > 8000000 and rate < 12000000)
if __name__ == '__main__':
gr_unittest.main()
|
andrewsmedina/django-admin2
|
djadmin2/views.py
|
Python
|
bsd-3-clause
| 21,359
| 0.00014
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
import operator
from datetime import datetime
from functools import reduce
import extra_views
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import (PasswordChangeForm,
AdminPasswordChangeForm)
from django.contrib.auth.views import (logout as auth_logout,
login as auth_login)
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import models, router
from django.db.models.fields import FieldDoesNotExist
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy
from django.views import generic
from . import permissions, utils
from .filters import build_list_filter, build_date_filter
from .forms import AdminAuthenticationForm
from .models import LogEntry
from .viewmixins import Admin2Mixin, Admin2ModelMixin, Admin2ModelFormMixin
class AdminView(object):
def __init__(self, url, view, name=None):
self.url = url
self.view = view
self.name = name
def get_view_kwargs(self):
return {
'app_label': self.model_admin.app_label,
'model': self.model_admin.model,
'model_name': self.model_admin.model_name,
'model_admin': self.model_admin,
}
class IndexView(Admin2Mixin, generic.TemplateView):
"""Context Variables
:apps: A dictionary of apps, each app being a dictionary with keys
being models and the value being djadmin2.types.ModelAdmin2
objects.
:app_verbose_names: A dictionary containing the app verbose names,
each item has a key being the `app_label` and
the value being a string, (or even a lazy
translation object), with the custom app name.
"""
default_template_name = "index.html"
registry = None
apps = None
app_verbose_names = None
def get_context_data(self, **kwargs):
data = super(IndexView, self).get_context_data(**kwargs)
data.update({
'apps': self.apps,
'app_verbose_names': self.app_verbose_names,
})
return data
class AppIndexView(Admin2Mixin, generic.TemplateView):
"""Context Variables
:app_label: Name of your app
:registry: A dictionary of registered models for a given app, each
item has a key being the model and the value being
djadmin2.types.ModelAdmin2 objects.
:app_verbose_names: A dictionary containing the app verbose name for
a given app, the item has a key being the
`app_label` and the value being a string, (or
even a lazy translation object), with the custom
app name.
"""
default_template_name = "app_index.html"
registry = None
apps = None
app_verbose_names = None
def get_context_data(self, **kwargs):
data = super(AppIndexView, self).get_context_data(**kwargs)
app_label = self.kwargs['app_label']
registry = self.apps[app_label]
data.update({
'app_label': app_label,
'registry': registry,
'app_verbose_names': self.app_verbose_names,
})
return data
class ModelListView(Admin2ModelMixin, generic.ListView):
"""Context Variables
:is_paginated: If the page is paginated (page has a next button)
:model: Type of object you are editing
:model_name: Name of the object you are editing
:app_label: Name of your app
:app_verbose_names: A dictionary containing the app verbose name for
a given app, the item has a key being the
`app_label` and the value being a string, (or
even a lazy translation object), with the custom
app name.
"""
default_template_name = "model_list.html"
permission_classes = (
permissions.IsStaffPermission,
permissions.ModelViewPermission)
def post(self, request):
action_name = request.POST['action']
action_callable = self.get_actions()[action_name]['action_callable']
selected_model_pks = request.POST.getlist('selected_model_pk')
if getattr(action_callable, "only_selected", True):
queryset = self.model.objects.filter(pk__in=selected_model_pks)
else:
queryset = self.model.objects.all()
# If action_callable is a class subclassing from
# actions.BaseListAction then we generate the callable object.
if hasattr(action_callable, "process_queryset"):
response = action_callable.as_view(queryset=queryset, model_admin=self.model_admin)(request)
else:
            # generate the response if action_callable is a plain function.
response = action_callable(request, queryset)
if response is None:
return HttpResponseRedirect(self.get_success_url())
else:
return response
def get_search_results(self, queryset, search_term):
# Lifted from django.contrib.admin
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
orm_lookups = [construct_search(str(search_field))
for search_field in self.model_admin.search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
opts = utils.model_options(self.get_model())
if utils.lookup_needs_distinct(opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
def get_queryset(self):
queryset = super(ModelListView, self).get_queryset()
search_term = self.request.GET.get('q', None)
search_use_distinct = False
if self.model_admin.search_fields and search_term:
queryset, search_use_distinct = self.get_search_results(
queryset, search_term)
queryset = self._modify_queryset_for_ordering(queryset)
if self.model_admin.list_filter:
queryset = self.build_list_filter(queryset).qs
if self.model_admin.date_hierarchy:
queryset = self.build_date_filter(queryset, self.model_admin.date_hierarchy).qs
queryset = self._modify_queryset_for_sort(queryset)
if search_use_distinct:
return queryset.distinct()
else:
return queryset
def _modify_queryset_for_ordering(self, queryset):
ordering = self.model_admin.get_ordering(self.request)
if ordering:
queryset = queryset.order_by(*ordering)
return queryset
    def _modify_queryset_for_sort(self, queryset):
# If we are sorting AND the field exists on the model
sort_by = self.request.GET.get('sort', None)
if sort_by:
            # Special case when we are not explicitly displaying fields
if sort_by == '-__str__':
queryset = queryset[::-1]
try:
# If we sort on '-' remove it before looking for that field
field_exists = sort_by
if field_exists[0] == '-':
field_exists = field_exists[1:]
|
rcatwood/Savu
|
savu/plugins/loaders/multi_modal_loaders/i18_loaders/i18stxm_loader.py
|
Python
|
gpl-3.0
| 1,776
| 0.001689
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: I18stxm_loader
:platform: Unix
:synopsis: A class for loading I18's stxm data
.. moduleauthor:: Aaron Parsons <scientificsoftware@diamond.ac.uk>
"""
from savu.plugins.loaders.multi_modal_loaders.base_i18_multi_modal_loader import BaseI18MultiModalLoader
from savu.plugins.utils import register_plugin
@register_plugin
class I18stxmLoader(BaseI18MultiModalLoader):
"""
A class to load tomography data from an NXstxm file
:param stxm_detector: path to stxm. Default:'entry1/raster_counterTimer01/It'.
"""
def __init__(self, name='I18stxmLoader'):
        super(I18stxmLoader, self).__init__(name)
def setup(self):
"""
Define the input nexus file
:param path: The full path of the NeXus file to load.
:type path: str
"""
data_str = self.parameters['stxm_detector']
data_obj = self.multi_modal_setup('stxm')
data_obj.data = data_obj.backing_file[data_str]
data_obj.set_shape(data_obj.data.shape)
self.set_motors(data_obj, 'stxm')
self.add_patterns_based_on_acquisition(data_obj, 'stxm')
self.set_data_reduction_params(data_obj)
|
dsiddharth/access-keys
|
keystone/tests/contrib/kds/paths.py
|
Python
|
apache-2.0
| 915
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
|
obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
TEST_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
TMP_DIR = os.path.join(TEST_DIR, '..', '..', 'tmp')
def root_path(*args):
return os.path.join(TEST_DIR, *args)
def test_path(*args):
return os.path.join(TEST_DIR, *args)
def tmp_path(*args):
return os.path.join(TMP_DIR, *args)
|
AlphaCluster/NewsBlur
|
vendor/paypal/standard/pdt/forms.py
|
Python
|
mit
| 240
| 0.004167
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from paypal.standard.forms import PayPalStandardBaseForm
from paypal.standard.pdt.models import PayPalPDT
class PayPalPDTForm(PayPalStandardBaseForm):
class Meta:
        model = PayPalPDT
|
|
Bindernews/TheHound
|
identifiers/rpm_identifier.py
|
Python
|
mit
| 205
| 0.04878
|
from identifier import Result
RPM_PATTERNS = [
'ED AB EE DB'
]
class RpmResolver:
def identify(self, stream):
return Result('RPM')
def load(hound):
hound.add_matches(RPM_PATTERNS, RpmResolver())
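# Note (added): 'ED AB EE DB' is the magic number that opens every RPM
# package lead, so matching those four bytes is what identifies the format.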
|
Lydwen/Mr.Statamutation
|
Mr.Statapython/statapython/stataspoon/__init__.py
|
Python
|
mit
| 45
| 0
|
from .mutationstester import MutationsTester
|
kvar/ansible
|
test/lib/ansible_test/_internal/integration/__init__.py
|
Python
|
gpl-3.0
| 11,813
| 0.00364
|
"""Ansible integration test infrastructure."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import contextlib
import json
import os
import shutil
import tempfile
from .. import types as t
from ..target import (
analyze_integration_target_dependencies,
walk_integration_targets,
)
from ..config import (
IntegrationConfig,
NetworkIntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
)
from ..util import (
ApplicationError,
display,
make_dirs,
COVERAGE_CONFIG_NAME,
MODE_DIRECTORY,
MODE_DIRECTORY_WRITE,
MODE_FILE,
to_bytes,
)
from ..util_common import (
named_temporary_file,
write_text_file,
ResultType,
)
from ..coverage_util import (
generate_coverage_config,
)
from ..cache import (
CommonCache,
)
from ..cloud import (
CloudEnvironmentConfig,
)
from ..data import (
data_context,
)
def setup_common_temp_dir(args, path):
"""
:type args: IntegrationConfig
:type path: str
"""
if args.explain:
return
os.mkdir(path)
os.chmod(path, MODE_DIRECTORY)
if args.coverage:
coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME)
coverage_config = generate_coverage_config(args)
write_text_file(coverage_config_path, coverage_config)
os.chmod(coverage_config_path, MODE_FILE)
coverage_output_path = os.path.join(path, ResultType.COVERAGE.name)
os.mkdir(coverage_output_path)
os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
def generate_dependency_map(integration_targets):
"""
:type integration_targets: list[IntegrationTarget]
:rtype: dict[str, set[IntegrationTarget]]
"""
targets_dict = dict((target.name, target) for target in integration_targets)
target_dependencies = analyze_integration_target_dependencies(integration_targets)
dependency_map = {}
invalid_targets = set()
for dependency, dependents in target_dependencies.items():
dependency_target = targets_dict.get(dependency)
if not dependency_target:
invalid_targets.add(dependency)
continue
for dependent in dependents:
if dependent not in dependency_map:
dependency_map[dependent] = set()
dependency_map[dependent].add(dependency_target)
if invalid_targets:
raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
return dependency_map
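# Reading of the result (added; hypothetical target names): if 'test_a'
# depends on 'setup_b', the returned map is {'test_a': {<target 'setup_b'>}},
# i.e. each dependent target maps to the set of targets it requires.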
def get_files_needed(target_dependencies):
"""
:type target_dependencies: list[IntegrationTarget]
:rtype: list[str]
"""
files_needed = []
for target_dependency in target_dependencies:
files_needed += target_dependency.needs_file
files_needed = sorted(set(files_needed))
invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
if invalid_paths:
raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
return files_needed
def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None
"""Check the given inventory for issues."""
if args.docker or args.remote:
if os.path.exists(inventory_path):
with open(inventory_path) as inventory_file:
inventory = inventory_file.read()
if 'ansible_ssh_private_key_file' in inventory:
display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str
"""Return the inventory path used for the given integration configuration relative to the content root."""
inventory_names = {
PosixIntegrationConfig: 'inventory',
WindowsIntegrationConfig: 'inventory.winrm',
NetworkIntegrationConfig: 'inventory.networking',
} # type: t.Dict[t.Type[IntegrationConfig], str]
return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None
"""Make the given inventory available during delegation."""
    if isinstance(args, PosixIntegrationConfig):
        return
    def inventory_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
"""
Add the inventory file to the payload file list.
This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
"""
if data_context().content.collection:
working_path = data_context().content.collection.directory
else:
working_path = ''
inventory_path = os.path.join(working_path, get_inventory_relative_path(args))
if os.path.isfile(inventory_path_src) and os.path.relpath(inventory_path_src, data_context().content.root) != inventory_path:
originals = [item for item in files if item[1] == inventory_path]
if originals:
for original in originals:
files.remove(original)
display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
else:
display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
files.append((inventory_path_src, inventory_path))
data_context().register_payload_callback(inventory_callback)
@contextlib.contextmanager
def integration_test_environment(args, target, inventory_path_src):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
:type inventory_path_src: str
"""
ansible_config_src = args.get_ansible_config()
ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
inventory_path = inventory_path_src
ansible_config = ansible_config_src
vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
return
# When testing a collection, the temporary directory must reside within the collection.
# This is necessary to enable support for the default collection for non-collection content (playbooks and roles).
root_temp_dir = os.path.join(ResultType.TMP.path, 'integration')
prefix = '%s-' % target.name
suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
suffix = '-ansible'
if args.explain:
temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
else:
make_dirs(root_temp_dir)
temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
try:
display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
inventory_relative_path = get_inventory_relative_path(args)
inventory_path = os.path.join(temp_dir, inventory_relative_path)
cache = IntegrationCache(args)
target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
files_needed = get_files_needed(target_dependencies)
integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
targets_dir = os.path.join(temp_dir, data_context().con
|
tiagoft/sampleseeker
|
sampleseeker.py
|
Python
|
gpl-3.0
| 1,431
| 0.01188
|
import web
import uuid
from jinja2 import Environment, FileSystemLoader
import os
files_directory = "db/data"
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
urls = (
'/', 'Index',
'/upload', 'Upload'
)
class Index:
def GET(self):
j2_env = Environment(loader=FileSystemLoader(THIS_DIR+"/template/"),
trim_blocks=True)
template_values = {"things_to_show": ["one", "two", "three"]}
return j2_env.get_template('index.html').render(template_values)
class Upload:
def GET(self):
return """<h
|
tml><head></head><body>
<form method="POST" enctype="multipart/form-data" action="">
<input type="file" name="myfile" />
<br/>
<input type="submit" />
</form>
</body></html>"""
def POST(self):
x = web.input(myfile={})
filepath = x.myfile.filename.replace('\\','/') # replaces the windows-style slashes with linux ones.
filename=filepath.split('/')[-1]
        fout = open(files_directory + '/' + uuid.uuid4().hex + filename, 'wb') # creates the file; binary mode keeps non-text uploads intact
fout.write(x.myfile.file.read()) # writes the uploaded file to the newly created file.
fout.close() # closes the file, upload complete.
raise web.seeother('/upload')
if __name__ == '__main__':
app = web.application(urls, globals())
app.internalerror = web.debugerror
app.run()
|
Mitali-Sodhi/CodeLingo
|
Dataset/python/buildprefixdata.py
|
Python
|
mit
| 8,450
| 0.002604
|
#!/usr/bin/env python
"""Script to read the libphonenumber per-prefix metadata and generate Python code.
Invocation:
buildprefixdata.py [options] indir outfile module_prefix
Processes all of the per-prefix data under the given input directory and emits
generated Python code.
Options:
--var XXX : use this prefix for variable names in generated code
--flat : don't do per-locale processing
--sep C : expect metadata to be a list with C as separator
"""
# Based on original metadata data files from libphonenumber:
# resources/geocoding/*/*.txt, resources/carrier/*/*.txt
# Copyright (C) 2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
import re
import getopt
import datetime
# Use the local code in preference to any pre-installed version
sys.path.insert(0, '../../python')
from phonenumbers.util import prnt, rpr
PREFIXDATA_SUFFIX = ".txt"
BLANK_LINE_RE = re.compile(r'^\s*$', re.UNICODE)
COMMENT_LINE_RE = re.compile(r'^\s*#.*$', re.UNICODE)
DATA_LINE_RE = re.compile(r'^\+?(?P<prefix>\d+)\|(?P<stringdata>.*)$', re.UNICODE)
# Boilerplate header
PREFIXDATA_LOCALE_FILE_PROLOG = '''"""Per-prefix data, mapping each prefix to a dict of locale:name.
Auto-generated file, do not edit by hand.
"""
from %(module)s.util import u
'''
PREFIXDATA_FILE_PROLOG = '''"""Per-prefix data, mapping each prefix to a name.
Auto-generated file, do not edit by hand.
"""
from %(module)s.util import u
'''
# Copyright notice covering the XML metadata; include current year.
COPYRIGHT_NOTICE = """# Copyright (C) 2011-%s The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" % datetime.datetime.now().year
def load_locale_prefixdata_file(prefixdata, filename, locale=None, overall_prefix=None, separator=None):
"""Load per-prefix data from the given file, for the given locale and prefix.
We assume that this file:
- is encoded in UTF-8
- may have comment lines (starting with #) and blank lines
- has data lines of the form '<prefix>|<stringdata>'
- contains only data for prefixes that are extensions of the filename.
If overall_prefix is specified, lines are checked to ensure their prefix falls within this value.
    If locale is specified, prefixdata[prefix][locale] is filled in; otherwise, just prefixdata[prefix].
    If separator is specified, the string data will be split on this separator, and the output values
in the dict will be tuples of strings rather than strings.
"""
with open(filename, "rb") as infile:
lineno = 0
for line in infile:
uline = line.decode('utf-8')
lineno += 1
dm = DATA_LINE_RE.match(uline)
if dm:
prefix = dm.group('prefix')
stringdata = dm.group('stringdata')
if stringdata != stringdata.rstrip():
print ("%s:%d: Warning: stripping trailing whitespace" % (filename, lineno))
stringdata = stringdata.rstrip()
if overall_prefix is not None and not prefix.startswith(overall_prefix):
raise Exception("%s:%d: Prefix %s is not within %s" %
(filename, lineno, prefix, overall_prefix))
if separator is not None:
stringdata = tuple(stringdata.split(separator))
if prefix not in prefixdata:
prefixdata[prefix] = {}
if locale is not None:
prefixdata[prefix][locale] = stringdata
else:
prefixdata[prefix] = stringdata
elif BLANK_LINE_RE.match(uline):
pass
elif COMMENT_LINE_RE.match(uline):
pass
else:
raise Exception("%s:%d: Unexpected line format: %s" %
(filename, lineno, line))
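# Example data file content matched by DATA_LINE_RE above (hypothetical):
#   # comments and blank lines are ignored
#   1201|New Jersey
#   1202|Washington, DC
# With separator='&', a line like '86139|China Mobile&CMCC' is stored as the
# tuple ('China Mobile', 'CMCC') instead of a single string.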
def load_locale_prefixdata(indir, separator=None):
"""Load per-prefix data from the given top-level directory.
Prefix data is assumed to be held in files <indir>/<locale>/<prefix>.txt.
The same prefix may occur in multiple files, giving the prefix's description
in different locales.
"""
prefixdata = {} # prefix => dict mapping locale to description
for locale in os.listdir(indir):
if not os.path.isdir(os.path.join(indir, locale)):
continue
for filename in glob.glob(os.path.join(indir, locale, "*%s" % PREFIXDATA_SUFFIX)):
overall_prefix, ext = os.path.splitext(os.path.basename(filename))
load_locale_prefixdata_file(prefixdata, filename, locale, overall_prefix, separator)
return prefixdata
def _stable_dict_repr(strdict):
"""Return a repr() for a dict keyed by a string, in sorted key order"""
lines = []
for key in sorted(strdict.keys()):
lines.append("'%s': %s" % (key, rpr(strdict[key])))
return "{%s}" % ", ".join(lines)
def _tuple_repr(data):
"""Return a repr() for a list/tuple"""
if len(data) == 1:
return "(%s,)" % rpr(data[0])
else:
return "(%s)" % ", ".join([rpr(x) for x in data])
def output_prefixdata_code(prefixdata, outfilename, module_prefix, varprefix, per_locale):
"""Output the per-prefix data in Python form to the given file """
with open(outfilename, "w") as outfile:
longest_prefix = 0
if per_locale:
prnt(PREFIXDATA_LOCALE_FILE_PROLOG % {'module': module_prefix}, file=outfile)
else:
prnt(PREFIXDATA_FILE_PROLOG % {'module': module_prefix}, file=outfile)
prnt(COPYRIGHT_NOTICE, file=outfile)
prnt("%s_DATA = {" % varprefix, file=outfile)
for prefix in sorted(prefixdata.keys()):
if len(prefix) > longest_prefix:
longest_prefix = len(prefix)
if per_locale:
prnt(" '%s':%s," % (prefix, _stable_dict_repr(prefixdata[prefix])), file=outfile)
else:
prnt(" '%s':%s," % (prefix, _tuple_repr(prefixdata[prefix])), file=outfile)
prnt("}", file=outfile)
prnt("%s_LONGEST_PREFIX = %d" % (varprefix, longest_prefix), file=outfile)
def _standalone(argv):
"""Parse the given input directory and emit generated code."""
varprefix = "GEOCODE"
per_locale = True
separator = None
try:
opts, args = getopt.getopt(argv, "hv:fs:", ("help", "var=", "flat", "sep="))
except getopt.GetoptError:
prnt(__doc__, file=sys.stderr)
sys.exit(1)
for opt, arg in opts:
if opt in ("-h", "--help"):
prnt(__doc__, file=sys.stderr)
sys.exit(1)
elif opt in ("-v", "--var"):
varprefix = arg
elif opt in ("-f", "--flat"):
per_locale = False
elif opt in ("-s", "--sep"):
separator = arg
else:
prnt("Unknown option %s" % opt, file=sys.stderr)
prnt(__doc__, file=sys.stderr)
sys.exit(1)
if len(args) != 3:
prnt(__doc__, file=sys.stderr)
sys.exit(1)
if per_locale:
prefixdata = load_locale_prefixdata(args[0], se
|
OpenGov/grid_walker
|
setup.py
|
Python
|
bsd-3-clause
| 1,495
| 0.002676
|
import os
from setuptools import setup
def read(fname):
with open(fname) as fhandle:
return fhandle.read()
def readMD(fname):
# Utility function to read the README file.
full_fname = os.path.join(os.path.dirname(__file__), fname)
if 'PANDOC_PATH' in os.environ:
import pandoc
pandoc.core.PANDOC_PATH = os.environ['PANDOC_PATH']
doc = pandoc.Document()
with open(full_fname) as fhandle:
doc.markdown = fhandle.read()
return doc.rst
else:
return read(fname)
required = [req.strip() for req in read('requirements.txt').splitlines() if req.strip()]
version = '1.0.1'
setup(
name='GridWalker',
version=version,
author='Matthew Seal',
author_email='mseal@opengov.com',
description='A multi-dimensional grid used for state space searching',
long_description=readMD('README.md'),
install_requires=required,
license='New BSD',
packages=['gridwalker'],
    test_suite='tests',
zip_safe=False,
url='https://github.com/OpenGov/grid_walker',
download_url='https://github.com/OpenGov/grid_walker/tarball/v' + version,
    keywords=['grids', 'data', 'iterator', 'multi-dimensional'],
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2 :: Only'
]
)
|
skevy/django
|
django/utils/datastructures.py
|
Python
|
bsd-3-clause
| 15,444
| 0.001684
|
import copy
from types import GeneratorType
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
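# Illustrative behaviour (added example): lookups scan the dicts in order,
# so the first occurrence of a key wins.
#   md = MergeDict({'a': 1}, {'a': 2, 'b': 3})
#   md['a']  # -> 1
#   md['b']  # -> 3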
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
            # second time to set up keyOrder correctly
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
            n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
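# Illustrative behaviour (added example): keys keep insertion order, and
# insert() can reposition an existing key.
#   sd = SortedDict([('b', 2), ('a', 1)])
#   sd.keys()            # -> ['b', 'a']
#   sd.insert(0, 'a', 1)
#   sd.keys()            # -> ['a', 'b']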
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
|
amiraliakbari/grade-analyzer
|
pan/pan.py
|
Python
|
mit
| 414
| 0.007246
|
#!/usr/bin/env python
import re
import glob
class FileReader(object):
    OPTION_LINE_RE = re.compile(r'^#!\s*(.+?)\s*=(.+?)\s*$')
    def __init__(self):
self.opts = {
'sep': ' ',
'cols': 'sum',
}
def read_option_line(self, l):
m = self.OPTION_LINE_RE.match(l)
if not m:
raise ValueError
k = m.group(1)
v = m.group(2)
|
charris/numpy
|
numpy/lib/mixins.py
|
Python
|
bsd-3-clause
| 7,052
| 0
|
"""Mixin classes for custom array types that don't inherit from ndarray."""
from numpy.core import umath as um
__all__ = ['NDArrayOperatorsMixin']
def _disables_array_ufunc(obj):
"""True when __array_ufunc__ is set to None."""
try:
return obj.__array_ufunc__ is None
except AttributeError:
return False
def _binary_method(ufunc, name):
"""Implement a forward binary method with a ufunc, e.g., __add__."""
def func(self, other):
if _disables_array_ufunc(other):
return NotImplemented
return ufunc(self, other)
func.__name__ = '__{}__'.format(name)
return func
def _reflected_binary_method(ufunc, name):
"""Implement a reflected binary method with a ufunc, e.g., __radd__."""
def func(self, other):
if _disables_array_ufunc(other):
return NotImplemented
return ufunc(other, self)
func.__name__ = '__r{}__'.format(name)
return func
def _inplace_binary_method(ufunc, name):
"""Implement an in-place binary method with a ufunc, e.g., __iadd__."""
def func(self, other):
return ufunc(self, other, out=(self,))
func.__name__ = '__i{}__'.format(name)
return func
def _numeric_methods(ufunc, name):
"""Implement forward, reflected and inplace binary methods with a ufunc."""
return (_binary_method(ufunc, name),
_reflected_binary_method(ufunc, name),
_inplace_binary_method(ufunc, name))
def _unary_method(ufunc, name):
"""Implement a unary special method with a ufunc."""
def func(self):
return ufunc(self)
func.__name__ = '__{}__'.format(name)
return func
class NDArrayOperatorsMixin:
"""Mixin defining all operator special methods using __array_ufunc__.
This class implements the special methods for almost all of Python's
builtin operators defined in the `operator` module, including comparisons
(``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
deferring to the ``__array_ufunc__`` method, which subclasses must
implement.
It is useful for writing classes that do not inherit from `numpy.ndarray`,
but that should support arithmetic and numpy universal functions like
arrays as described in `A Mechanism for Overriding Ufuncs
<https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
    As a trivial example, consider this implementation of an ``ArrayLike``
class that simply wraps a NumPy array and ensures that the result of any
arithmetic operation is also an ``ArrayLike`` object::
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
def __init__(self, value):
self.value = np.asarray(value)
# One might also consider adding the built-in list type to this
# list, to support operations like np.add(array_like, list)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x.value if isinstance(x, ArrayLike) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x.value if isinstance(x, ArrayLike) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# multiple return values
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
# one return value
return type(self)(result)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.value)
In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
the result is always another ``ArrayLike``:
>>> x = ArrayLike([1, 2, 3])
>>> x - 1
ArrayLike(array([0, 1, 2]))
>>> 1 - x
ArrayLike(array([ 0, -1, -2]))
>>> np.arange(3) - x
ArrayLike(array([-1, -1, -1]))
>>> x - np.arange(3)
ArrayLike(array([1, 1, 1]))
Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
with arbitrary, unrecognized types. This ensures that interactions with
ArrayLike preserve a well-defined casting hierarchy.
.. versionadded:: 1.13
"""
# Like np.ndarray, this mixin class implements "Option 1" from the ufunc
# overrides NEP.
# comparisons don't have reflected and in-place versions
__lt__ = _binary_method(um.less, 'lt')
__le__ = _binary_method(um.less_equal, 'le')
__eq__ = _binary_method(um.equal, 'eq')
__ne__ = _binary_method(um.not_equal, 'ne')
__gt__ = _binary_method(um.greater, 'gt')
__ge__ = _binary_method(um.greater_equal, 'ge')
# numeric methods
__add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
__matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
um.matmul, 'matmul')
# Python 3 does not use __div__, __rdiv__, or __idiv__
__truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
um.true_divide, 'truediv')
__floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
um.floor_divide, 'floordiv')
__mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
__divmod__ = _binary_method(um.divmod, 'divmod')
__rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
# __idivmod__ does not exist
# TODO: handle the optional third argument for __pow__?
__pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
__lshift__, __rlshift__, __ilshift__ = _numeric_methods(
um.left_shift, 'lshift')
__rshift__, __rrshift__, __irshift__ = _numeric_methods(
um.right_shift, 'rshift')
__and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
__xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
__or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
# unary methods
__neg__ = _unary_method(um.negative, 'neg')
__pos__ = _unary_method(um.positive, 'pos')
__abs__ = _unary_method(um.absolute, 'abs')
__invert__ = _unary_method(um.invert, 'invert')
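# A minimal usage sketch (not part of the original module; the ``Wrapper``
# name is hypothetical). Inheriting the mixin and defining __array_ufunc__ is
# enough to pick up every operator special method defined above:
#
#     import numpy as np
#
#     class Wrapper(NDArrayOperatorsMixin):
#         def __init__(self, value):
#             self.value = np.asarray(value)
#         def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
#             # Unwrap Wrapper inputs, apply the ufunc, re-wrap the result.
#             inputs = tuple(x.value if isinstance(x, Wrapper) else x
#                            for x in inputs)
#             return Wrapper(getattr(ufunc, method)(*inputs, **kwargs))
#
#     Wrapper([1, 2]) + 3    # Wrapper around array([4, 5])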
|
Robpol86/terminaltables
|
terminaltables/ascii_table.py
|
Python
|
mit
| 2,734
| 0.004389
|
"""AsciiTable is the main table class. To be inherited by other tables. Define convenience methods here."""
from terminaltables.base_table import BaseTable
from terminaltables.terminal_io import terminal_size
from terminaltables.width_and_alignment import column_max_width, max_dimensions, table_width
class AsciiTable(BaseTable):
"""Draw a table using regular ASCII characters, such as ``+``, ``|``, and ``-``.
:ivar iter table_data: List (empty or list of lists of strings) representing the table.
:ivar str title: Optional title to show within the top border of the table.
:ivar bool inner_column_border: Separates columns.
:ivar bool inner_footing_row_border: Show a border before the last row.
:ivar bool inner_heading_row_border: Show a border after the first row.
:ivar bool inner_row_border: Show a border in between every row.
:ivar bool outer_border: Show the top, left, right, and bottom border.
:ivar dict justify_columns: Horizontal justification. Keys are column indexes (int). Values are right/left/center.
:ivar int padding_left: Number of spaces to pad on the left side of every cell.
:ivar int padding_right: Number of spaces to pad on the right side of every cell.
"""
def column_max_width(self, column_number):
"""Return the maximum width of a column based on the current terminal width.
:param int column_number: The column number to query.
:return: The max width of the column.
:rtype: int
"""
inner_widths = max_dimensions(self.table_data)[0]
outer_border = 2 if self.outer_border else 0
inner_border = 1 if self.inner_column_border else 0
padding = self.padding_left + self.padding_right
return column_max_width(inner_widths, column_number, outer_border, inner_border, padding)
@property
def column_widths(self):
"""Return a list of integers representing the widths of each table column without padding."""
if not self.table_data:
return list()
return max_dimensions(self.table_data)[0]
@property
    def ok(self):  # Too late to change API. # pylint: disable=invalid-name
"""Return True if the table fits within the terminal width, False if the table breaks."""
return self.table_width <= terminal_size()[0]
@property
def table_width(self):
"""Return the width of the tab
|
le including padding and borders."""
outer_widths = max_dimensions(self.table_data, self.padding_left, self.padding_right)[2]
outer_border = 2 if self.outer_border else 0
inner_border = 1 if self.inner_column_border else 0
return table_width(outer_widths, outer_border, inner_border)
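# Usage sketch (illustrative only, not part of the original file):
#
#     table = AsciiTable([['Name', 'Color'], ['Lettuce', 'green']])
#     print(table.table)    # renders the grid using +, | and - characters
#     table.ok              # True when table_width fits the current terminal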
|
wreckJ/hamster
|
src/hamster/preferences.py
|
Python
|
gpl-3.0
| 22,492
| 0.006269
|
# -*- coding: utf-8 -*-
# Copyright (C) 2007, 2008, 2014 Toms Bauģis <toms.baugis at gmail.com>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GObject as gobject
import datetime as dt
from gettext import ngettext
from hamster.lib.configuration import Controller
def get_prev(selection, model):
(model, iter) = selection.get_selected()
#previous item
path = model.get_path(iter)[0] - 1
if path >= 0:
return model.get_iter_from_string(str(path))
else:
return None
class CategoryStore(gtk.ListStore):
def __init__(self):
#id, name, color_code, order
gtk.ListStore.__init__(self, int, str)
def load(self):
category_list = runtime.storage.get_categories()
for category in category_list:
self.append([category['id'], category['name']])
self.unsorted_category = self.append([-1, _("Unsorted")]) # all activities without category
class ActivityStore(gtk.ListStore):
def __init__(self):
#id, name, category_id, order
gtk.ListStore.__init__(self, int, str, int)
def load(self, category_id):
self.clear()
if category_id is None:
return
activity_list = runtime.storage.get_category_activities(category_id)
for activity in activity_list:
self.append([activity['id'],
activity['name'],
activity['category_id']])
formats = ["fixed", "symbolic", "minutes"]
appearances = ["text", "icon", "both"]
from hamster.lib.configuration import runtime, conf
import widgets
from lib import stuff, trophies
class PreferencesEditor(Controller):
TARGETS = [
('MY_TREE_MODEL_ROW', gtk.TargetFlags.SAME_WIDGET, 0),
('MY_TREE_MODEL_ROW', gtk.TargetFlags.SAME_APP, 0),
]
def __init__(self, parent = None):
Controller.__init__(self, parent, ui_file="preferences.ui")
# Translators: 'None' refers here to the Todo list choice in Hamster preferences (Tracking tab)
self.activities_sources = [("", _("None")),
("evo", "Evolution"),
("gtg", "Getting Things Gnome")]
self.todo_combo = gtk.ComboBoxText()
for code, label in self.activities_sources:
self.todo_combo.append_text(label)
self.todo_combo.connect("changed", self.on_todo_combo_changed)
self.get_widget("todo_pick").add(self.todo_combo)
# create and fill activity tree
self.activity_tree = self.get_widget('activity_list')
self.get_widget("activities_label").set_mnemonic_widget(self.activity_tree)
self.activity_store = ActivityStore()
self.external_listeners = []
self.activityColumn = gtk.TreeViewColumn(_("Name"))
self.activityColumn.set_expand(True)
        self.activityCell = gtk.CellRendererText()
self.external_listeners.extend([
            (self.activityCell, self.activityCell.connect('edited', self.activity_name_edited_cb, self.activity_store))
])
self.activityColumn.pack_start(self.activityCell, True)
self.activityColumn.set_attributes(self.activityCell, text=1)
self.activityColumn.set_sort_column_id(1)
self.activity_tree.append_column(self.activityColumn)
self.activity_tree.set_model(self.activity_store)
self.selection = self.activity_tree.get_selection()
self.external_listeners.extend([
(self.selection, self.selection.connect('changed', self.activity_changed, self.activity_store))
])
# create and fill category tree
self.category_tree = self.get_widget('category_list')
self.get_widget("categories_label").set_mnemonic_widget(self.category_tree)
self.category_store = CategoryStore()
self.categoryColumn = gtk.TreeViewColumn(_("Category"))
self.categoryColumn.set_expand(True)
self.categoryCell = gtk.CellRendererText()
self.external_listeners.extend([
(self.categoryCell, self.categoryCell.connect('edited', self.category_edited_cb, self.category_store))
])
self.categoryColumn.pack_start(self.categoryCell, True)
self.categoryColumn.set_attributes(self.categoryCell, text=1)
self.categoryColumn.set_sort_column_id(1)
self.categoryColumn.set_cell_data_func(self.categoryCell, self.unsorted_painter)
self.category_tree.append_column(self.categoryColumn)
self.category_store.load()
self.category_tree.set_model(self.category_store)
selection = self.category_tree.get_selection()
self.external_listeners.extend([
(selection, selection.connect('changed', self.category_changed_cb, self.category_store))
])
self.day_start = widgets.TimeInput(dt.time(5,30))
self.get_widget("day_start_placeholder").add(self.day_start)
self.load_config()
# Allow enable drag and drop of rows including row move
self.activity_tree.enable_model_drag_source(gdk.ModifierType.BUTTON1_MASK,
self.TARGETS,
gdk.DragAction.DEFAULT|
gdk.DragAction.MOVE)
self.category_tree.enable_model_drag_dest(self.TARGETS,
gdk.DragAction.MOVE)
self.activity_tree.connect("drag_data_get", self.drag_data_get_data)
self.category_tree.connect("drag_data_received", self.on_category_drop)
#select first category
selection = self.category_tree.get_selection()
selection.select_path((0,))
self.prev_selected_activity = None
self.prev_selected_category = None
self.external_listeners.extend([
(self.day_start, self.day_start.connect("time-entered", self.on_day_start_changed))
])
self.show()
def show(self):
self.get_widget("notebook1").set_current_page(0)
self.window.show_all()
def on_todo_combo_changed(self, combo):
conf.set("activities_source", self.activities_sources[combo.get_active()][0])
def load_config(self, *args):
self.get_widget("shutdown_track").set_active(conf.get("stop_on_shutdown"))
self.get_widget("idle_track").set_active(conf.get("enable_timeout"))
self.get_widget("notify_interval").set_value(conf.get("notify_interval"))
self.get_widget("notify_on_idle").set_active(conf.get("notify_on_idle"))
self.get_widget("notify_on_idle").set_sensitive(conf.get("notify_interval") <=120)
day_start = conf.get("day_start_minutes")
day_start = dt.time(day_start / 60, day_start % 60)
self.day_start.set_time(day_start)
self.tags = [tag["name"] for tag in runtime.storage.get_tags(only_autocomplete=True)]
self.get_widget("autocomplete_tags").set_text(", ".join(self.tags))
current_source = conf.get("activities_source")
for i, (code, label) in enumerate(self.activities_sources):
if code == current_source:
self.todo_combo.set_active(i)
def on_autocomplete_tags_view_focus_out_event(self, view, event):
buf = self.get_widget("autocomplete_tags")
updated_tags = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), 0) \
|
brioscaibriste/iarnrod
|
coire.py
|
Python
|
gpl-3.0
| 6,003
| 0.01316
|
#!/usr/bin/python3
#
# Copyright: Conor O'Callghan 2016
# Version: v1.1.3
#
# Please feel free to fork this project, modify the code and improve
# it on the github repo https://github.com/brioscaibriste/iarnrod
#
# Powered by TfL Open Data
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
import tempfile
import time
import os
from urllib.request import urlopen
'''
ParseArgs
A simple function to parse the command line arguments passed to the function.
The function does very little sanitisation on the input variables. The
argument passed is then returned from the function.
'''
def ParseArgs():
# Parse our command line argument for the line name
parser = argparse.ArgumentParser()
parser.add_argument('--line',dest='LineName',help='Specify the London line you want to report on')
args = parser.parse_args()
# Check if the value is blank
Line = (args.LineName)
if not Line:
print ("\nError, you must specify a line name! e.g. --line district\n")
sys.exit(1)
# Convert the line name to lower case for easy comparison
Line = Line.lower()
# If the line isn't in the line list, fail badly
if Line not in ('district','circle','victoria','central','northern',
'bakerloo','hammersmith-city','jubilee','metropolitan',
'piccadilly','waterloo-city','dlr',):
print ("\nError, you have specified " + Line + " as your line. You must specify one of the following: "
"\n\tDistrict"
"\n\tCircle"
"\n\tVictora"
"\n\tCentral"
"\n\tNorthern"
"\n\tPiccadilly"
"\n\tBakerloo"
"\n\thammersmith-city"
"\n\twaterloo-city"
"\n\tDLR"
"\n\tMetropolitan"
"\n\tJubilee\n")
sys.exit(1)
# Convert the tube line back to upper case for nice display
Line = Line.upper()
return Line
'''
RetrieveTFLData
Inputs:
Line - Which line to retrieve information on
Run - Should the data retrieval be run or should the cache file be used
SFileName - The file in which to store the line status cache
This function takes the Line variable (a name of a Transport For London line
name) and polls the TFL API. The function then returns the current line
status for the specified line.
'''
def RetrieveTFLData(Line,Run,SFileName):
# TFL Unified API URL
TFLDataURL = "https://api.tfl.gov.uk/Line/" + Line + ("/Status?detail=False"
"&app_id=&app_key=")
if Run:
# Read all the information from JSON at the specified URL, can be re-done with requests?
        RawData = urlopen(TFLDataURL).read().decode('utf8') or sys.exit("Error, failed to "
                                                                        "retrieve the data from the TFL website")
TFLData = json.loads(RawData)
# Sanitize the data to get the line status
Scratch = (TFLData[0]['lineStatuses'])
LineStatusData = (Scratch[0]['statusSeverityDescription'])
# Cache the staus in a file
with open(SFileName, 'w+') as SFile:
SFile.write(LineStatusData)
SFile.closed
else:
with open(SFileName, 'r+') as SFile:
LineStatusData = SFile.read()
SFile.closed
return LineStatusData
'''
Throttle
Inputs
PollIntervalMinutes - Polling interval in minutes
Throttle - Should we throttle the connection or not?
TFileName - The file where the timestamp for throttling usage is stored
This function is used to determine whether or not the next run of the retrieval of data should run.
It retrieves the previously run time from a file in /tmp if it exists; if the file does not exist,
the run status will return as 1 and the current time stamp will be written into a new file.
If throttling is disabled, the file will be removed from /tmp and run will be set to 1.
'''
def Throttle(PollIntervalMinutes,Throttling,TFileName):
if Throttling == "True":
# Current epoch time
# CurrentStamp = str(time.time()).split('.')[0]
CurrentStamp = int(time.time())
# Does the temporary file exist or not
if os.path.isfile(TFileName):
# Open the temp file and read the time stamp
with open(TFileName, 'r+') as TFile:
TimeFile = TFile.read()
Remainder = CurrentStamp - int(TimeFile)
else:
# Get the current time stamp and write it to the temp file
with open(TFileName, 'w') as TFile:
TFile.write(str(CurrentStamp))
# Set the Remainder high to force the next run
Remainder = 1000000
# If the remainder is less than the poll interval don't run the command, if it isn't run the command
if ( Remainder < (PollIntervalMinutes * 60) ):
Run = 0
else:
Run = 1
# Set the command to run and re-write the poll time to file
with open(TFileName, 'w') as TFile:
TFile.write(str(CurrentStamp))
return Run
else:
# Remove the time file if it exists
try:
os.remove(TFileName)
except OSError:
pass
Run = 1
return Run
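# Usage sketch tying the helpers above together (the file names below are
# hypothetical, chosen only for illustration):
#
#     TFileName = os.path.join(tempfile.gettempdir(), 'iarnrod-poll-time')
#     SFileName = os.path.join(tempfile.gettempdir(), 'iarnrod-status')
#     Run = Throttle(2, "True", TFileName)    # poll at most once every 2 minutes
#     Status = RetrieveTFLData(ParseArgs(), Run, SFileName)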
|
Metaswitch/horizon
|
openstack_dashboard/contrib/sahara/content/data_processing/cluster_templates/workflows/create.py
|
Python
|
apache-2.0
| 12,426
| 0.000161
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from saharaclient.api import base as api_base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
from openstack_dashboard.contrib.sahara.content.data_processing. \
utils import helpers as helpers
from openstack_dashboard.contrib.sahara.content.data_processing. \
utils import anti_affinity as aa
import openstack_dashboard.contrib.sahara.content.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
class SelectPluginAction(workflows.Action):
hidden_create_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
def __init__(self, request, *args, **kwargs):
super(SelectPluginAction, self).__init__(request, *args, **kwargs)
try:
plugins = saharaclient.plugin_list(request)
except Exception:
plugins = []
exceptions.handle(request,
_("Unable to fetch plugin list."))
plugin_choices = [(plugin.name, plugin.title) for plugin in plugins]
self.fields["plugin_name"] = forms.ChoiceField(
label=_("Plugin name"),
choices=plugin_choices,
widget=forms.Select(attrs={"class": "plugin_name_choice"}))
for plugin in plugins:
field_name = plugin.name + "_version"
choice_field = forms.ChoiceField(
label=_("Version"),
choices=[(version, version) for version in plugin.versions],
widget=forms.Select(
attrs={"class": "plugin_version_choice "
+ field_name + "_choice"})
)
self.fields[field_name] = choice_field
class Meta(object):
name = _("Select plugin and hadoop version for cluster template")
help_text_template = ("project/data_processing.cluster_templates/"
"_create_general_help.html")
class SelectPlugin(workflows.Step):
action_class = SelectPluginAction
class CreateClusterTemplate(workflows.Workflow):
slug = "create_cluster_template"
name = _("Create Cluster Template")
finalize_button_name = _("Next")
success_message = _("Created")
failure_message = _("Could not create")
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (SelectPlugin,)
class GeneralConfigAction(workflows.Action):
    hidden_configure_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_configure_field"}))
hidden_to_delete_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_to_delete_field"}))
cluster_template_name = forms.CharField(label=_("Template Name"))
description = forms.CharField(label=_("Description"),
required=False,
widget=forms.Textarea(attrs={'rows': 4}))
anti_affinity = aa.anti_affinity_field()
def __init__(self, request, *args, **kwargs):
super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
self.fields["plugin_name"] = forms.CharField(
widget=forms.HiddenInput(),
initial=plugin
)
self.fields["hadoop_version"] = forms.CharField(
widget=forms.HiddenInput(),
initial=hadoop_version
)
populate_anti_affinity_choices = aa.populate_anti_affinity_choices
def get_help_text(self):
extra = dict()
plugin, hadoop_version = whelpers\
.get_plugin_and_hadoop_version(self.request)
extra["plugin_name"] = plugin
extra["hadoop_version"] = hadoop_version
return super(GeneralConfigAction, self).get_help_text(extra)
def clean(self):
cleaned_data = super(GeneralConfigAction, self).clean()
if cleaned_data.get("hidden_configure_field", None) \
== "create_nodegroup":
self._errors = dict()
return cleaned_data
class Meta(object):
name = _("Details")
help_text_template = ("project/data_processing.cluster_templates/"
"_configure_general_help.html")
class GeneralConfig(workflows.Step):
action_class = GeneralConfigAction
contributes = ("hidden_configure_field", )
def contribute(self, data, context):
for k, v in data.items():
context["general_" + k] = v
post = self.workflow.request.POST
context['anti_affinity_info'] = post.getlist("anti_affinity")
return context
class ConfigureNodegroupsAction(workflows.Action):
hidden_nodegroups_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_nodegroups_field"}))
forms_ids = forms.CharField(
required=False,
widget=forms.HiddenInput())
def __init__(self, request, *args, **kwargs):
super(ConfigureNodegroupsAction, self). \
__init__(request, *args, **kwargs)
plugin = request.REQUEST.get("plugin_name")
version = request.REQUEST.get("hadoop_version")
if plugin and not version:
version_name = plugin + "_version"
version = request.REQUEST.get(version_name)
if not plugin or not version:
self.templates = saharaclient.nodegroup_template_find(request)
else:
self.templates = saharaclient.nodegroup_template_find(
request, plugin_name=plugin, hadoop_version=version)
deletable = request.REQUEST.get("deletable", dict())
request_source = None
if 'forms_ids' in request.POST:
request_source = request.POST
elif 'forms_ids' in request.REQUEST:
request_source = request.REQUEST
if request_source:
self.groups = []
for id in json.loads(request_source['forms_ids']):
group_name = "group_name_" + str(id)
template_id = "template_id_" + str(id)
count = "count_" + str(id)
serialized = "serialized_" + str(id)
self.groups.append({"name": request_source[group_name],
"template_id": request_source[template_id],
"count": request_source[count],
"id": id,
"deletable": deletable.get(
request_source[group_name], "true"),
"serialized": request_source[serialized]})
whelpers.build_node_group_fields(self,
group_name,
template_id,
count,
serialized)
def clean(self):
cleaned_data = super(ConfigureNodegroupsAction, self).clean()
if cleaned_data.get("hidden_nodegroups_field", None) \
== "create_nodegroup":
self._errors = dict()
return cleaned_data
class Meta(object):
name = _("Node Groups")
class ConfigureNodegroups(workflows.Step):
|
jvlomax/Beaker-bot
|
plugins/arewedone.py
|
Python
|
gpl-2.0
| 1,837
| 0.006532
|
__author__ = 'george'
from baseclass import Plugin
import time
from apscheduler.scheduler import Scheduler
class AreWeDone(Plugin):
def __init__(self, skype):
super(AreWeDone, self).__init__(skype)
self.command = "arewedoneyet"
self.sched = Scheduler()
self.sched.start()
self.sched.add_cron_job(self.set_topic, hour="*", minute=2, day_of_week="monun")
def message_received(self, args, status, msg):
cur_time = time.localtime()
if cur_time.tm_mday == 31 or cur_time.tm_mday == 1:
time_left = 1 - cur_time.tm_mday % 31
hours_left = 23 - cur_time.tm_hour
mins_left = 59 - cur_time.tm_min
msg.Chat.SendMessage("%d days, %d hours and %d mins left until we are done" % (time_left, hours_left, mins_left))
print "%d days, %d hours and %d mins left until we are done" % (time_left, hours_left, mins_left)
else:
msg.Chat.SendMessage("You are now done.
|
Please visit http://www.nav.n
|
o for more information")
def set_topic(self):
channel = "#stigrk85/$jvlomax;b43a0c90a2592b9b"
chat = self.skype.Chat(channel)
cur_time = time.localtime()
days_left = 1 - cur_time.tm_mday % 31
time_left = 24 - cur_time.tm_hour + days_left * 24
if cur_time.tm_hour >= 21 or cur_time.tm_hour < 6:
tod = "night"
else:
tod= "day"
if days_left > 0:
left = "second"
else:
left = "final"
if cur_time.tm_mday == 1:
chat.SendMessage("/topic {} of the {} day - {} hours remain".format(tod, left, time_left))
else:
chat.SendMessage("Congratulations, You have survived. Please visit http://www.nav.no for more information".format(tod, left, time_left))
|
Jumpscale/go-raml
|
codegen/fixtures/congo/python_server/server.py
|
Python
|
bsd-2-clause
| 425
| 0
|
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from app import app
import gevent
from gevent.pywsgi import WSGIServer
from gevent.pool import Pool
from gevent import monkey
import signal
monkey.patch_all()
server = WSGIServer(('', 5000), app, spawn=Pool(None))
def stop():
server.stop()
gevent.signal(signal.SIGINT, stop)
if __name__ == "__main__":
    server.serve_forever()
|
michelesr/gasistafelice
|
gasistafelice/gf/base/const.py
|
Python
|
agpl-3.0
| 851
| 0.00235
|
from django.utils.translation import ugettext, ugettext_lazy as _
from django.conf import settings
STATE_CHOICES = [
('AN', 'Ancona'),
('AP', 'Ascoli Piceno'),
('FM', 'Fermo'),
('MC', 'Macerata'),
('PU', 'Pesaro Urbino')
]
SUPPLIER_FLAVOUR_LIST = [
('COMPANY', _('Company')),
    ('COOPERATING', _('Cooperating')),
('FREELANCE', _('Freelance')),
]
MU_CHOICES = [('Km', 'Km')]
ALWAYS_AVAILABLE = 1000000000
PHONE = 'PHONE'
EMAIL = 'EMAIL'
FAX = 'FAX'
#WWW = 'WWW'
CONTACT_CHOICES = [
(PHONE, _('PHONE')),
(EMAIL, _('EMAIL')),
(FAX, _('FAX')),
]
# (WWW, _('WWW')),
DAY_CHOICES = [
('MONDAY', _('Monday')),
('TUESDAY', _('Tuesday')),
('WEDNESDAY', _('Wednesday')),
('THURSDAY', _('Thursday')),
('FRIDAY', _('Friday')),
('SATURDAY', _('Saturday')),
    ('SUNDAY', _('Sunday')),
]
|
fluggo/Canvas
|
fluggo/editor/model/commands.py
|
Python
|
gpl-3.0
| 38,160
| 0.004271
|
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010-2 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections, itertools
from .items import *
from fluggo import logging
from PyQt4.QtCore import *
from PyQt4.QtGui import *
logger = logging.getLogger(__name__)
_Placement = collections.namedtuple('_Placement', 'min max index')
def _split_sequence_items_by_overlap(items):
'''Splits the *items* (which should all belong to the same sequence and be
sorted by index) into a list of lists of items, where items in each list overlap,
but do not overlap with items in the other lists (and thus each list can be
moved independently).'''
if not items:
return []
next_list = [items[0]]
result = [next_list]
for item in items[1:]:
if item.index != next_list[-1].index + 1 or next_list[-1].transition_length >= 0:
next_list = [item]
result.append(next_list)
else:
next_list.append(item)
return result
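# Illustration (hedged, not from the original source): given a sorted run of
# sequence items, each returned sublist is a maximal run of mutually
# overlapping neighbours, e.g. [[a, b], [c], [d, e, f]], so every sublist can
# be dragged as one unit without disturbing items in the other sublists.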
def _split_sequence_items_by_adjacency(items):
'''Splits the *items* (which should all belong to the same sequence and be
sorted by index) into a list of lists of items, where items in each list are
adjacent (have indexes that differ by one)'''
if not items:
return []
next_list = [items[0]]
result = [next_list]
start_offset = items[0].x
for item in items[1:]:
if item.index != next_list[-1].index + 1:
next_list = [item]
result.append(next_list)
else:
next_list.append(item)
return result
class SequenceItemsMover:
'''Class for moving any group of sequence items.
The items should either all belong to the same sequence or not belong to a
sequence at all. If they don't belong to a sequence, they need to already be
in the right order.'''
def __init__(self, items):
if items[0].sequence:
# Sort and set positions by index and x
items = sorted(items, key=lambda a: a.index)
base_x = items[0].x
self.overlap_movers = list(
SequenceOverlapItemsMover(group, group[0].x - base_x) for group in
_split_sequence_items_by_overlap(items))
else:
# Update _x attributes before we go into this
x = 0
index = 0
for item in items:
if index != 0:
x -= item.transition_length
item._x = x
item._index = index
x += item.length
index += 1
self.overlap_movers = list(
SequenceOverlapItemsMover(group, group[0].x) for group in
_split_sequence_items_by_overlap(items))
def to_item(self, height=10.0, x=0, y=0):
'''Return a space Item for containing the items from this SequenceItemsMover.
If there is one item, this will be a Clip. Otherwise, it will be a Sequence.
The items will be cloned for moving to the new sequence.'''
if len(self.overlap_movers) == 1 and len(self.overlap_movers[0].items) == 1:
# Make a clip
item = self.overlap_movers[0].items[0]
return Clip(
x=x, y=y,
length=item.length,
height=height,
type=item.type(),
source=item.source,
offset=item.offset,
in_motion=item.in_motion,
anchor=item.anchor)
seq_items = []
last_x = 0
for group in self.overlap_movers:
items = group.clone_items()
items[0].update(transition_length=-(group.offset - last_x))
seq_items.extend(items)
last_x = group.offset + group.length
return Sequence(x=x, y=y, type=seq_items[0].type(), items=seq_items,
height=height, in_motion=self.overlap_movers[0].items[0].in_motion)
class SequenceOverlapItemsMover:
'''Mover for overlapping items belonging to the same sequence.'''
def __init__(self, items, offset=None):
'''Creates an overlapping items mover with the given *items*.
*offset* can be used to store the group's offset from other groups.'''
self.items = items
self.offset = offset
# The items may not have x values, or they may not be accurate
# Calculate the long way
self.length = sum(items[i].length - (items[i].transition_length if i > 0 else 0)
for i in range(len(items)))
# max_fadeout_length: The maximum value for the next item's
# transition_length, which is the distance from the end of our last item
# to max(start of item, end of item's transition); since these items are
        # overlapping, the last item must have a positive transition_length
self.max_fadeout_length = items[-1].length
# Ditto, but at the beginning of the items
self.max_fadein_length = items[0].length
        if len(items) > 1:
            self.max_fadeout_length -= items[-1].transition_length
self.max_fadein_length -= items[1].transition_length
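    # Worked example (hedged, not from the original source): two items of
    # length 10 where the second overlaps the first by 4 units
    # (items[1].transition_length == 4) give length == 10 + (10 - 4) == 16,
    # max_fadeout_length == 10 - 4 == 6 and max_fadein_length == 10 - 4 == 6.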
def clone_items(self):
'''Clones all of this mover's items. The cloned items will be homeless.'''
return [item.clone() for item in self.items]
def clone(self):
'''Clones this mover and all of its items. The cloned items will be homeless.'''
return SequenceOverlapItemsMover(self.clone_items(), offset=self.offset)
@classmethod
def from_clip(cls, clip):
seq_item = SequenceItem(source=clip.source,
length=clip.length,
offset=clip.offset,
transition_length=0,
type=clip.type(),
in_motion=clip.in_motion)
return cls([seq_item])
class NoRoomError(Exception):
def __init__(self, message='There is no room for the item.', *args, **kw):
Exception.__init__(self, message, *args, **kw)
class AddOverlapItemsToSequenceCommand(QUndoCommand):
def __init__(self, sequence, mover, x, parent=None):
'''This is where the real add work is done. To add a clip to a sequence,
convert it to a sequence item and add it to a SequenceOverlapItemsMover.
*x* is space-relative. The clips should not belong to a sequence already.
If the given mover can be placed at two indexes, it it is put at the
lower index.'''
QUndoCommand.__init__(self, 'Add overlapping items to sequence', parent)
self.sequence = sequence
self.mover = mover
self.x = x
if self.sequence.type() != self.mover.items[0].type():
raise NoRoomError('The item type is incompatible with the sequence type.')
# We do a check here, but we'll be doing it again at the actual insert
if self.where_can_fit(x) is None:
raise NoRoomError
# BJC: Note that we don't calculate the new transition_lengths or keep
# the next item here; this is because the nearby items are allowed to
# change (in certain ways) between the creation of this command and its
# execution
self.orig_transition_length = self.mover.items[0].transition_length
def redo(self):
index = self.where_can_fit(self.x)
if index is None:
raise NoRoomError
self.index = index
x = self.x - self.sequence.x
self.orig_sequence_x = self.sequence.x
# at_index -
|
burzillibus/RobHome
|
venv/bin/rst2s5.py
|
Python
|
mit
| 671
| 0.00149
|
#!/home/vincenzo/Development/RobHome/venv/bin/python
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
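# Typical invocation (illustrative):
#
#     python rst2s5.py slides.txt slides.html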
|
google-research/google-research
|
etcmodel/models/openkp/run_finetuning_lib_test.py
|
Python
|
apache-2.0
| 7,530
| 0.001726
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_finetuning_lib."""
import tensorflow.compat.v1 as tf
from etcmodel.models.openkp import run_finetuning_lib
class RunFinetuningLibTest(tf.test.TestCase):
def test_dense_feature_scaler(self):
tensor = tf.constant([-15., -12., -20., 5., -100.])
expected = [0, 0.6, -1., 1., -1.]
scaler = run_finetuning_lib.DenseFeatureScaler(min_value=-20, max_value=-10)
self.assertAllClose(expected, scaler.transform(tensor))
def test_dense_feature_scaler_invalid_range(self):
with self.assertRaises(ValueError):
run_finetuning_lib.DenseFeatureScaler(min_value=10, max_value=5)
with self.assertRaises(ValueError):
run_finetuning_lib.DenseFeatureScaler(min_value=10, max_value=10)
def test_indicators_to_id(self):
indicator1 = tf.constant([0, 1, 0, 1], dtype=tf.int32)
indicator2 = tf.constant([1, 0, 0, 1], dtype=tf.int32)
indicator3 = tf.constant([0, 1, 1, 1], dtype=tf.int32)
expected = [2, 5, 1, 7]
self.assertAllEqual(
expected,
run_finetuning_lib.indicators_to_id(indicator1, indicator2, indicator3))
def test_gather_global_embeddings_to_long(self):
global_embeddings = [
[
[.1, -.1],
[.2, -.2],
[.3, -.3],
], #
[
[1.1, -1.1],
[1.2, -1.2],
[1.3, -1.3],
], #
[
[2.1, -2.1],
[2.2, -2.2],
[2.3, -2.3],
], #
]
long_vdom_idx = [
[0, 1, 1, 2, 2], #
[0, 0, 0, 1, 2], #
[0, 1, 2, 0, 0], # Padding can be 0 since their embedding is ignored.
]
expected = [
[
[.1, -.1],
[.2, -.2],
[.2, -.2],
[.3, -.3],
[.3, -.3],
], #
[
[1.1, -1.1],
[1.1, -1.1],
[1.1, -1.1],
[1.2, -1.2],
[1.3, -1.3],
], #
[
[2.1, -2.1],
[2.2, -2.2],
[2.3, -2.3],
[2.1, -2.1],
[2.1, -2.1],
], #
]
self.assertAllClose(
expected,
run_finetuning_lib.gather_global_embeddings_to_long(
global_embeddings, long_vdom_idx))
def test_batch_segment_sum_embeddings(self):
# batch_size = 2
# long_max_length = 8
# hidden_size = 2
long_embeddings = tf.constant(
[
[
[0.1, -0.1],
[0.2, -0.2],
[0.3, -0.3],
[0.4, -0.4],
[0.5, -0.5],
[100.0, -100.0], # Padding embeddings may be arbitrary.
[200.0, -200.0],
[300.0, -300.0],
], #
[
[1.1, -1.1],
[1.2, -1.2],
[1.3, -1.3],
[1.4, -1.4],
[1.5, -1.5],
[1.6, -1.6],
[400.0, 400.0], # Padding embeddings may be arbitrary.
[500.0, 500.0],
], #
],
dtype=tf.float32)
long_word_idx = tf.constant(
[
[0, 1, 2, 2, 3, 0, 0, 0], # Padding indices can just be 0.
[0, 0, 0, 1, 2, 2, 0, 0], # Padding indices can just be 0.
],
dtype=tf.int32)
long_input_mask = tf.constant(
[
[1, 1, 1, 1, 1, 0, 0, 0], #
[1, 1, 1, 1, 1, 1, 0, 0], #
],
dtype=tf.int32)
expected = [
[
[0.1, -0.1],
[0.2, -0.2],
[0.7, -0.7],
[0.5, -0.5],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
], #
[
[3.6, -3.6],
[1.4, -1.4],
[3.1, -3.1],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
], #
]
self.assertAllClose(
expected,
run_finetuning_lib.batch_segment_sum_embeddings(
long_embeddings=long_embeddings,
long_word_idx=long_word_idx,
long_input_mask=long_input_mask))
def test_make_ngram_labels(self):
label_start_idx = tf.constant([
[1, -1, -1],
[2, 3, 0],
])
label_phrase_len = tf.constant([
[3, -1, -1],
[2, 1, 2],
])
long_max_length = 4
kp_max_length = 5
reshaped_expected = [
[
[0, 0, 0, 0], # 1-grams
[0, 0, 0, 0], # 2-grams
[0, 1, 0, 0], # 3-grams
[0, 0, 0, 0], # 4-grams
[0, 0, 0, 0], # 5-grams
        ],
        [
[0, 0, 0, 1 / 3], # 1-grams
[1 / 3, 0, 1 / 3, 0], # 2-grams
[0, 0, 0, 0], # 3-grams
[0, 0, 0, 0], # 4-grams
[0, 0, 0, 0], # 5-grams
],
]
batch_size = len(reshaped_expected)
    expected = tf.reshape(reshaped_expected,
                          [batch_size, kp_max_length * long_max_length])
self.assertAllClose(
expected,
run_finetuning_lib.make_ngram_labels(
label_start_idx=label_start_idx,
label_phrase_len=label_phrase_len,
long_max_length=long_max_length,
kp_max_length=kp_max_length))
def test_make_ngram_labels_additive_smoothing(self):
label_start_idx = tf.constant([
[1, -1, -1],
[2, 3, 0],
])
label_phrase_len = tf.constant([
[3, -1, -1],
[2, 1, 2],
])
long_max_length = 4
kp_max_length = 5
additive_smoothing_mass = 1.0
smoothed_third = (1 / 3) + 0.05
reshaped_unnormalized_expected = [
[
[0.05, 0.05, 0.05, 0.05], # 1-grams
[0.05, 0.05, 0.05, 0.05], # 2-grams
[0.05, 1.05, 0.05, 0.05], # 3-grams
[0.05, 0.05, 0.05, 0.05], # 4-grams
[0.05, 0.05, 0.05, 0.05], # 5-grams
],
[
[0.05, 0.05, 0.05, smoothed_third], # 1-grams
[smoothed_third, 0.05, smoothed_third, 0.05], # 2-grams
[0.05, 0.05, 0.05, 0.05], # 3-grams
[0.05, 0.05, 0.05, 0.05], # 4-grams
[0.05, 0.05, 0.05, 0.05], # 5-grams
],
]
batch_size = len(reshaped_unnormalized_expected)
unnormalized_expected = tf.reshape(
reshaped_unnormalized_expected,
[batch_size, kp_max_length * long_max_length])
expected = (
unnormalized_expected /
tf.reduce_sum(unnormalized_expected, axis=-1, keepdims=True))
self.assertAllClose(
expected,
run_finetuning_lib.make_ngram_labels(
label_start_idx=label_start_idx,
label_phrase_len=label_phrase_len,
long_max_length=long_max_length,
kp_max_length=kp_max_length,
additive_smoothing_mass=additive_smoothing_mass))
if __name__ == '__main__':
tf.test.main()
|
thesharp/botogram
|
tests/test_decorators.py
|
Python
|
mit
| 535
| 0
|
"""
Tests for botogram/utils.py
    Copyright (c) 2015 Pietro Albini <pietro@pietroalbini.io>
Released under the MIT license
"""
import botogram.decorators
def test_help_message_for(bot):
@bot.command("test")
def func():
"""docstring"""
pass
cmd = {cmd.name: cmd for cmd in bot.available_commands()}["test"]
assert cmd.raw_docstring == "docstring"
@botogram.decorators.help_message_for(func)
def help_func():
return "functi
|
on"
assert cmd.raw_docstring == "function"
|
dbcls/dbcls-galaxy
|
test/functional/test_get_data.py
|
Python
|
mit
| 4,761
| 0.031086
|
import galaxy.model
from galaxy.model.orm import *
from base.twilltestcase import TwillTestCase
class UploadData( TwillTestCase ):
def test_000_upload_files_from_disk( self ):
"""Test uploading data files from disk"""
self.logout()
self.login( email='tst@bx.psu.edu' )
history1 = galaxy.model.History.query().order_by( desc( galaxy.model.History.table.c.create_time ) ).first()
self.upload_file( '1.bed' )
hda1 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda1 is not None, "Problem retrieving hda1 from database"
self.verify_dataset_correctness( '1.bed', hid=str( hda1.hid ) )
self.upload_file( '2.bed', dbkey='hg17' )
hda2 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda2 is not None, "Problem retrieving hda2 from database"
self.verify_dataset_correctness( '2.bed', hid=str( hda2.hid ) )
        self.upload_file( '3.bed', dbkey='hg17', ftype='bed' )
        hda3 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda3 is not None, "Problem retrieving hda3 from database"
self.verify_dataset_correctness( '3.bed', hid=str( hda3.hid ) )
        self.upload_file( '4.bed.gz', dbkey='hg17', ftype='bed' )
hda4 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda4 is not None, "Problem retrieving hda4 from database"
self.verify_dataset_correctness( '4.bed', hid=str( hda4.hid ) )
self.upload_file( '1.scf', ftype='scf' )
hda5 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda5 is not None, "Problem retrieving hda5 from database"
self.verify_dataset_correctness( '1.scf', hid=str( hda5.hid ) )
self.upload_file( '1.scf.zip', ftype='binseq.zip' )
hda6 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda6 is not None, "Problem retrieving hda6 from database"
self.verify_dataset_correctness( '1.scf.zip', hid=str( hda6.hid ) )
self.delete_history( id=str( history1.id ) )
def test_005_url_paste( self ):
"""Test url paste behavior"""
# Deleting the current history should have created a new history
self.check_history_for_string( 'Your history is empty' )
history2 = galaxy.model.History.query().order_by( desc( galaxy.model.History.table.c.create_time ) ).first()
self.upload_url_paste( 'hello world' )
self.check_history_for_string( 'Pasted Entry' )
self.check_history_for_string( 'hello world' )
self.upload_url_paste( u'hello world' )
self.check_history_for_string( 'Pasted Entry' )
self.check_history_for_string( 'hello world' )
self.delete_history( id=str( history2.id ) )
def test_010_upload_encode_data( self ):
"""Test uploading encode data"""
# Deleting the current history should have created a new history
self.check_history_for_string( 'Your history is empty' )
history3 = galaxy.model.History.query().order_by( desc( galaxy.model.History.table.c.create_time ) ).first()
self.run_tool( 'encode_import_chromatin_and_chromosomes1', hg17=['cc.EarlyRepSeg.20051216.bed'] )
self.wait()
hda7 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda7 is not None, "Problem retrieving hda7 from database"
self.verify_dataset_correctness( 'cc.EarlyRepSeg.20051216.bed', hid=str( hda7.hid ) )
self.run_tool('encode_import_gencode1', hg17=['gencode.CDS.20051206.bed'])
self.wait()
hda8 = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
assert hda8 is not None, "Problem retrieving hda8 from database"
self.verify_dataset_correctness( 'sc_3D_cds.bed', hid=str( hda8.hid ) )
self.delete_history( id=str( history3.id ) )
def test_015_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to pass"""
self.logout()
|
MooseDojo/myBFF
|
modules/jenkinsBrute.py
|
Python
|
mit
| 1,691
| 0.009462
|
#! /usr/bin/python
# this module is for jenkins attacks
from core.webModule import webModule
from requests import session
import requests
import re
from argparse import ArgumentParser
from lxml import html
import random
class jenkinsBrute(webModule):
def __init__(self, config, display, lock):
super(jenkinsBrute, self).__init__(config, display, lock)
self.fingerprint="Jenkins"
        self.response="Success"
self.protocol="web"
def somethingCool(self, config, payload, proxy):
#Do something cool here
print("Something Cool not yet implemented")
def connectTest(self, config, payload, proxy, submitLoc, submitType):
#Create a session and check if account is valid
with session() as c:
requests.packages.urllib3.disable_warnings()
cookie = {'JSESSIONID.d29cad0c':'14qy4wdxentt311fbwxdw85z7o'}
resp1 = c.get(config["HOST"] + '/j_acegi_security_check', cookies=cookie, verify=False, data=payload, proxies=proxy)
print resp1.headers
cpost = c.post(config["HOST"] + '/employee/j_spring_security_check', cookies=cookies, data=payload, allow_redirects=True, verify=False,proxies=proxy)
m = re.search('You are unauthorized to access this page.', cpost.text)
if m:
print("[+] User Credentials Successful: " + config["USERNAME"] + ":" + config["PASSWORD"])
if not config["dry_run"]:
print("[!] Time to do something cool!")
                    self.somethingCool(config, payload, proxy)
else:
print("[-] Login Failed for: " + config["USERNAME"] + ":" + config["PASSWORD"])
|
grow/grow-ext-build-server
|
grow_build_server/emailer.py
|
Python
|
mit
| 1,438
| 0.002782
|
from google.appengine.ext import vendor
vendor.add('extensions')
from google.appengine.api import mail
import jinja2
import os
import premailer
_appid = os.getenv('APPLICATION_ID').replace('s~', '')
EMAIL_SENDER = 'noreply@{}.appspotmail.com'.format(_appid)
class Emailer(object):
def __init__(self, sender=None):
self.sender = sender or EMAIL_SENDER
def send(self, to, subject, template_path, kwargs=None):
html = self._render(template_path, kwargs=kwargs)
self._send(subject, to, html)
def _render(self, template_path, kwargs=None):
params = {}
if kwargs:
params.update(kwargs)
template = self.env.get_template(template_path)
        html = template.render(params)
return premailer.transform(html)
def _send(self, subject, to, html):
message = mail.EmailMessage(sender=self.sender, subject=subject)
message.to = to
message.html = html
message.send()
@property
def env(self):
path = os.path.join(os.path.dirname(__file__), 'templates')
loader = jinja2.FileSystemLoader([path])
extensions = [
'jinja2.ext.autoescape',
'jinja2.ext.do',
'jinja2.ext.loopcontrols',
'jinja2.ext.with_',
]
return jinja2.Environment(
loader=loader, extensions=extensions, autoescape=True,
trim_blocks=True)
|
joselamego/patchwork
|
patchwork/migrations/0003_series.py
|
Python
|
gpl-2.0
| 2,993
| 0.003007
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('patchwork', '0002_fix_patch_state_default_values'),
]
operations = [
migrations.CreateModel(
name='Series',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'Series without cover letter', max_length=200)),
('submitted', models.DateTimeField(default=datetime.datetime.now)),
('last_updated', models.DateTimeField(auto_now=True)),
('version', models.IntegerField(default=1)),
('n_patches', models.IntegerField(default=0)),
('project', models.ForeignKey(to='patchwork.Project')),
('reviewer', models.ForeignKey(related_name='reviewers', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('submitter', models.ForeignKey(related_name='submitters', to='patchwork.Person')),
],
),
migrations.CreateModel(
name='SeriesRevision',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('version', models.IntegerField(default=1)),
('root_msgid', models.CharField(max_length=255)),
('cover_letter', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['version'],
},
),
migrations.CreateModel(
name='SeriesRevisionPatch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField()),
('patch', models.ForeignKey(to='patchwork.Patch')),
('revision', models.ForeignKey(to='patchwork.SeriesRevision')),
],
options={
'ordering': ['order'],
},
),
migrations.AddField(
model_name='seriesrevision',
name='patches',
field=models.ManyToManyField(to='patchwork.Patch', through='patchwork.SeriesRevisionPatch'),
),
migrations.AddField(
model_name='seriesrevision',
name='series',
field=models.ForeignKey(to='patchwork.Series'),
),
migrations.AlterUniqueTogether(
name='seriesrevisionpatch',
unique_together=set([('revision', 'order'), ('revision', 'patch')]),
),
migrations.AlterUniqueTogether(
name='seriesrevision',
unique_together=set([('series', 'version')]),
),
]
|
clinton-hall/nzbToMedia
|
libs/common/babelfish/country.py
|
Python
|
gpl-3.0
| 3,242
| 0.001851
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from collections import namedtuple
from functools import partial
from pkg_resources import resource_stream # @UnresolvedImport
from .converters import ConverterManager
from . import basestr
COUNTRIES = {}
COUNTRY_MATRIX = []
#: The namedtuple used in the :data:`COUNTRY_MATRIX`
IsoCountry = namedtuple('IsoCountry', ['name', 'alpha2'])
f = resource_stream('babelfish', 'data/iso-3166-1.txt')
f.readline()
for l in f:
iso_country = IsoCountry(*l.decode('utf-8').strip().split(';'))
COUNTRIES[iso_country.alpha2] = iso_country.name
COUNTRY_MATRIX.append(iso_country)
f.close()
class CountryConverterManager(ConverterManager):
""":class:`~babelfish.co
|
nverters.ConverterManager` for country converters"""
entry_point = 'babelfish.country_converters'
internal_converters = ['name = babelfish.converters.countryname:CountryNameConverter']
country_converters = CountryConverterManager()
class CountryMeta(type):
"""The :class:`Country` metaclass
Dynamically redirect :meth:`Country.frommycode` to :meth:`Country.fromcode` with the ``mycode`` `converter`
"""
def __getattr__(cls, name):
if name.startswith('from'):
return partial(cls.fromcode, converter=name[4:])
return type.__getattribute__(cls, name)
class Country(CountryMeta(str('CountryBase'), (object,), {})):
"""A country on Earth
A country is represented by a 2-letter code from the ISO-3166 standard
:param string country: 2-letter ISO-3166 country code
"""
def __init__(self, country):
if country not in COUNTRIES:
raise ValueError('%r is not a valid country' % country)
#: ISO-3166 2-letter country code
self.alpha2 = country
@classmethod
def fromcode(cls, code, converter):
"""Create a :class:`Country` by its `code` using `converter` to
:meth:`~babelfish.converters.CountryReverseConverter.reverse` it
:param string code: the code to reverse
:param string converter: name of the :class:`~babelfish.converters.CountryReverseConverter` to use
:return: the corresponding :class:`Country` instance
:rtype: :class:`Country`
"""
return cls(country_converters[converter].reverse(code))
def __getstate__(self):
return self.alpha2
def __setstate__(self, state):
self.alpha2 = state
def __getattr__(self, name):
try:
return country_converters[name].convert(self.alpha2)
except KeyError:
raise AttributeError(name)
def __hash__(self):
return hash(self.alpha2)
def __eq__(self, other):
if isinstance(other, basestr):
return str(self) == other
if not isinstance(other, Country):
return False
return self.alpha2 == other.alpha2
def __ne__(self, other):
return not self == other
def __repr__(self):
return '<Country [%s]>' % self
def __str__(self):
return self.alpha2
|
dbnicholson/merge-our-misc
|
generate_diffs.py
|
Python
|
gpl-3.0
| 4,253
| 0.006115
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generate-diffs.py - generate changes and diff files for new packages
#
# Copyright © 2008 Canonical Ltd.
# Author: Scott James Remnant <scott@ubuntu.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from momlib import *
from util import tree, run
from model.base import (Distro, PackageVersion)
import config
def options(parser):
parser.add_option("-t", "--target", type="string", metavar="TARGET",
default=None,
help="Process only this distribution target")
logger = logging.getLogger('generate_diffs')
def main(options, args):
logger.info('Comparing current and previous versions in source distros...')
# For latest version of each package in the given distributions, iterate the pool in order
# and generate a diff from the previous version and a changes file
for target in config.targets(args):
d = target.distro
for source in d.newestSources(target.dist, target.component):
if options.package and source['Package'] not in options.package:
continue
if source['Package'] in target.blacklist:
logger.debug("%s is blacklisted, skipping", source['Package'])
continue
try:
pkg = d.package(target.dist, target.component, source['Package'])
except model.error.PackageNotFound, e:
logger.exception("Spooky stuff going on with %s.", d)
continue
sources = pkg.poolDirectory().getSourceStanzas()
version_sort(sources)
last = None
try:
for version in pkg.poolDirectory().getVersions():
pv = PackageVersion(pkg, version)
try:
generate_diff(last, pv)
except model.error.PackageNotFound:
logger.exception("Could not find a package to diff against.")
except ValueError:
logger.exception("Could not find a .dsc file, perhaps it moved components?")
finally:
if last is not None:
cleanup_source(last.getSources())
last = pv
finally:
if last is not None:
cleanup_source(last.getSources())
def generate_diff(last, this):
"""Generate the differences."""
changes_filename = changes_file(this.package.distro, this.getSources())
if last is None:
return
if not os.path.isfile(changes_filename) \
and not os.path.isfile(changes_filename + ".bz2"):
try:
unpack_source(this)
except ValueError:
logger.exception("Couldn't unpack %s.", this)
return
try:
save_changes_file(changes_filename, this.getSources(),
last.getSources())
logger.info("Saved changes file: %s",
tree.subdir(ROOT, changes_filename))
except (ValueError, OSError):
logger.error("dpkg-genchanges for %s failed",
tree.subdir(ROOT, changes_filename))
logger.debug("Producing diff from %s to %s", this, last)
diff_filename = diff_file(this.package.distro.name, this.getSources())
if not os.path.isfile(diff_filename) \
and not os.path.isfile(diff_filename + ".bz2"):
unpack_source(this)
unpack_source(last)
save_patch_file(diff_filename, last.getSources(), this.getSources())
save_basis(diff_filename, last.getSources()["Version"])
logger.info("Saved diff file: %s", tree.subdir(ROOT, diff_filename))
if __name__ == "__main__":
run(main, options, usage="%prog [DISTRO...]",
description="generate changes and diff files for new packages")
|
pchaitat/invrcptexporter
|
sample-code/misc/hello_world_librecalc.py
|
Python
|
gpl-3.0
| 1,240
| 0.006452
|
#!/usr/bin/env python3
# export_to_pdf.py
#
# references
# - https://onesheep.org/scripting-libreoffice-python/
# - http://christopher5106.github.io/office/2015/12/06/openoffice-lib
# reoffice-automate-your-office-tasks-with-python-macros.html
import uno
from com.sun.star.beans import PropertyValue
localContext = uno.getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", localContext)
context = resolver.resolve(
"uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext")
desktop = context.ServiceManager.createInstanceWithContext(
"com.sun.star.frame.Desktop", context)
model = desktop.getCurrentComponent()
# pdf
#properties=[]
#p=PropertyValue()
#p.Name='FilterName'
#p.Value='calc_pdf_Export'
#properties.append(p)
#model.storeToURL('file:///home/ubuntu/tmp/testout.pdf',tuple(properties))
model.close(True)
# access the active sheet
active_sheet = model.CurrentController.ActiveSheet
# access cell C4
cell1 = active_sheet.getCellRangeByName("C4")
# set text inside
cell1.String = "Hello world"
# other example with a value
cell2 = active_sheet.getCellRangeByName("E6")
cell2.Value = cell2.Value + 1
import sys
sys.exit(0)
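# Hedged sketch of the PDF export outlined in the commented block above
# (filter name and output URL are taken from those comments; adjust the
# path for your environment):
#
#   p = PropertyValue()
#   p.Name = 'FilterName'
#   p.Value = 'calc_pdf_Export'
#   model.storeToURL('file:///home/ubuntu/tmp/testout.pdf', (p,))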
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/valueset.py
|
Python
|
bsd-3-clause
| 21,281
| 0.007753
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ValueSet) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class ValueSet(domainresource.DomainResource):
""" A set of codes drawn from one or more code systems.
A value set specifies a set of codes drawn from one or more code systems.
"""
resource_type = "ValueSet"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.compose = None
""" Definition of the content of the value set (CLD).
Type `ValueSetCompose` (represented as `dict` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date this was last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the value set.
Type `str`. """
self.expansion = None
""" Used when the value set is "expanded".
Type `ValueSetExpansion` (represented as `dict` in JSON). """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.extensible = None
""" Whether this is intended to be used with an extensible binding.
Type `bool`. """
self.identifier = None
""" Additional identifier for the value set.
List of `Identifier` items (represented as `dict` in JSON). """
self.immutable = None
""" Indicates whether or not any change to the content logical
definition may occur.
Type `bool`. """
self.jurisdiction = None
""" Intended jurisdiction for value set (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.name = None
""" Name for this value set (computer friendly).
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this value set is defined.
Type `str`. """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.title = None
""" Name for this value set (human friendly).
Type `str`. """
self.url = None
""" Logical URI to reference this value set (globally unique).
Type `str`. """
self.useContext = None
""" Context the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the value set.
Type `str`. """
super(ValueSet, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSet, self).elementProperties()
js.extend([
("compose", "compose", ValueSetCompose, False, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("expansion", "expansion", ValueSetExpansion, False, None, False),
("experimental", "experimental", bool, False, None, False),
("extensible", "extensible", bool, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("immutable", "immutable", bool, False, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("name", "name", str, False, None, False),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("status", "status", str, False, None, True),
("title", "title", str, False, None, False),
("url", "url", str, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
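# A hedged construction sketch (follows the jsondict/strict contract in
# __init__ above; the dict keys mirror the FHIR 3.0 ValueSet schema):
#
#   vs = ValueSet({'resourceType': 'ValueSet', 'status': 'draft'})
#   vs.status      # 'draft'
#   vs.as_json()   # serializes back to a plain dict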
from . import backboneelement
class ValueSetCompose(backboneelement.BackboneElement):
""" Definition of the content of the value set (CLD).
A set of criteria that define the content logical definition of the value
set by including or excluding codes from outside this value set. This is
also known as the "Content Logical Definition" (CLD).
"""
resource_type = "ValueSetCompose"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.exclude = None
""" Explicitly exclude codes from a code system or other value sets.
List of `ValueSetComposeInclude` items (represented as `dict` in JSON). """
self.inactive = None
""" Whether inactive codes are in the value set.
Type `bool`. """
self.include = None
""" Include one or more codes from a code system or other value set(s).
List of `ValueSetComposeInclude` items (represented as `dict` in JSON). """
self.lockedDate = None
""" Fixed date for version-less references (transitive).
Type `FHIRDate` (represented as `str` in JSON). """
super(ValueSetCompose, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ValueSetCompose, self).elementProperties()
js.extend([
("exclude", "exclude", ValueSetComposeInclude, True, None, False),
("inactive", "inactive", bool, False, None, False),
("include", "include", ValueSetComposeInclude, True, None, True),
("lockedDate", "lockedDate", fhirdate.FHIRDate, False, None, False),
])
return js
class ValueSetComposeInclude(backboneelement.BackboneElement):
""" Include one or more codes from a code system or other value set(s).
"""
resource_type = "ValueSetComposeInclude"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.concept = None
""" A concept defined in the system.
List of `ValueSetComposeIncludeConcept` items (represented as `dict` in JSON). """
self.filter = None
""" Select codes/concepts by their properties (including relationships).
List of `ValueSetComposeIncludeFilter` items (represented as `dict` in JSON). """
self.system = None
""" The system the codes come from.
Type `str`. """
self.valueSet = None
""" Select only contents included in this value set.
List of `str` items. """
self.version = None
""" Specific version of the code system referred to.
Type `str`. """
super(ValueSetComposeInclude, self).__init__(jsondict=jsondict, strict=strict)
|
VerifiableRobotics/LTLMoP
|
src/lib/execute.py
|
Python
|
gpl-3.0
| 17,245
| 0.005683
|
#!/usr/bin/env python
""" =================================================
execute.py - Top-level hybrid controller executor
=================================================
This module executes a hybrid controller for a robot in a simulated or real environment.
:Usage: ``execute.py [-hn] [-p listen_port] [-a automaton_file] [-s spec_file]``
* The controlling automaton is imported from the specified ``automaton_file``.
* The supporting handler modules (e.g. sensor, actuator, motion control, simulation environment initialization, etc)
are loaded according to the settings in the config file specified as current in the ``spec_file``.
* If no port to listen on is specified, an open one will be chosen randomly.
* Unless otherwise specified with the ``-n`` or ``--no_gui`` option, a status/control window
will also be opened for informational purposes.
"""
import sys, os, getopt, textwrap
import threading, subprocess, time
# Climb the tree to find out where we are
p = os.path.abspath(__file__)
t = ""
while t != "src":
(p, t) = os.path.split(p)
if p == "":
print "I have no idea where I am; this is ridiculous"
sys.exit(1)
sys.path.append(os.path.join(p,"src","lib"))
import fsa, project
import handlerSubsystem
import strategy
from copy import deepcopy
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import xmlrpclib
import socket
import random
import math
import traceback
from resynthesis import ExecutorResynthesisExtensions
from executeStrategy import ExecutorStrategyExtensions
import globalConfig, logging
####################
# HELPER FUNCTIONS #
####################
def usage(script_name):
""" Print command-line usage information. """
print textwrap.dedent("""\
Usage: %s [-hn] [-p listen_port] [-a automaton_file] [-s spec_file]
-h, --help:
Display this message
-n, --no-gui:
Do not show status/control window
-p PORT, --xmlrpc-listen-port PORT:
Listen on PORT for XML-RPC calls
-a FILE, --aut-file FILE:
Load automaton from FILE
-s FILE, --spec-file FILE:
Load experiment configuration from FILE """ % script_name)
class LTLMoPExecutor(ExecutorStrategyExtensions,ExecutorResynthesisExtensions, object):
"""
This is the main execution object, which combines the synthesized discrete automaton
with a set of handlers (as specified in a .config file) to create and run a hybrid controller
"""
def __init__(self):
"""
Create a new execution context object
"""
super(LTLMoPExecutor, self).__init__()
self.proj = project.Project() # this is the project that we are currently using to execute
self.strategy = None
# Choose a timer func with maximum accuracy for given platform
if sys.platform in ['win32', 'cygwin']:
self.timer_func = time.clock
else:
self.timer_func = time.time
self.externalEventTarget = None
self.externalEventTargetRegistered = threading.Event()
self.postEventLock = threading.Lock()
self.runStrategy = threading.Event() # Start out paused
self.alive = threading.Event()
self.alive.set()
self.current_outputs = {} # keep track on current outputs values (for actuations)
def postEvent(self, eventType, eventData=None):
""" Send a notice that an event occurred, if anyone wants it """
with self.postEventLock:
if self.externalEventTarget is None:
return
try:
self.externalEventTarget.handleEvent(eventType, eventData)
except socket.error as e:
logging.warning("Could not send event to remote event target: %s", e)
logging.warning("Forcefully unsubscribing target.")
self.externalEventTarget = None
def loadSpecFile(self, filename):
# Update with this new project
self.proj = project.Project()
self.proj.loadProject(filename)
self.hsub = handlerSubsystem.HandlerSubsystem(self, self.proj.project_root)
# Tell GUI to load the spec file
self.postEvent("SPEC", self.proj.getFilenamePrefix() + ".spec")
def loadAutFile(self, filename):
"""
This function loads the .aut/.bdd file named filename and returns the strategy object.
filename (string): name of the file with path included
"""
region_domain = strategy.Domain("region", self.proj.rfi.regions, strategy.Domain.B0_IS_MSB)
strat = strategy.createStrategyFromFile(filename,
self.proj.enabled_sensors,
self.proj.enabled_actuators + self.proj.all_customs + [region_domain])
return strat
def _getCurrentRegionFromPose(self, rfi=None):
# TODO: move this to regions.py
if rfi is None:
rfi = self.proj.rfi
pose = self.hsub.coordmap_lab2map(self.hsub.getPose())
region = next((i for i, r in enumerate(rfi.regions) if r.name.lower() != "boundary" and \
r.objectContainsPoint(*pose)), None)
if region is None:
logging.warning("Pose of {} not inside any region!".format(pose))
return region
def shutdown(self):
self.runStrategy.clear()
logging.info("QUITTING.")
all_handler_types = ['init', 'pose', 'locomotionCommand', 'drive', 'motionControl', 'sensor', 'actuator']
for htype in all_handler_types:
logging.info("Terminating {} handler...".format(htype))
if htype in self.proj.h_instance:
if isinstance(self.proj.h_instance[htype], dict):
handlers = [v for k, v in self.proj.h_instance[htype].iteritems()]
else:
handlers = [self.proj.h_instance[htype]]
for h in handlers:
if hasattr(h, "_stop"):
logging.debug("Calling _stop() on {}".format(h.__class__.__name__))
h._stop()
else:
logging.debug("{} does not have _stop() function".format(h.__class__.__name__))
else:
logging.debug("{} handler not found in h_instance".format(htype))
self.alive.clear()
def pause(self):
""" pause execution of the automaton """
self.runStrategy.clear()
time.sleep(0.1) # Wait for FSA to stop
self.postEvent("PAUSE")
def resume(self):
""" start/resume execution of the automaton """
self.runStrategy.set()
def isRunning(self):
""" return whether the automaton is currently executing """
return self.runStrategy.isSet()
def registerExternalEventTarget(self, address):
self.externalEventTarget = xmlrpclib.ServerProxy(address, allow_none=True)
# Redirect all output to the log
redir = RedirectText(self.externalEventTarget.handleEvent)
sys.stdout = redir
sys.stderr = redir
self.externalEventTargetRegistered.set()
def initialize(self, spec_file, strategy_file, firstRun=True):
"""
Prepare for execution, by loading and initializing all the relevant files (specification, map, handlers, strategy)
If `firstRun` is true, all handlers will be imported; otherwise, only the motion control handler will be reloaded.
"""
# load project only first time; otherwise self.proj is modified in-place
# TODO: make this less hacky
if firstRun:
self.loadSpecFile(spec_file)
if self.proj.compile_options['decompose']:
self.proj.rfiold = self.proj.rfi # Save the undecomposed regions
|
lmr/avocado-vt
|
virttest/step_editor.py
|
Python
|
gpl-2.0
| 50,744
| 0.000158
|
#!/usr/bin/python
"""
Step file creator/editor.
:copyright: Red Hat Inc 2009
:author: mgoldish@redhat.com (Michael Goldish)
"""
import os
import glob
import shutil
import sys
import logging
import pygtk
import gtk
from virttest import ppm_utils
pygtk.require('2.0')
# General utilities
def corner_and_size_clipped(startpoint, endpoint, limits):
c0 = startpoint[:]
c1 = endpoint[:]
if c0[0] < 0:
c0[0] = 0
if c0[1] < 0:
c0[1] = 0
if c1[0] < 0:
c1[0] = 0
if c1[1] < 0:
c1[1] = 0
if c0[0] > limits[0] - 1:
c0[0] = limits[0] - 1
if c0[1] > limits[1] - 1:
c0[1] = limits[1] - 1
if c1[0] > limits[0] - 1:
c1[0] = limits[0] - 1
if c1[1] > limits[1] - 1:
c1[1] = limits[1] - 1
return ([min(c0[0], c1[0]),
min(c0[1], c1[1])],
[abs(c1[0] - c0[0]) + 1,
abs(c1[1] - c0[1]) + 1])
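# Worked example (hypothetical values): a drag from (-5, 10) to (120, 40)
# clipped against a 100x50 image yields corner [0, 10] and size [100, 31]:
#
#   corner_and_size_clipped([-5, 10], [120, 40], [100, 50])
#   -> ([0, 10], [100, 31])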
def key_event_to_qemu_string(event):
keymap = gtk.gdk.keymap_get_default()
keyvals = keymap.get_entries_for_keycode(event.hardware_keycode)
keyval = keyvals[0][0]
keyname = gtk.gdk.keyval_name(keyval)
keymap = {"Return": "ret",
"Tab": "tab",
"space": "spc",
"Left": "left",
"Right": "right",
"Up": "up",
"Down": "down",
"F1": "f1",
"F2": "f2",
"F3": "f3",
"F4": "f4",
"F5": "f5",
"F6": "f6",
"F7": "f7",
"F8": "f8",
"F9": "f9",
"F10": "f10",
"F11": "f11",
"F12": "f12",
"Escape": "esc",
"minus": "minus",
"equal": "equal",
"BackSpace": "backspace",
"comma": "comma",
"period": "dot",
"slash": "slash",
"Insert": "insert",
"Delete": "delete",
"Home": "home",
"End": "end",
"Page_Up": "pgup",
"Page_Down": "pgdn",
"Menu": "menu",
"semicolon": "0x27",
"backslash": "0x2b",
"apostrophe": "0x28",
"grave": "0x29",
"less": "0x2b",
"bracketleft": "0x1a",
"bracketright": "0x1b",
"Super_L": "0xdc",
"Super_R": "0xdb",
}
if ord('a') <= keyval <= ord('z') or ord('0') <= keyval <= ord('9'):
sr = keyname
elif keyname in list(keymap.keys()):
sr = keymap[keyname]
else:
return ""
if event.state & gtk.gdk.CONTROL_MASK:
sr = "ctrl-" + str
if event.state & gtk.gdk.MOD1_MASK:
sr = "alt-" + str
if event.state & gtk.gdk.SHIFT_MASK:
sr = "shift-" + str
return sr
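# Illustration (assuming a US keymap): Enter maps to "ret", Page_Down to
# "pgdn", and holding Ctrl prefixes the result ("ctrl-c"); keycodes with
# no mapping return the empty string.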
class StepMakerWindow(object):
def __init__(self):
# Window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("Step Maker Window")
self.window.connect("delete-event", self.delete_event)
self.window.connect("destroy", self.destroy)
self.window.set_default_size(600, 800)
# Main box (inside a frame which is inside a VBox)
self.menu_vbox = gtk.VBox()
self.window.add(self.menu_vbox)
self.menu_vbox.show()
frame = gtk.Frame()
frame.set_border_width(10)
frame.set_shadow_type(gtk.SHADOW_NONE)
self.menu_vbox.pack_end(frame)
frame.show()
self.main_vbox = gtk.VBox(spacing=10)
frame.add(self.main_vbox)
self.main_vbox.show()
# EventBox
self.scrolledwindow = gtk.ScrolledWindow()
self.scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC,
gtk.POLICY_AUTOMATIC)
self.scrolledwindow.set_shadow_type(gtk.SHADOW_NONE)
self.main_vbox.pack_start(self.scrolledwindow)
self.scrolledwindow.show()
table = gtk.Table(1, 1)
self.scrolledwindow.add_with_viewport(table)
table.show()
table.realize()
self.event_box = gtk.EventBox()
table.attach(self.event_box, 0, 1, 0, 1, gtk.EXPAND, gtk.EXPAND)
self.event_box.show()
self.event_box.realize()
# Image
self.image = gtk.Image()
self.event_box.add(self.image)
self.image.show()
# Data VBox
self.data_vbox = gtk.VBox(spacing=10)
self.main_vbox.pack_start(self.data_vbox, expand=False)
self.data_vbox.show()
# User VBox
self.user_vbox = gtk.VBox(spacing=10)
self.main_vbox.pack_start(self.user_vbox, expand=False)
self.user_vbox.show()
# Screendump ID HBox
box = gtk.HBox(spacing=10)
self.data_vbox.pack_start(box)
box.show()
label = gtk.Label("Screendump ID:")
box.pack_start(label, False)
label.show()
self.entry_screendump = gtk.Entry()
self.entry_screendump.set_editable(False)
box.pack_start(self.entry_screendump)
self.entry_screendump.show()
label = gtk.Label("Time:")
box.pack_start(label, False)
label.show()
self.entry_time = gtk.Entry()
self.entry_time.set_editable(False)
self.entry_time.set_width_chars(10)
box.pack_start(self.entry_time, False)
self.entry_time.show()
# Comment HBox
box = gtk.HBox(spacing=10)
self.data_vbox.pack_start(box)
box.show()
label = gtk.Label("Comment:")
box.pack_start(label, False)
label.show()
self.entry_comment = gtk.Entry()
box.pack_start(self.entry_comment)
self.entry_comment.show()
# Sleep HBox
box = gtk.HBox(spacing=10)
self.data_vbox.pack_start(box)
box.show()
self.check_sleep = gtk.CheckButton("Sleep:")
self.check_sleep.connect("toggled", self.event_check_sleep_toggled)
box.pack_start(self.check_sleep, False)
self.check_sleep.show()
self.spin_sleep = gtk.SpinButton(gtk.Adjustment(0, 0, 50000, 1, 10, 0),
climb_rate=0.0)
box.pack_start(self.spin_sleep, False)
self.spin_sleep.show()
# Barrier HBox
box = gtk.HBox(spacing=10)
self.data_vbox.pack_start(box)
box.show()
self.check_barrier = gtk.CheckButton("Barrier:")
self.check_barrier.connect("toggled", self.event_check_barrier_toggled)
box.pack_start(self.check_barrier, False)
self.check_barrier.show()
vbox = gtk.VBox()
box.pack_start(vbox)
vbox.show()
self.label_barrier_region = gtk.Label("Region:")
self.label_barrier_region.set_alignment(0, 0.5)
vbox.pack_start(self.label_barrier_region)
self.label_barrier_region.show()
self.label_barrier_md5sum = gtk.Label("MD5:")
self.label_barrier_md5sum.set_alignment(0, 0.5)
vbox.pack_start(self.label_barrier_md5sum)
self.label_barrier_md5sum.show()
self.label_barrier_timeout = gtk.Label("Timeout:")
box.pack_start(self.label_barrier_timeout, False)
self.label_barrier_timeout.show()
self.spin_barrier_timeout = gtk.SpinButton(gtk.Adjustment(0, 0, 50000,
1, 10, 0),
climb_rate=0.0)
box.pack_start(self.spin_barrier_timeout, False)
self.spin_barrier_timeout.show()
self.check_barrier_optional = gtk.CheckButton("Optional")
box.pack_start(self.check_barrier_optional, False)
self.check_barrier_optional.show()
# Keystrokes HBox
box = gtk.HBox(spacing=10)
self.data_vbox.pack_start(box)
box.show()
label = gtk.Label("Keystrokes:")
box.pack_start(label, False)
label.show()
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_IN)
box.pack_start(frame)
frame.show()
self.text_buffer = gtk.TextBuffer()
|
RealTimeWeb/Blockpy-Server
|
controllers/services.py
|
Python
|
mit
| 2,386
| 0.003353
|
import logging
from pprint import pprint
from flask_wtf import Form
from wtforms import IntegerField, BooleanField
from flask import Blueprint, send_from_directory
from flask import Flask, redirect, url_for, session, request, jsonify, g,\
make_response, Response, render_template
from werkzeug.utils import secure_filename
from sqlalchemy import Date, cast, func, desc, or_
from main import app
from controllers.helpers import crossdomain
from interaction_logger import StructuredEvent
services = Blueprint('services', __name__, url_prefix='/services')
from controllers.service_libraries import weather as weather_service
@services.route('/weather/', methods=['GET', "POST"])
@services.route('/weather', methods=['GET', 'POST'])
def weather():
function = request.args.get("function", "get_temperature")
city = request.args.get("city", "Blacksburg, VA")
weather_function = getattr(weather_service, function)
return jsonify(data=weather_function(city))
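# A hedged client-side sketch (assumes the app is served on
# localhost:5000; the query parameters mirror the defaults read above):
#
#   import requests
#   r = requests.get('http://localhost:5000/services/weather',
#                    params={'function': 'get_temperature',
#                            'city': 'Blacksburg, VA'})
#   r.json()   # {'data': <temperature>}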
@services.route('/sheets/<path:sheet_url>', methods=['GET'])
def sheets(sheet_url):
sheet_id = ''
if sheet_url.startswith('http'):
sheet_url.split('/')
elif sheet_url.startswith('docs'):
sheet_url.split('/')
# sample:
# https://docs.google.com/spreadsheets/d/1eLbX_5EFvZYc7JOGYF8ATdu5uQeu6OvILNnr4vH3vFI/pubhtml
# =>
# https://spreadsheets.google.com/feeds/list/___/od6/public/basic?alt=json
# https://spreadsheets.google.com/feeds/list/1eLbX_5EFvZYc7JOGYF8ATdu5uQeu6OvILNnr4vH3vFI/od6/public/basic?alt=json
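# A hedged sketch of the intended conversion (helper name is hypothetical),
# following the sample URLs above: take the key segment of the docs URL and
# substitute it into the feeds URL template:
#
#   def _sheet_feed_url(sheet_url):
#       sheet_id = sheet_url.rstrip('/').split('/')[-2]
#       return ('https://spreadsheets.google.com/feeds/list/%s'
#               '/od6/public/basic?alt=json' % sheet_id)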
@services.route('/log/', methods=['GET', 'POST', 'OPTIONS'])
@services.route('/log', methods=['GET', 'POST', 'OPTIONS'])
#@crossdomain(origin='*')
def log_event():
user_id = request.form.get('user_id', "")
if user_id == "":
user_id = str(request.remote_addr)
question_id = request.form.get('question_id', "")
event = request.form.get('event', "")
action = request.form.get('action', "")
body = request.form.get('body', "")
external_interactions_logger = logging.getLogger('ExternalInteractions')
external_interactions_logger.info(
StructuredEvent(user_id, question_id, event, action, body)
)
response = make_response('success')
response.headers['Access-Control-Allow-Origin'] = "*"
return response
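# Example request (hypothetical host/port), matching the form fields read
# above:
#
#   curl -X POST http://localhost:5000/services/log \
#        -d user_id=42 -d question_id=q7 \
#        -d event=code -d action=run -d body='print(1)'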
|
sil-jterm-2015/sfwebchecks
|
src/scripts/language picker/build-json-language-data.py
|
Python
|
mit
| 8,241
| 0.004004
|
#!/usr/bin/env python
"""Parses language data from IANA subtag registry plus several other files,
and outputs JSON data in the following format:
[
{
'name': 'Ghotuo',
'code': { 'three': 'aaa' },
'country': ['Nigeria'],
'altNames': [],
},
{
'name': 'Alumu',
'code': { 'three': 'aab' },
'country': ['Nigeria'],
'altNames': ['Alumu', 'Tesu', 'Arum', 'Alumu-Tesu', 'Alumu', 'Arum-Cesu', 'Arum-Chessu', 'Arum-Tesu'],
},
# ...
{
'name': 'Waorani',
'code': { 'three': 'auc' },
'country': ['Brazil'],
'altNames': ['Huaorani', 'Sabela', 'Waodani', 'Auca'], # Pejorative names like Auca are *not* flagged as such
}
# ...
{
'name': 'English',
'code': { 'two': 'en', 'three': 'eng' },
'country': ['Australia', 'United Kingdom', 'United States', ...],
'altNames': ['Belfast', 'Birmingham', ...], # Dialects are *not* flagged as such
}
# ...
]"""
import os, sys
import re
from pprint import pprint, pformat
import codecs
import collections
import json
# Constants - mostly hardcoded filenames
SUBTAG_REGISTRY_FNAME = "ianaSubtagRegistry.txt"
COUNTRY_CODES_FNAME = "CountryCodes.txt"
LANGUAGE_CODES_FNAME = "LanguageCodes.txt"
LANGUAGE_INDEX_FNAME = "LanguageIndex.txt"
CONVERT_2_TO_3_FNAME = "TwoToThreeCodes.txt"
OUTPUT_FNAME = "inputSystems_languages.js"
OUTPUT_KEY_ORDER = ['name', 'code', 'country', 'altNames']
# OUTPUT_PREFIX is the text to write *before* the JSON output
OUTPUT_PREFIX = """\
'use strict';
// THIS FILE IS AUTOMATICALLY GENERATED.
// Do not make changes to this file; they will be overwritten.
// input systems languages data
var _inputSystems_languages = """
# OUTPUT_SUFFIX is the text to write *after* the JSON output
OUTPUT_SUFFIX = ";\n"
def read_file(fname):
with codecs.open(fname, 'rU', 'utf-8-sig') as f:
result = f.read() # utf-8-sig means strip BOM from start of file, if present
return result
def read_all_files():
try:
data = {
"subtags": read_file(SUBTAG_REGISTRY_FNAME),
"ccs": read_file(COUNTRY_CODES_FNAME),
"lcs": read_file(LANGUAGE_CODES_FNAME),
"lndx": read_file(LANGUAGE_INDEX_FNAME),
"2to3": read_file(CONVERT_2_TO_3_FNAME),
}
except IOError:
return None
else:
return data
def parse_subtag_registry(raw_text):
"""Returns data as a dict of lists, keyed by record type:
result['language'] = (list of language records)
result['extlang'] = (list of extended language records)
result['script'] = (list of script records)
And so on. Valid keys for result dict will be language, extlang, script,
region, variant, grandfathered, redundant."""
result = collections.defaultdict(list)
records = raw_text.split(u"%%\n")
for record in records:
data = {}
if record.startswith(u"File-Date:"):
continue # First "record" of file is only file-date
record = record.replace(u"\n ", u" ") # Line continuations: newline plus two spaces
record = re.sub(u" +", u" ", record) # Multiple spaces are collapsed into one
|
, per spec
for line in record.splitlines():
key, val = line.split(": ", 1)
if key == 'Description':
# Descriptions can, and often do, appear more than once per record
data.setdefault(key, []).append(val)
else:
data[key] = val
result[data[u'Type']].append(data)
return result
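# For example, a registry record such as
#
#   Type: script
#   Subtag: Latn
#   Description: Latin
#
# ends up in result['script'] as
# {'Type': 'script', 'Subtag': 'Latn', 'Description': ['Latin']}
# (Description is always a list because it may repeat within a record).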
def parse_tab_separated_file(raw_text, first_line_contains_field_names=True):
"""Returns data as either:
- a list of dicts, if first_line_contains_field_names is True
- a list of lists, if first_line_contains_field_names is False
"""
result = []
lines = raw_text.splitlines()
if first_line_contains_field_names:
field_names = lines[0].split('\t')
lines = lines[1:]
for line in lines:
fields = [field.strip() for field in line.split('\t') if line.strip()]
if first_line_contains_field_names:
result.append(dict(zip(field_names, fields)))
else:
result.append(fields)
return result
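# Minimal illustration with hypothetical tab-separated data:
#
#   parse_tab_separated_file("LangID\tCountryID\naaa\tNG", True)
#   -> [{'LangID': 'aaa', 'CountryID': 'NG'}]
#
#   parse_tab_separated_file("aa\taar", False)
#   -> [['aa', 'aar']]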
def parse_all_files(data):
result = {}
result['subtags'] = parse_subtag_registry(data['subtags'])
result['ccs'] = parse_tab_separated_file(data['ccs'], True)
result['lcs'] = parse_tab_separated_file(data['lcs'], True)
result['lndx'] = parse_tab_separated_file(data['lndx'], True)
result['2to3'] = parse_tab_separated_file(data['2to3'], False)
# Build lookup tables
result['2to3_lookup'] = {record[0]: record[1] for record in result['2to3']}
result['3to2_lookup'] = {record[1]: record[0] for record in result['2to3']}
result['country_lookup'] = {record['CountryID']: record['Name'] for record in result['ccs']}
return result
def build_language_data(data):
result = collections.OrderedDict()
for language_record in data['lndx']:
langid3 = language_record[u'LangID']
langid = data['3to2_lookup'].get(langid3, langid3) # 2-letter code preferred, 3-letter code is fallback
record = result.get(langid, {})
if not record.has_key('code'):
record['code'] = {}
if len(langid) == 2:
record['code']['two'] = langid
record['code']['three'] = langid3
country = data['country_lookup'].get(language_record[u'CountryID'])
if country:
record.setdefault('country', set()).add(country)
name = language_record['Name']
if language_record['NameType'] == 'L':
record['name'] = name
else:
record.setdefault('altNames', set()).add(name)
if not result.has_key(langid):
result[langid] = record
return result
def build_regions_data(data):
result = collections.OrderedDict()
for record in data['ccs']:
result[record['CountryID']] = record['Name']
return result
def build_scripts_data(data):
result = collections.OrderedDict()
for record in data['subtags']['script']:
result[record['Subtag']] = record['Description']
return result
def write_json(final_result, out_fname, prefix, suffix, fix_records=False):
if fix_records:
records_for_output = []
for record in final_result.itervalues():
for key in ['country', 'altNames']:
if record.has_key(key):
record[key] = list(sorted(record[key]))
else:
record[key] = [] # Ensure country and altNames lists exist, even if they're empty
# Rearrange output record so keys will be in predictable order in JSON file
new_record = collections.OrderedDict()
for key in OUTPUT_KEY_ORDER:
new_record[key] = record[key]
records_for_output.append(new_record)
else:
records_for_output = final_result
with codecs.open(out_fname, 'wU', 'utf-8') as f:
f.write(prefix)
json.dump(records_for_output, f, ensure_ascii=False, indent=4, separators=(',', ': '))
f.write(suffix)
def main():
sys.stderr.write('Reading files...\n')
data = read_all_files()
if not data:
sys.stderr.write("Error reading input data files\n")
sys.exit(2)
sys.stderr.write('Parsing files...\n')
data = parse_all_files(data)
sys.stderr.write('Preparing JSON output...\n')
langdata = build_language_data(data)
write_json(langdata, OUTPUT_FNAME, OUTPUT_PREFIX, OUTPUT_SUFFIX, fix_records=True)
regdata = build_regions_data(data)
write_json(regdata,
OUTPUT_FNAME.replace('languages', 'regions'),
OUTPUT_PREFIX.replace('languages', 'regions'),
OUTPUT_SUFFIX.replace('languages', 'regions'))
scriptdata = build_scripts_data(data)
write_json(scriptdata,
OUTPUT_FNAME.replace('languages', 'scripts'),
OUTPUT_PREFIX.replace('languages', 'scripts'),
OUTPUT_SUFFIX.replace('languages', 'scripts'))
|
TachoMex/Compiladores-14b
|
Parser/compilador/programa.py
|
Python
|
gpl-2.0
| 888
| 0.108108
|
func int suma(int a, int b)
return a+b
endfunc
func int absdif(int a, int b)
if(a>b)
return a-b
else
return b-a
endif
endfunc
int i, a, b,B[200], aux, A[100]
int z=suma(2*5+a,aux*A[0])-absdif(10000, 500)
int w=10, C[a/b**aux]
double x=0, y, z, pi=3.141592
a=0
b=1
A[0]=10
A[a+b]=pi**x
for(i=0, i<10, i+=1)
print("f(")
print(i)
print(")=")
println(a)
aux=a
a=a+b
b=aux
endfor
read(x)
while(a<b)
println(x)
y=x*pi
z=ln(y)
x+=z**0.5
endwhile
if(x!=0)
println(a)
else
println(b)
endif
if(x!=0)
println(a)
elseif(x<0)
println(b)
elseif(1==1)
println(pi)
else
x+=1
endif
int MAX=1000
int lista[MAX]
int j
for(i=0, i<MAX, i+=1)
read(lista[i])
endfor
for(i=0, i<MAX, i+=1)
for(j=1, j<MAX, j+=1)
if(lista[j]<lista[j-1])
int temp=lista[j]
lista[j]=lista[j-1]
lista[j-1]=temp
endif
endfor
endfor
i=0
while(not (i==10))
println(i)
i+=1
endwhile
|
deepsrijit1105/edx-platform
|
lms/djangoapps/instructor_task/tests/test_integration.py
|
Python
|
agpl-3.0
| 29,509
| 0.003728
|
"""
Integration Tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
from collections import namedtuple
import ddt
import json
import logging
from mock import patch
from nose.plugins.attrib import attr
import textwrap
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from openedx.core.djangoapps.util.testing import TestConditionalContent
from capa.tests.response_xml_factory import (CodeResponseXMLFactory,
CustomResponseXMLFactory)
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore import ModuleStoreEnum
from courseware.model_data import StudentModule
from instructor_task.api import (submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students)
from instructor_task.models import InstructorTask
from instructor_task.tasks_helper import upload_grades_csv
from instructor_task.tests.test_base import (
InstructorTaskModuleTestCase,
TestReportMixin,
OPTION_1,
OPTION_2,
)
from capa.responsetypes import StudentInputError
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
log = logging.getLogger(__name__)
class TestIntegrationTask(InstructorTaskModuleTestCase):
"""
Base class to provide general methods used for "integration" testing of particular tasks.
"""
def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
"""Confirm that expected values are stored in InstructorTask on task failure."""
instructor_task = InstructorTask.objects.get(id=entry_id)
self.assertEqual(instructor_task.task_state, FAILURE)
self.assertEqual(instructor_task.requester.username, 'instructor')
self.assertEqual(instructor_task.task_type, task_type)
task_input = json.loads(instructor_task.task_input)
self.assertNotIn('student', task_input)
self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
status = json.loads(instructor_task.task_output)
self.assertEqual(status['exception'], 'ZeroDivisionError')
self.assertEqual(status['message'], expected_message)
# check status returned:
status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
self.assertEqual(status['message'], expected_message)
@attr(shard=3)
@ddt.ddt
class TestRescoringTask(TestIntegrationTask):
"""
Integration-style tests for rescoring problems in a background task.
Exercises real problems with a minimum of patching.
"""
def setUp(self):
super(TestRescoringTask, self).setUp()
self.initialize_course()
self.create_instructor('instructor')
self.user1 = self.create_student('u1')
self.user2 = self.create_student('u2')
self.user3 = self.create_student('u3')
self.user4 = self.create_student('u4')
self.users = [self.user1, self.user2, self.user3, self.user4]
self.logout()
# set up test user for performing test operations
self.setup_user()
def render_problem(self, username, problem_url_name):
"""
Use ajax interface to request html for a problem.
"""
# make sure that the requested user is logged in, so that the ajax call works
# on the right problem:
self.login_username(username)
# make ajax call:
modx_url = reverse('xblock_handler', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
'handler': 'xmodule_handler',
'suffix': 'problem_get',
})
resp = self.client.post(modx_url, {})
return resp
def check_state(self, user, descriptor, expected_score, expected_max_score, expected_attempts=1):
"""
Check that the StudentModule state contains the expected values.
The student module is found for the test course, given the `username` and problem `descriptor`.
Values checked include the number of attempts, the score, and the max score for a problem.
"""
module = self.get_student_module(user.username, descriptor)
self.assertEqual(module.grade, expected_score)
self.assertEqual(module.max_grade, expected_max_score)
state = json.loads(module.state)
attempts = state['attempts']
self.assertEqual(attempts, expected_attempts)
if attempts > 0:
self.assertIn('correct_map', state)
self.assertIn('student_answers', state)
self.assertGreater(len(state['correct_map']), 0)
self.assertGreater(len(state['student_answers']), 0)
# assume only one problem in the subsection and the grades
# are in sync.
expected_subsection_grade = expected_score
course_grade = CourseGradeFactory(user).create(self.course)
self.assertEquals(
course_grade.subsection_grade_totals_by_format['Homework'][0].earned,
expected_subsection_grade,
)
def submit_rescore_all_student_answers(self, instructor, problem_url_name):
"""Submits the particular problem for rescoring"""
return submit_rescore_problem_for_all_students(self.create_task_request(instructor),
InstructorTaskModuleTestCase.problem_location(problem_url_name))
def submit_rescore_one_student_answer(self, instructor, problem_url_name, student):
"""Submits the particular problem for rescoring for a particular student"""
return submit_rescore_problem_for_student(self.create_task_request(instructor),
InstructorTaskModuleTestCase.problem_location(problem_url_name),
student)
RescoreTestData = namedtuple('RescoreTestData', 'edit, new_expected_scores, new_expected_max')
@ddt.data(
RescoreTestData(edit=dict(correct_answer=OPTION_2), new_expected_scores=(0, 1, 1, 2), new_expected_max=2),
RescoreTestData(edit=dict(num_inputs=2), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
RescoreTestData(edit=dict(num_inputs=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
RescoreTestData(edit=dict(num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=4),
RescoreTestData(edit=dict(num_inputs=2, num_responses=4), new_expected_scores=(2, 1, 1, 0), new_expected_max=8),
)
@ddt.unpack
def test_rescoring_option_problem(self, problem_edit, new_expected_scores, new_expected_max):
"""
Run rescore scenario on option problem.
Verify rescoring updates grade after content change.
Original problem definition has:
num_inputs = 1
num_responses = 2
correct_answer = OPTION_1
"""
# get descriptor:
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
descriptor = self.module_store.get_item(location)
# first store answers for each of the separate users:
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])
# verify each user's grade
expected_original_scores = (2, 1, 1, 0)
e
|