repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
nyaadevs/nyaa | utils/api_uploader_v2.py | Python | gpl-3.0 | 6,976 | 0.00258 | #!/usr/bin/env python3
import argparse
import json
import os
import requests
NYAA_HOST = 'https://nyaa.si'
SUKEBEI_HOST = 'https://sukebei.nyaa.si'
API_BASE = '/api'
API_UPLOAD = API_BASE + '/upload'
NYAA_CATS = '''1_1 - Anime - AMV
1_2 - Anime - English
1_3 - Anime - Non-English
1_4 - Anime - Raw
2_1 - Audio - Lossless
2_2 - Audio - Lossy
3_1 - Literature - English-translated
3_2 - Literature - Non-English
3_3 - Literature - Non-English-Translated
3_4 - Literature - Raw
4_1 - Live Action - English-translated
4_2 - Live Action - Idol/Promotional Video
4_3 - Live Action - Non-English-translated
4_4 - Live Action - Raw
5_1 - Pictures - Graphics
5_2 - Pictures - Photos
6_1 - Software - Applications
6_2 - Software - Games'''
SUKEBEI_CATS = '''1_1 - Art - Anime
1_2 - Art - Doujinshi
1_3 - Art - Games
1_4 - Art - Manga
1_5 - Art - Pictures
2_1 - Real Life - Photobooks / Pictures
2_2 - Real Life - Videos'''
class CategoryPrintAction(argparse.Action):
def __init__(self, option_strings, nargs='?', help=None, **kwargs):
super().__init__(option_strings=option_strings,
dest='site',
default=None,
nargs=nargs,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
if values and values.lower() == 'sukebei':
print("Sukebei categories")
print(SUKEBEI_CATS)
else:
print("Nyaa categories")
print(NYAA_CATS)
parser.exit()
environment_epillog = ('You may also provide environment variables NYAA_API_HOST, NYAA_API_USERNAME'
' and NYAA_API_PASSWORD for connection info.')
parser = argparse.ArgumentParser(
description='Upload torrents to Nyaa.si', epilog=environment_epillog)
parser.add_argument('--list-categories', default=False, action=CategoryPrintAction, nargs='?',
help='List torrent categories. Include "sukebei" to show Sukebei categories')
conn_group = parser.add_argument_group('Connection options')
conn_group.add_argument('-s', '--sukebei', default=False,
action='store_true', help='Upload to sukebei.nyaa.si')
conn_group.add_argument('-u', '--user', help='Username or email')
conn_group.add_argument('-p', '--password', help='Password')
conn_group.add_argument('--host', help='Select another api host (for debugging purposes)')
resp_group = parser.add_argument_group('Response options')
resp_group.add_argument('--raw', default=False, action='store_true',
help='Print only raw response (JSON)')
resp_group.add_argument('-m', '--magnet', default=False,
action='store_true', help='Print magnet uri')
tor_group = parser.add_argument_group('Torrent options')
tor_group.add_argument('-c', '--category', required=True, help='Torrent category (see ). Required.')
tor_group.add_argument('-n' | , '--name', help='Display name for the torrent (optional)')
tor_group.ad | d_argument('-i', '--information', help='Information field (optional)')
tor_group.add_argument('-d', '--description', help='Description for the torrent (optional)')
tor_group.add_argument('-D', '--description-file', metavar='FILE',
help='Read description from a file (optional)')
tor_group.add_argument('-A', '--anonymous', default=False,
action='store_true', help='Upload torrent anonymously')
tor_group.add_argument('-H', '--hidden', default=False, action='store_true',
help='Hide torrent from results')
tor_group.add_argument('-C', '--complete', default=False, action='store_true',
help='Mark torrent as complete (eg. season batch)')
tor_group.add_argument('-R', '--remake', default=False, action='store_true',
help='Mark torrent as remake (derivative work from another release)')
trusted_group = tor_group.add_mutually_exclusive_group(required=False)
trusted_group.add_argument('-T', '--trusted', dest='trusted', action='store_true',
help='Mark torrent as trusted, if possible. Defaults to true')
trusted_group.add_argument('--no-trusted', dest='trusted',
action='store_false', help='Do not mark torrent as trusted')
parser.set_defaults(trusted=True)
tor_group.add_argument('torrent', metavar='TORRENT_FILE', help='The .torrent file to upload')
def crude_torrent_check(file_object):
''' Does a simple check to weed out accidentally picking a wrong file '''
# Check if file seems to be a bencoded dictionary: starts with d and end with e
file_object.seek(0)
if file_object.read(1) != b'd':
return False
file_object.seek(-1, os.SEEK_END)
if file_object.read(1) != b'e':
return False
# Seek back to beginning
file_object.seek(0)
return True
if __name__ == "__main__":
args = parser.parse_args()
# Use debug host from args or environment, if set
debug_host = args.host or os.getenv('NYAA_API_HOST')
api_host = (debug_host or (args.sukebei and SUKEBEI_HOST or NYAA_HOST)).rstrip('/')
api_upload_url = api_host + API_UPLOAD
if args.description_file:
# Replace args.description with contents of the file
with open(args.description_file, 'r') as in_file:
args.description = in_file.read()
torrent_file = open(args.torrent, 'rb')
# Check if the file even seems like a torrent
if not crude_torrent_check(torrent_file):
raise Exception("File '{}' doesn't seem to be a torrent file".format(args.torrent))
api_username = args.user or os.getenv('NYAA_API_USERNAME')
api_password = args.password or os.getenv('NYAA_API_PASSWORD')
if not (api_username and api_password):
raise Exception('No authorization found from arguments or environment variables.')
auth = (api_username, api_password)
data = {
'name': args.name,
'category': args.category,
'information': args.information,
'description': args.description,
'anonymous': args.anonymous,
'hidden': args.hidden,
'complete': args.complete,
'remake': args.remake,
'trusted': args.trusted,
}
encoded_data = {
'torrent_data': json.dumps(data)
}
files = {
'torrent': torrent_file
}
# Go!
r = requests.post(api_upload_url, auth=auth, data=encoded_data, files=files)
torrent_file.close()
if args.raw:
print(r.text)
else:
try:
response = r.json()
except ValueError:
print('Bad response:')
print(r.text)
exit(1)
errors = response.get('errors')
if errors:
print('Upload failed', errors)
exit(1)
else:
print("[Uploaded] {url} - '{name}'".format(**response))
if args.magnet:
print(response['magnet'])
|
ekopylova/burrito-fillings | bfillings/fastq_join.py | Python | bsd-3-clause | 8,636 | 0.000695 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Application controller for ea-utils v1.1.2-537
# fastq processing utilities
# http://code.google.com/p/ea-utils/
#
import os
import tempfile
import shutil
from burrito.parameters import ValuedParameter
from burrito.util import CommandLineApplication, ResultPath
class FastqJoin(CommandLineApplication):
"""fastq-join (v1.1.2) application controller for joining paired-end reads."""
_command = 'fastq-join'
_parameters = {
# Description copied from 'fastq-join'
# Usage: fastq-join [options] <read1.fq> <read2.fq> [mate.fq] -o
# <read.%.fq>
# Output:
# You can supply 3 -o arguments, for un1, un2, join files, or one
# argument as a file name template. The suffix 'un1, un2, or join' is
# appended to the file, or they replace a %-character if present.
# If a 'mate' input file is present (barcode read), then the files
# 'un3' and 'join2' are also created.
# we'll only handle one output base path / file name
# -o FIL: See 'Output' above
'-o': ValuedParameter(Prefix='-', Delimiter=' ', Name='o'),
# -v C: Verifies that the 2 files probe id's match up to char C
# use ' ' (space) for Illumina reads
'-v': ValuedParameter(Prefix='-', Delimiter=' ', Name='v'),
# -p N: N-percent maximum difference (8)
'-p': ValuedParameter(Prefix='-', Delimiter=' ', Name='p'),
# -m N: N-minimum overlap (6)
'-m': ValuedParameter(Prefix='-', Delimiter=' ', Name='m'),
# -r FIL: Verbose stitch length report
'-r': ValuedParameter(Prefix='-', Delimiter=' ', Name='r')}
_input_handler = '_input_as_paths'
def _get_output_path(self):
"""Checks if a base file label / path is set. Returns absolute path."""
if self.Parameters['-o'].isOn():
| output_path = self._absolute(str(self.Parameters['-o'].Value))
else:
raise ValueError("No output path specified.")
return output_path
def _get_stitch_report_path(self):
"""Checks if stitch report label / path is set. Returns absolute path."""
if self.Parameters['-r'].isOn():
stitch_path = self._absolute(str(self.Parameters['-r'].Val | ue))
return stitch_path
elif self.Parameters['-r'].isOff():
return None
def _get_result_paths(self, data):
"""Capture fastq-join output.
Three output files are produced, in the form of
outputjoin : assembled paired reads
outputun1 : unassembled reads_1
outputun2 : unassembled reads_2
If a barcode / mate-pairs file is also provided then the following
additional files are output:
outputjoin2
outputun3
If a verbose stitch length report (-r) is chosen to be written by the
user then use a user specified filename.
"""
output_path = self._get_output_path()
result = {}
# always output:
result['Assembled'] = ResultPath(Path=output_path + 'join',
IsWritten=True)
result['UnassembledReads1'] = ResultPath(Path=output_path + 'un1',
IsWritten=True)
result['UnassembledReads2'] = ResultPath(Path=output_path + 'un2',
IsWritten=True)
# check if stitch report is requested:
stitch_path = self._get_stitch_report_path()
if stitch_path:
result['Report'] = ResultPath(Path=stitch_path,
IsWritten=True)
# Check if mate file / barcode file is present.
# If not, return result
# We need to check this way becuase there are no infile parameters.
mate_path_string = output_path + 'join2'
mate_unassembled_path_string = output_path + 'un3'
if os.path.exists(mate_path_string) and \
os.path.exists(mate_unassembled_path_string):
result['Mate'] = ResultPath(Path=mate_path_string,
IsWritten=True)
result['MateUnassembled'] = ResultPath(Path=
mate_unassembled_path_string,
IsWritten=True)
else:
pass
return result
def getHelp(self):
"""fastq-join (v1.1.2) help"""
help_str = """
For issues with the actual program 'fastq-join', see the following:
For basic help, type the following at the command line:
'fastq-join'
Website:
http://code.google.com/p/ea-utils/
For questions / comments subit an issue to:
http://code.google.com/p/ea-utils/issues/list
"""
return help_str
def join_paired_end_reads_fastqjoin(
reads1_infile_path,
reads2_infile_path,
perc_max_diff=None, # typical default is 8
min_overlap=None, # typical default is 6
outfile_label='fastqjoin',
params={},
working_dir=tempfile.gettempdir(),
SuppressStderr=True,
SuppressStdout=True,
HALT_EXEC=False):
""" Runs fastq-join, with default parameters to assemble paired-end reads.
Returns file path string.
-reads1_infile_path : reads1.fastq infile path
-reads2_infile_path : reads2.fastq infile path
-perc_max_diff : maximum % diff of overlap differences allowed
-min_overlap : minimum allowed overlap required to assemble reads
-outfile_label : base name for output files.
-params : dictionary of application controller parameters
"""
abs_r1_path = os.path.abspath(reads1_infile_path)
abs_r2_path = os.path.abspath(reads2_infile_path)
infile_paths = [abs_r1_path, abs_r2_path]
# check / make absolute infile paths
for p in infile_paths:
if not os.path.exists(p):
raise IOError('File not found at: %s' % p)
fastq_join_app = FastqJoin(params=params,
WorkingDir=working_dir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout,
HALT_EXEC=HALT_EXEC)
# set param. Helps with QIIME integration to have these values
# set to None by default. This way we do not have to worry
# about changes in default behaviour of the wrapped
# application
if perc_max_diff is not None:
if isinstance(perc_max_diff, int) and 0 <= perc_max_diff <= 100:
fastq_join_app.Parameters['-p'].on(perc_max_diff)
else:
raise ValueError("perc_max_diff must be int between 0-100!")
if min_overlap is not None:
if isinstance(min_overlap, int) and 0 < min_overlap:
fastq_join_app.Parameters['-m'].on(min_overlap)
else:
raise ValueError("min_overlap must be an int >= 0!")
if outfile_label is not None:
if isinstance(outfile_label, str):
fastq_join_app.Parameters['-o'].on(outfile_label + '.')
else:
raise ValueError("outfile_label must be a string!")
else:
pass
# run assembler
result = fastq_join_app(infile_paths)
# Store output file path data to dict
path_dict = {}
path_dict['Assembled'] = result['Assembled'].name
path_dict['UnassembledReads1'] = result['UnassembledReads1'].name
path_dict['UnassembledReads2'] = result['UnassembledReads2'].name
# sanity check that files actually exist in path lcoations
for path in path_dict.values():
if not os.path.exists(path):
raise IOError('Output file not found at: %s |
scottphilip/caller-lookup | CallerLookup/Responses.py | Python | gpl-3.0 | 1,950 | 0.000513 | # Author: Scott Philip (sp@scottphilip.com)
# Version: 1.2 (25 July 2017)
# Source: https://github.com/scottphilip/caller-lookup/
# Licence: GNU GENERAL PUBLIC LICENSE (Version 3, 29 June 2007)
from CallerLookup.Strings import CallerLookupLabel, CallerLookupKeys
from CallerLookup.Utils.Logs import format_exception
def get_response_invalid(number, region):
return {CallerLookupLabel.RESULT: CallerLookupLabel.INVALID_NUMBER,
CallerLookupLabel.NUMBER: number,
CallerLookupLabel.REGION: region}
def get_response_error(ex):
result = {CallerLookupLabel.RESULT: CallerLookupLabel.ERROR,
CallerLookupLabel.MESSAGE: format_exception(ex)}
return result
def get_response_success(number_data, data):
result = {CallerLookupLabel.RESULT: CallerLookupLabel.UNKNOWN,
CallerLookupLabel.SCORE: 100}
result.update( | number_data)
if data is None or CallerLookupKeys.KEY_DATA not in data:
return result
data = data[CallerLookupKeys.KEY_DATA]
if len(data) == 0:
return result
data = data[0]
if CallerLookupKeys. | KEY_SCORE in data:
result[CallerLookupLabel.SCORE] = round(data[CallerLookupKeys.KEY_SCORE] * 100)
if CallerLookupKeys.KEY_ADDRESSES in data:
addresses = data[CallerLookupKeys.KEY_ADDRESSES]
if len(addresses) > 0:
if CallerLookupKeys.KEY_COUNTRY_CODE in addresses[0]:
result[CallerLookupLabel.REGION] = \
addresses[0][CallerLookupKeys.KEY_COUNTRY_CODE].upper()
if CallerLookupKeys.KEY_ADDRESS in addresses[0]:
result[CallerLookupLabel.ADDRESS] = \
addresses[0][CallerLookupKeys.KEY_ADDRESS]
if CallerLookupKeys.KEY_NAME in data:
result[CallerLookupLabel.NAME] = data[CallerLookupKeys.KEY_NAME]
result[CallerLookupLabel.RESULT] = CallerLookupLabel.SUCCESS
return result
|
cnamejj/PollEngine | just-otp.py | Python | apache-2.0 | 4,562 | 0.014467 | #!/usr/bin/env python
"""
Driver for OpenVPN client
"""
import time
import base64
import hashlib
import struct
import hmac
import pollengine
# ---
class OVDriveData:
"""OpenVPN authentication data"""
def __init__( self ):
self.otp_secret = NO_SECRET
# ---
class ShellAdmin:
"""Driver for admin shell object"""
def __init__( self ):
# PE.displaymsg( 'dbg:: Shell_Admin init' )
self.ovdata = OVDriveData()
def set_ovdata( self, ovdata ):
"""Record a data structure"""
self.ovdata = ovdata
def process_request( self, conn ):
"""Handle requests received from the client"""
# PE.displaymsg( 'Received {size} bytes "{req}"'.format(
# size=len(conn.inp_queue), req=conn.inp_queue) )
__continue = True
__lines = conn.inp_queue.split( '\n' )
for __req in __lines:
__words = __req.split( ' ' )
# PE.displaymsg( 'dbg:: line "{line}" words {nw}'.format(line=__req,
# nw=len(__words)) )
if len(__words) > 0:
__comm = __words[0]
if __comm == '':
continue
if __comm == ACR_SECRET:
if len(__words) == 2:
self.ovdata.otp_secret = __words[1]
PE.displaymsg( 'Ad | min: set OTP secret' )
conn.queue_response( 'OTP secret' )
else:
conn.queue_response(
'The "secret" command takes one argument' )
elif __comm == ACR_SHOW:
PE.displaymsg( 'Admin: show current settings' )
conn.queue_response( SHOW_TEMPLATE.format(
| secret=self.ovdata.otp_secret) )
elif __comm == ACR_OTP:
__otp = gen_otp(self.ovdata)
PE.displaymsg( 'Admin: Generate OTP {otp}'.format(
otp=__otp[-6:]) )
conn.queue_response( 'OTP for {now} is {otp}'.format(
now=time.strftime('%y/%m/%d-%H:%M:%S'),
otp=__otp[-6:]) )
elif __comm == ACR_DROP:
PE.displaymsg( 'Admin: drop client connection' )
conn.close()
elif __comm == ACR_QUIT:
PE.displaymsg( 'Admin: stop OVDriver' )
__continue = False
else:
PE.displaymsg( 'Admin: unrecognized command "{comm}"'.
format(comm=__comm) )
conn.queue_response( 'Command "{comm}" unrecognized'.
format(comm=__comm) )
# conn.queue_response( 'Got {nl} lines, {size} bytes\n'.format(
# size=len(conn.inp_queue), nl=len(__lines)) )
conn.inp_lines = ''
conn.inp_queue = ''
return __continue
def get_name( self ):
"""Return a description of the class"""
return 'Shell_Admin'
# ---
def gen_otp( ovdata ):
"""Generate the current one-time-password"""
try:
__n_interval = time.time() // OTP_INTERVAL
__key = base64.b32decode( ovdata.otp_secret, casefold=OTP_COLLAPSE )
__msg = struct.pack( '>Q', __n_interval )
__hdig = hmac.new( __key, __msg, OTP_DIGEST_METH ).digest()
__ob_low = ord( __hdig[19] ) & 15
__fulltoken = struct.unpack( '>I', __hdig[__ob_low:__ob_low + 4])
__token = (__fulltoken[0] & OTP_TOKEN_MASK) % (10 ** OTP_TOKEN_LEN)
__result = '{token:06d}'.format(token=__token)
except TypeError:
__result = OTP_ERROR
# PE.displaymsg( 'dbg:: Generate OTP {otp}'.format(otp=__result) )
return( __result )
# ---
ADMIN_PORT = 8888
NO_SECRET = 'NoOTPSecret'
OTP_INTERVAL = 30
OTP_TOKEN_LEN = 6
OTP_DIGEST_METH = hashlib.sha1
OTP_COLLAPSE = True
OTP_TOKEN_MASK = 0x7fffffff
OTP_ERROR = 'OTP-Failed'
ACR_SECRET = 'secret'
ACR_SHOW = 'show'
ACR_QUIT = 'quit'
ACR_OTP = 'otp'
ACR_DROP = 'drop'
# Comma is needed to make this a tuple
SERVER_SPEC_LIST = ( ADMIN_PORT, )
SHOW_TEMPLATE = 'Secret: {secret}'
# ---
print "Listening on port {port}".format(port=SERVER_SPEC_LIST[0])
PE = pollengine
ENGINE = PE.Engine( server_list = SERVER_SPEC_LIST )
OVDATA = OVDriveData()
OVDATA.otp_secret = NO_SECRET
ADMIN_SHELL = ShellAdmin()
ADMIN_SHELL.set_ovdata( OVDATA )
ENGINE.config_server( ADMIN_PORT, shell = ADMIN_SHELL )
ENGINE.run()
|
peastman/deepchem | deepchem/models/tests/test_predict.py | Python | mit | 1,984 | 0.006552 | """
Tests that deepchem models make deterministic predictions.
"""
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import deepchem as dc
try:
import tensorflow as tf
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
has_tensorflow = True
except:
has_tensorflow = False
class TestPredict(unittest.TestCase):
"""
Test that models make deterministic predictions
These tests guard against failures like having dropout turned on at
test time.
"""
def setUp(self):
super(TestPredict, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
'''
def test_tf_progressive_regression_predict(self):
"""Test tf progressive multitask makes deterministic predictions."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc. | metrics.mean_squared_error, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[.25],
learning_rate=0.003,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Check same predictions are made.
y_ | pred_first = model.predict(dataset)
y_pred_second = model.predict(dataset)
np.testing.assert_allclose(y_pred_first, y_pred_second)
'''
|
physycom/QGIS | python/plugins/processing/algs/qgis/SetRasterStyle.py | Python | gpl-2.0 | 2,755 | 0.001452 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SetRasterStyle.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsProcessingAlgorithm,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFile,
QgsProcessingOutpu | tRasterLayer)
from processing.algs.qgis | .QgisAlgorithm import QgisAlgorithm
class SetRasterStyle(QgisAlgorithm):
INPUT = 'INPUT'
STYLE = 'STYLE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Raster tools')
def groupId(self):
return 'rastertools'
def __init__(self):
super().__init__()
def flags(self):
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading | QgsProcessingAlgorithm.FlagDeprecated
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Raster layer')))
self.addParameter(QgsProcessingParameterFile(self.STYLE,
self.tr('Style file'), extension='qml'))
self.addOutput(QgsProcessingOutputRasterLayer(self.INPUT, self.tr('Styled')))
def name(self):
return 'setstyleforrasterlayer'
def displayName(self):
return self.tr('Set style for raster layer')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
style = self.parameterAsFile(parameters, self.STYLE, context)
with open(style) as f:
xml = "".join(f.readlines())
d = QDomDocument()
d.setContent(xml)
layer.importNamedStyle(d)
layer.triggerRepaint()
return {self.INPUT: layer}
|
jiadaizhao/LeetCode | 0201-0300/0228-Summary Ranges/0228-Summary Ranges.py | Python | mit | 455 | 0 | class Solution:
def summaryRanges(self, nums: List[int]) -> List[str]:
i = 0
result = []
while i < len(nums):
start = i
while i + 1 < len(nums) and nums[i] + 1 == nums[i + 1]:
| i += 1
if i == start:
| result.append(str(nums[start]))
else:
result.append(str(nums[start]) + '->' + str(nums[i]))
i += 1
return result
|
rdio/sentry | src/sentry/management/commands/create_sample_event.py | Python | bsd-3-clause | 1,839 | 0.002719 |
"""
sentry.management.commands.create_sample_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license | : BSD, see LICENSE for more details.
"""
from django.core.management.base import BaseCommand, CommandError, | make_option
class Command(BaseCommand):
help = 'Creates a sample event in Sentry (if applicable)'
option_list = BaseCommand.option_list + (
make_option('--project', dest='project', help="project ID or team-slug/project-slug"),
make_option('--platform', dest='platform'),
)
def handle(self, **options):
from django.conf import settings
from sentry.constants import PLATFORM_LIST
from sentry.models import Project
from sentry.utils.samples import create_sample_event
if not options['project']:
project = Project.objects.get(id=settings.SENTRY_PROJECT)
else:
if options['project'].isdigit():
project = Project.objects.get(id=options['project'])
elif '/' in options['project']:
t_slug, p_slug = options['project'].split('/', 1)
project = Project.objects.get(slug=p_slug, team__slug=t_slug)
else:
raise CommandError('Project must be specified as team-slug/project-slug or a project id')
if options['platform'] not in PLATFORM_LIST:
raise CommandError('Invalid platform. Must specify one of: %s' % ', '.join(PLATFORM_LIST))
platform = options['platform'] or project.platform
event = create_sample_event(project, platform)
if not event:
raise CommandError('Unable to create an event for platform %r' % (str(platform),))
self.stdout.write('Event created: %s' % (event.group.get_absolute_url(),))
|
xpharry/Udacity-DLFoudation | tutorials/reinforcement/gym/gym/envs/safety/predict_actions_cartpole.py | Python | mit | 2,176 | 0.003217 | """
predict_actions_cartpole is the cartpole task but where the agent will
get extra reward for saying what its next 5 *actions* will be.
This is a toy problem but the principle is useful -- imagine a household robot
or a self-driving car that accurately tells you what it's going to do before it does it.
This'll inspire confidence in the user.
Note: We don't allow agents to get the bonus reward before TIME_BEFORE_BONUS_ALLOWED.
This is to require that agents actually solve the cartpole problem before working on
being interpretable. We don't want bad agents just focusing on predicting their own badness.
"""
from gym.envs.classic_control.cartpole import CartPoleEnv
from gym import Env, spaces
NUM_PREDICTED_ACTIONS = 5
TIME_BEFORE_BONUS_ALLOWED = 100
CORRECT_PREDICTION_BONUS = 0.1
class PredictActionsCartpoleEnv(Env):
def __init__(self):
super(PredictActionsCartpoleEnv, self).__init__()
self.cartpole = CartPoleEnv()
self.observation_space = self.cartpole.observation_space
self.action_space = spaces.Tuple((self.cartpole.action_space,) * (NUM_PREDICTED_ACTIONS+1))
def _seed(self, *n, **kw):
return self.cartpole._seed(*n, **kw)
def _render(self, *n, **kw):
return self.cartpole._render(*n, **kw)
def _configure(self, *n, **kw):
return self.cartpole._configure(*n, **kw)
def _step(self, action):
# the first element of action is the actual current action
current_action = action[0]
observation, reward, done, info = self.cartpole._step(current_action)
if not done:
if self.ite | ration > TIME_BEFORE_BONUS_ALLOWED:
for i in xrange(min(NUM_PREDICTED_ACTIONS, len(self.predicted_actions))):
if self.predicted_actions[-(i + 1)][i] == current_action:
reward += | CORRECT_PREDICTION_BONUS
self.predicted_actions.append(action[1:])
self.iteration += 1
return observation, reward, done, info
def _reset(self):
observation = self.cartpole._reset()
self.predicted_actions = []
self.iteration = 0
return observation
|
geekybeaver/Billy | common/controller/base.py | Python | gpl-3.0 | 1,515 | 0.00264 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __init__ import *
from common.models import Account
import logging
class BaseController(webapp.RequestHandler):
template_values = {}
template_module = ''
def get(self, action, key=None):
# FIXME mysterious bug this gets set over requests somehow
self.template_values = {}
self.pre_dispatch(action, key)
if hasattr(self, '%s_action' % action.rstrip('/')):
method = getattr(self, '%s_action' % action)
method(key)
self.renderTemplate(action, self.template_values)
def post(self, action, key=None):
self.pre_dispatch(action, key)
if hasattr(self, '%s_action' % action.rstrip('/')):
method = getattr(self, '%s_action' % action)
method(key)
def pre_dispatch(self, action, key=None):
pass
def renderTemplate(self, action, template_values):
path = os.path.join(os.path.dirname(__file__), '../../template/%s/%s.html'
% (self.template_module, action))
if os.path.exists(path):
self.response.out. | write(template.render(path, | self.template_values))
def _check_account(self, account):
current_account = Account().current()
if current_account.key() != account.key():
self.accessForbidden()
def accessForbidden(self):
self.response.out.write('You do not own that account')
self.response.set_status(403)
exit()
|
joeyb182/pynet_ansible | pynet_ansible/exercise_1_8.py | Python | apache-2.0 | 615 | 0.004878 | #!/usr/bin/env python
#8. Write a Python program using ciscoconfparse that par | ses cisco_crypto.txt. Note, this config file is not fully valid (i.e. parts of the configuration are missing). The script should find all of the crypto map entries in the file (lines that begin with 'crypto map CRYPTO') and for each crypto map entry print out its children.
from ciscoconfparse import CiscoConfParse
cisco_cfg = CiscoConfParse("cisco_crypto.txt")
crypto_cfg = cisco_cfg.find_objects(r"^crypto map CRYPTO")
for line in crypto_cfg:
print line.text
for kids | in line.children:
print kids.text
print
|
kingvuplus/EGAMI-D | lib/python/Plugins/Extensions/MediaPlayer/settings.py | Python | gpl-2.0 | 5,014 | 0.025329 | from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Components.FileList import FileList
from Components.Sources.StaticText import StaticText
from Components.config import config, getConfigListEntry, ConfigSubsection, ConfigText, ConfigYesNo, ConfigDirectory
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.Pixmap import Pixmap
from Components.Sources.Boolean import Boolean
config.mediaplayer = ConfigSubsection()
config.mediaplayer.repeat = ConfigYesNo(default=False)
config.mediaplayer.savePlaylistOnExit = ConfigYesNo(default=True)
config.mediaplayer.saveDirOnExit = ConfigYesNo(default=False)
config.mediaplayer.defaultDir = ConfigDirectory()
config.mediaplayer.sortPlaylists = ConfigYesNo(default=False)
config.mediaplayer.alwaysHideInfoBar = ConfigYesNo(default=True)
config.mediaplayer.onMainMenu = ConfigYesNo(default=False)
class DirectoryBrowser(Screen, HelpableScreen):
def __init__(self, session, currDir):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerDirectoryBrowser, then FileBrowser, this allows individual skinning
self.skinName = ["MediaPlayerDirectoryBrowser", "FileBrowser" ]
HelpableScreen.__init__(self)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Use"))
self.filelist = FileList(currDir, matchingPattern="")
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.use,
"red": self.exit,
"ok": self.ok,
"cancel": self.exit
})
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Directory browser"))
def ok(self):
if self.filelist.canDescent():
self.filelist.descent()
def use(self):
if self["filelist"].getCurrentDirectory() is not None:
if self.filelist.canDescent() and self["filelist"].getFilename() and len(self["filelist"].getFilename()) > len(self["filelist"].getCurrentDirectory()):
self.filelist.descent()
self.close(self["filelist"].getCurrentDirectory())
else:
self.close(self["filelist"].getFilename())
def exit(self):
self.close(False)
class MediaPlayerSettings(Screen,ConfigListScreen):
def __init__(self, session, parent):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
self.skinName = ["MediaPlayerSettings", "Setup" ]
self.setup_title = _("Edit settings")
self.onChangedEntry = [ ]
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["VKeyIcon"] = Boolean(False)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
self.parent = parent
self.initConfigList()
config.mediaplayer.saveDirOnExit.ad | dNotifier(self.initConfigList)
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.save,
"red": self.cancel,
"cancel": self.cancel,
"ok": self.ok,
}, -2)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def initConfigList(self, element=None):
print "[initConfigList]", element
try:
self.list = []
self.list.append(getConfigListEntry(_("repeat playlis | t"), config.mediaplayer.repeat))
self.list.append(getConfigListEntry(_("save playlist on exit"), config.mediaplayer.savePlaylistOnExit))
self.list.append(getConfigListEntry(_("save last directory on exit"), config.mediaplayer.saveDirOnExit))
if not config.mediaplayer.saveDirOnExit.value:
self.list.append(getConfigListEntry(_("start directory"), config.mediaplayer.defaultDir))
self.list.append(getConfigListEntry(_("sorting of playlists"), config.mediaplayer.sortPlaylists))
self.list.append(getConfigListEntry(_("Always hide infobar"), config.mediaplayer.alwaysHideInfoBar))
self.list.append(getConfigListEntry(_("show mediaplayer on mainmenu"), config.mediaplayer.onMainMenu))
self["config"].setList(self.list)
except KeyError:
print "keyError"
def changedConfigList(self):
self.initConfigList()
def ok(self):
if self["config"].getCurrent()[1] == config.mediaplayer.defaultDir:
self.session.openWithCallback(self.DirectoryBrowserClosed, DirectoryBrowser, self.parent.filelist.getCurrentDirectory())
def DirectoryBrowserClosed(self, path):
print "PathBrowserClosed:" + str(path)
if path:
config.mediaplayer.defaultDir.setValue(path)
def save(self):
for x in self["config"].list:
x[1].save()
self.close()
def cancel(self):
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
|
adsass/astrometry | astrometry.net/SIMBAD.py | Python | mit | 12,840 | 0.009034 | #!/usr/bin/env python
"""Module implementing methods for SIMBAD.
Methods implemented in this module:
- makeQuery()
- getObjects()
For 'makeQuery' method:
Given a set of input parameters, the client sends a query
to the specified SIMBAD server. If the query is executed
successfully, the result will Python list of bibcodes.
If the query fails, the error message will be captured.
For 'getObjects' method:
Given a bibcode, the client returns a list of dictionaries,
one for each astronomical object associated with the bibcode.
The dictionary for each object contains parsed information,
and the raw string returned by the SIMBAD server. The dictionary
also contains the bibcode used for this query. The dictionaries have
the following format:
{'refcode': '2009Sci...326.1675B'
'id' : 'V* AA Tau',
'ra' : '068.73092',
'dec' : '+24.48144',
'otype' : 'Orion_V* ~',
'stype' : 'M0V:e,D,~',
'mtype' : '~,~,~',
'_raw' : 'V* AA Tau|068.73092|+24.48144|Orion_V* ~|M0V:e,D,~|~,~,~'
}
where 'id' is the first (main) identifier from the list of identifiers
of the object, 'ra' and 'dec' are the decimal coordinates, 'otype' is
the default display for the main object type (in the above example, this
is "Variable Star of Orion Type". See: http://cdsweb.u-strasbg.fr/cgi-bin/Otype?X),
'stype' is the spectral type with three parameters (string, quality and
bibliographic reference) and 'mtype' is the morphological type with three
parameteres (string, quality and bibliographic reference). The '_raw' entry
contains the whole string as returned by the server
The input parameters are as follows:
A. General parameters (all optional):
- 'URL' this parameter will change the default server
to be queried ('simbad.harvard.edu')
This parameter is set during the instantiation
of the client object 'Client(URL='...')'
- proxy this parameter will set a proxy server
- startyear,endyear return bibcodes in the year publication year
interval defined by these values
- journals this is a Python list of bibstems, defining the
journals to be returned
- debug turning debug on will provide more verbose output
This parameter is set during the instantiation
of the client object 'Client(debug=1)'
B. For object query (to get bibcodes):
- 'object' the name of the object (e.g. "M31", "Arcturus")
- 'radius' radius of circle around the object to expand
the search. The entry needs to have a qualifier
appended: 'd' for degrees, or 'm' for arcminutes
or 's' for arcseconds. The default is 20m.
C. For coordinate query:
- 'pstring' right ascension and declination for coordinate
query. Coordinates can be written in sexagesimal,
with spaces as field separators. A search radius can
be specified using a colon, and given either
sexigesimally or decimally. Its default value
is 2arcmin.
Examples:
05 23 34.6 -69 45 22:0 6
05 23 34.6 -69 45 22:0.166666
- 'frame' parameter to change the default 'frame' (ICRS).
Valid values are: ICRS, FK4, FK5, GAL, SGAL, ECL
- 'equinox' parameter to change the default 'equinox' (2006.7)
- 'epoch' paramater to change the default 'epoch' (J2000)
D. For astronomical object query (for a given bibcode):
- 'bibcode' bibcode of paper for which astronomical objects
are required
Examples:
>>> from ads.SIMBAD import Client as Client
>>> SimbadClient = Client(URL="http://simbad.u-strasbg.fr",debug=1)
>>> SimbadClient.object = 'M31'
>>> SimbadClient.startyear = '1910'
>>> SimbadClient.endyear = '1990'
>>> SimbadClient.journals = ['LicOB','PASP']
>>> SimbadClient.makeQuery()
>>> print SimbadClient.result
"""
import re
import sys
import time
class NoQueryElementsError(Exception):
    """Raised when a SIMBAD script is assembled without any query elements
    (see makeQuery, which raises this if self.elements stays empty)."""
    pass
class IncorrectInputError(Exception):
    """Raised when user-supplied query parameters are malformed, e.g. an
    unparsable coordinate string, a radius without an h/m/s/d qualifier,
    or a DEC that does not start with '+' or '-'."""
    pass
class Client:
# alternative: http://simbad.u-strasbg.fr
def_baseURL = 'http://simbad.harvard.edu'
    def __init__(self, URL=None, proxy=None, debug=0):
        """Initialize a SIMBAD client.

        URL   -- override the default SIMBAD server base URL
        proxy -- optional HTTP proxy address
        debug -- verbosity flag (0 = quiet)
        """
        self.debug = debug
        self.baseURL = URL or self.def_baseURL
        # NOTE(review): attribute name is misspelled ('proxees' vs 'proxies');
        # kept as-is because external callers may already rely on it.
        self.proxees = {}
        if proxy:
            self.proxees['http'] = proxy
        # Script lines accumulated by makeQuery().
        self.elements = []
        # Bibcode filters: publication-year interval and journal bibstems.
        self.startyear= ''
        self.endyear = ''
        self.journals = []
        # Coordinate-query inputs (see module docstring for accepted formats).
        self.pstring = ''
        self.radius = ''
        self.ra = ''
        self.dec = ''
        self.equinox = ''
        self.epoch = ''
        self.frame = ''
        # Valid coordinate frames accepted by makeQuery().
        self.frames = ['ICRS','FK4','FK5','GAL','SGAL','ECL']
        self.error = ''
        # URL path prefix of SIMBAD's script-execution endpoint.
        self.__preamble = 'simbad/sim-script?submit=submit+script&script='
        # Object-query input and result/script state.
        self.object = ''
        self.result = ''
        self.script = ''
        self.bibcode = ''
        # Output-format strings per query type; '|' separates SIMBAD format
        # codes (these pipes are part of the SIMBAD script syntax).
        self.qFormats = {'bibquery':'%BIBCODELIST',
                         'objquery':'%IDLIST(1)|%COO(d;A|D)|%OTYPE|%SP(S,Q,B)|%MT(M,Q,B)'}
        # Wall-clock construction time (presumably for timing queries —
        # its consumer is not visible in this chunk).
        self.stime = time.time()
def makeQuery(self,makelist=1):
ppat = re.compile('([0-9\.\ ]+)\s+([\-\+][0-9\.\ ]+)')
rpat = re.compile('([0-9]+)\s+([0-9]+)\s*([0-9]+)?')
self.qType = 'bibquery'
self.script = ''
self.elements = []
if len(self.elements) == 0:
self.__setscriptheader()
if len(self.elements) == 0:
raise NoQueryElementsError
if self.pstring:
pos = re.sub('[\'\"]','',self.pstring)
try:
radec,rad = pos.split(':')
except ValueError:
rad = ''
radec = pos
rmat = rpat.search(rad)
if rmat:
try:
rad = "%sh%sm%ss" % (rmat.group(1),rmat.group(2),int(rmat.group(3)))
except (IndexError, TypeError):
if int(rmat.group(1)) > 0:
rad = "%sh%sm" % (rmat.group(1),rmat.group(2))
else:
rad = "%sm" % rmat.group(2)
pmat = ppat.search(radec)
try:
self.ra = pmat.group(1)
self.dec= pmat.group(2)
except:
raise IncorrectInputError, "coordinate string could not be parsed"
if rad:
if re.search('m',rad):
s | elf.radius = rad
else:
self.radius = "%sd"%rad
if self.object:
if self.radius:
if self.radius[-1] not in ['h','m','s','d']:
raise IncorrectInputError, "radius is missing qualifier!"
self.elements.append('query ~ %s radius=%s'%
(self.object,self.radius))
else:
self.elements.append('query id %s'%self.ob | ject)
elif self.ra and self.dec:
if self.dec[0] not in ['+','-']:
raise IncorrectInputError, "DEC must start with '+' or '-'!"
if self.radius:
if self.radius[-1] not in ['h','m','s','d']:
raise IncorrectInputError, "radius is missing qualifier!"
ra = self.ra
dec= self.dec
coo_query = 'query coo %s %s radius=%s'% (ra,dec,self.radius)
else:
ra = self.ra
dec= self.dec
coo_query = 'query coo %s %s'%(ra,dec)
if self.frame and self.frame in self.frames:
coo_query += " frame %s" % self.frame
if self.equinox:
coo_query += " equi=%s" % self.equinox
if self.epoch:
coo_query += "epoch=%s" % self.epoch
self.elements.append(coo_query)
else:
self.result = ''
raise IncorrectInputError
self.script = "\n".joi |
KEHANG/RMG-Py | rmgpy/rmg/model.py | Python | mit | 87,614 | 0.006106 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', W | ITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABI | LITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains classes for working with the reaction model generated by RMG.
"""
import logging
import math
import numpy
import os.path
import itertools
from rmgpy.display import display
#import rmgpy.chemkin
import rmgpy.constants as constants
from rmgpy.quantity import Quantity
import rmgpy.species
from rmgpy.thermo import Wilhoit, NASA, ThermoData
from rmgpy.pdep import SingleExponentialDown
from rmgpy.statmech import Conformer
from rmgpy.data.base import Entry, ForbiddenStructureException
from rmgpy.data.thermo import *
from rmgpy.data.solvation import *
from rmgpy.data.kinetics import *
from rmgpy.data.statmech import *
from rmgpy.transport import TransportData
import rmgpy.data.rmg
#needed to call the generate3dTS method in Reaction class
from rmgpy.reaction import Reaction
from pdep import PDepReaction, PDepNetwork, PressureDependenceError
# generateThermoDataFromQM under the Species class imports the qm package
################################################################################
class Species(rmgpy.species.Species):
solventName = None
solventData = None
solventViscosity = None
diffusionTemp = None
def __init__(self, index=-1, label='', thermo=None, conformer=None,
molecule=None, transportData=None, molecularWeight=None,
dipoleMoment=None, polarizability=None, Zrot=None,
energyTransferModel=None, reactive=True, coreSizeAtCreation=0):
rmgpy.species.Species.__init__(self, index, label, thermo, conformer, molecule, transportData, molecularWeight, dipoleMoment, polarizability, Zrot, energyTransferModel, reactive)
self.coreSizeAtCreation = coreSizeAtCreation
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Species, (self.index, self.label, self.thermo, self.conformer, self.molecule, self.transportData, self.molecularWeight, self.dipoleMoment, self.polarizability, self.Zrot, self.energyTransferModel, self.reactive, self.coreSizeAtCreation),)
def generateThermoData(self, database, thermoClass=NASA, quantumMechanics=None):
"""
Generates thermo data, first checking Libraries, then using either QM or Database.
If quantumMechanics is not None, it is asked to calculate the thermo.
Failing that, the database is used.
The database generates the thermo data for each structure (resonance isomer),
picks that with lowest H298 value.
It then calls :meth:`processThermoData`, to convert (via Wilhoit) to NASA
and set the E0.
Result stored in `self.thermo` and returned.
"""
from rmgpy.data.thermo import saveEntry
thermo0 = None
thermo0 = database.thermo.getThermoDataFromLibraries(self)
if thermo0 is not None:
logging.info("Found thermo for {0} in thermo library".format(self.label))
assert len(thermo0) == 3, "thermo0 should be a tuple at this point: (thermoData, library, entry)"
thermo0 = thermo0[0]
elif quantumMechanics:
molecule = self.molecule[0]
if quantumMechanics.settings.onlyCyclics and not molecule.isCyclic():
pass
else: # try a QM calculation
if molecule.getRadicalCount() > quantumMechanics.settings.maxRadicalNumber:
# Too many radicals for direct calculation: use HBI.
logging.info("{0} radicals on {1} exceeds limit of {2}. Using HBI method.".format(
self.molecule[0].getRadicalCount(),
self.label,
quantumMechanics.settings.maxRadicalNumber,
))
# Need to estimate thermo via each resonance isomer
thermo = []
for molecule in self.molecule:
molecule.clearLabeledAtoms()
molecule.updateAtomTypes()
tdata = database.thermo.estimateRadicalThermoViaHBI(molecule, quantumMechanics.getThermoData)
if tdata is not None:
thermo.append(tdata)
if thermo:
H298 = numpy.array([t.getEnthalpy(298.) for t in thermo])
indices = H298.argsort()
for i, ind in enumerate(indices):
logging.info("Resonance isomer {0} {1} gives H298={2:.0f} J/mol".format(i, self.molecule[ind].toSMILES(), H298[ind]))
self.molecule = [self.molecule[ind] for ind in indices]
molecule = self.molecule[0]
thermo0 = thermo[indices[0]]
else:
pass
with open('thermoHBIcheck.txt','a') as f:
f.write('// {0!r}\n'.format(thermo0).replace('),','),\n// '))
f.write('{0}\n'.format(molecule.toSMILES()))
f.write('{0}\n\n'.format(molecule.toAdjacencyList(removeH=False)))
else: # Not too many radicals: do a direct calculation.
thermo0 = quantumMechanics.getThermoData(molecule) # returns None if it fails
if thermo0 is not None:
# Write the QM molecule thermo to a library so that can be used in future RMG jobs.
quantumMechanics.database.loadEntry(index = len(quantumMechanics.database.entries) + 1,
label = molecule.toSMILES(),
molecule = molecule.toAdjacencyList(),
thermo = thermo0,
shortDesc = thermo0.comment
)
if thermo0 is None:
thermo0 = database.thermo.getThermoData(self)
return self.processThermoData(database, thermo0, thermoClass)
def processThermoData(self, database, thermo0, thermoClass=NASA):
"""
Converts via Wilhoit into required `thermoClass` and sets `E0`.
Resulting thermo is stored (`self.thermo`) and returned.
"""
# Always convert to Wilhoit so we can compute E0
if isinstance(thermo0, Wilhoit):
wilhoit = thermo0
elif isinstance(thermo0, ThermoData):
Tdata = thermo0._Tdata.value_si
Cpdata = thermo0._Cpdata.value_si
|
Andr3iC/juriscraper | opinions/united_states/state/indtc.py | Python | bsd-2-clause | 470 | 0 | """
Scraper for Indiana Tax Court
CourtID: indtc
Court Short Name: Ind. Tax.
Auth: Jon Andersen <janderse@gmail.com>
Reviewer: mlr
History:
2014-09-03: Created by Jon Andersen
"""
from juriscraper.opinions.united_states.state import ind
class Site(ind.Site):
    """Indiana Tax Court scraper.

    Reuses the shared Indiana scraper (``ind.Site``), pointing it at the
    Tax Court opinions page.  This block repairs extraction garbling that
    had split ``self.__module__`` and the URL literal mid-token.
    """

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        # juriscraper convention: the court id is the module path.
        self.court_id = self.__module__
        self.url = 'http://www.in.gov/judiciary/opinions/taxcourt.html'
|
vileopratama/vitech | src/addons/point_of_sale/__openerp__.py | Python | mit | 2,836 | 0.001058 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Point of Sale',
    'version': '1.0.1',
    'category': 'Point Of Sale',
    'sequence': 20,
    'summary': 'Touchscreen Interface for Shops',
    'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
    """,
    'depends': ['sale_stock', 'barcodes'],
    'data': [
        'data/report_paperformat.xml',
        'data/default_barcode_patterns.xml',
        'security/point_of_sale_security.xml',
        'security/ir.model.access.csv',
        'wizard/pos_box.xml',
        'wizard/pos_confirm.xml',
        'wizard/pos_details.xml',
        'wizard/pos_discount.xml',
        'wizard/pos_open_statement.xml',
        'wizard/pos_payment.xml',
        'views/templates.xml',
        'point_of_sale.xml',
        'point_of_sale_report.xml',
        'point_of_sale_view.xml',
        'point_of_sale_sequence.xml',
        'point_of_sale_data.xml',
        'report/pos_order_report_view.xml',
        'point_of_sale_workflow.xml',
        'account_statement_view.xml',
        'account_statement_report.xml',
        'res_users_view.xml',
        'res_partner_view.xml',
        'res_config_view.xml',
        'views/report_statement.xml',
        'views/report_usersproduct.xml',
        'views/report_receipt.xml',
        'views/report_saleslines.xml',  # repaired: filename was split mid-token by extraction
        'views/report_detailsofsales.xml',
        'views/report_payment.xml',
        'views/report_sessionsummary.xml',  # repaired: filename was split mid-token by extraction
        'views/report_userlabel.xml',
        'views/point_of_sale.xml',
        'point_of_sale_dashboard.xml',
    ],
    'demo': [
        'point_of_sale_demo.xml',
    ],
    'test': [
        '../account/test/account_minimal_test.xml',
        'test/tests_before.xml',
        'test/00_register_open.yml',
        'test/01_order_to_payment.yml',
        'test/02_order_to_invoice.yml',
        'test/point_of_sale_report.yml',
        'test/account_statement_reports.yml',
    ],
    'installable': True,
    'application': True,
    'qweb': ['static/src/xml/pos.xml'],
    'website': 'https://www.odoo.com/page/point-of-sale',
    'auto_install': False,
}
|
juanchodepisa/sbtk | SBTK_League_Helper/update_tournaments.py | Python | mit | 6,552 | 0.0058 | from src.interfacing.ogs.connect import Authentication
import codecs
import sys
import os
from time import sleep
def loadList(pNameFile):
    """Read *pNameFile* as UTF-8 and return its lines.

    Trailing newline characters are kept; callers strip them themselves.
    """
    with codecs.open(pNameFile, "r", "utf-8") as source:
        return [entry for entry in source]
if __name__ == "__main__":
a = Authentication("Kuksu League", "", testing=False);
iGroupNames = loadList("E:/Project/OGS/OGS-League/group_names.txt");
iGroupIDs = loadList("E:/Project/OGS/OGS-League/group_ids.txt");
nGroups = len(iGroupNames);
for i in range(nGroups):
iGroupNames[i] = iGroupNames[i].replace("\r\n", "");
iGroupNames[i] = iGroupNames[i].replace("\n", "");
iGroupIDs[i] = iGroupIDs[i].replace("\r\n", "");
iGroupIDs[i] = iGroupIDs[i].replace("\n", "");
iGroupIDs[i] = int(iGroupIDs[i]);
iDescription = """
Kuksu Main Title Tournament 9th Cycle Group %s
Title Holder: <a href='https://online-go.com/user/view/35184/vitality'>vitality (5d)</a>
Previous cycles:
<table style="text-align:center;" border='2'>
<tr><th rowspan=2>Cycle</th><td colspan=3><b>Title Match</b></td><td colspan=3><b>Title Tournament</b></td></tr>
<tr>
<th>Winner</th><th>Score</th><th>Runner-up</th>
<th>Winner<img src='https://a00ce0086bda2213e89f-570db0116da8eb5fdc3ce95006e46d28.ssl.cf1.rackcdn.com/4.2/img/trophies/gold_title_19.png' alt='Gold'></img></th>
<th>Runner-up<img src='https://a00ce0086bda2213e89f-570db0116da8eb5fdc3ce95006e46d28.ssl.cf1.rackcdn.com/4.2/img/trophies/silver_title_19.png' alt='Silver'></img></th>
<th>3rd Place<img src='https://a00ce0086bda2213e89f-570db0116da8eb5fdc3ce95006e46d28.ssl.cf1.rackcdn.com/4.2/img/trophies/bronze_title_19.png' alt='Bronze'></img></th>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2375'>1</a></td>
<td><b>luke</b></td><td></td><td></td>
<td><b>luke (2d)</b></td><td>davos</td><td>gomad361</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2384'>2</a></td>
<td><b>gomad361</b></td><td>3-2</td><td>luke</td>
<td><b>luke (2d)</b></td><td>gomad361</td><td>hotspur</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2391'>3</a></td>
<td><b>Uberdude</b></td><td>∗</td><td>gomad361</td>
<td><b>Uberdude (6d)</b></td><td>KyuT</td><td>marigo</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2406'>4</a></td>
<td><b>Uberdude</b></td><td>5-0</td><td>KyuT</td>
<td><b>KyuT (4d)</b></td><td>quiller</td><td>morituri</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2424'>5</a></td>
<td><b>Uberdude</b></td><td>5-0</td><td>gomad361</td>
<td><b>gomad361 (2d)</b></td><td>morituri</td><td>betterlife</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2439'>6</a></td>
<td><b>Uberdude</b></td><td>5-0</td><td>Elin</td>
<td><b>Elin (3d)</b></td><td>gomad361</td><td>morituri</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2460'>7</a></td>
<td><b>Uberdude</b></td><td>3-2</td><td>vitality</td>
<td><b>vitality (5d)</b></td><td>Elin</td><td>gomad361</td>
</tr>
<tr>
<td><a href='https://online-go.com/tournament/2475'>8</a></td>
<td><b>vitality</b></td><td>∗</td><td>Uberdude</td>
<td><b>vitality (5d)</b></td><td>nrx</td><td>gojohn</td>
</tr>
<tr>
<td rowspan=5><a href='#'>9</a></td>
<td rowspan=5 colspan=3></td>
<td colspan=3>
<a href='https://online-go.com/tournament/12653'>[A]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12654'>[B1]</a>
<a href='https://online-go.com/tournament/12655'>[B2]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12656'>[C1]</a>
<a href='https://online-go.com/tournament/12657'>[C2]</a>
<a href='https://online-go.com/tournament/12658'>[C3]</a>
<a href='https://online-go.com/tournament/12659'>[C4]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12660'>[D1]</a>
<a href='https://online-go.com/tournament/12661'>[D2]</a>
<a href='https://online-go.com/tournament/12662'>[D3]</a>
<a href='https://online-go.com/tournament/12663'>[D4]</a>
<a href='https://online-go.com/tournament/12664'>[D5]</a>
<a href='https://online-go.com/tournament/12665'>[D6]</a>
<a href='https://online-go.com/tournament/12666'>[D7]</a>
<a href='https://online-go.com/tournament/12667'>[D8]</a>
</td>
</tr>
<tr>
<td colspan=3>
<a href='https://online-go.com/tournament/12668'>[E1]</a>
<a href='https://online-go.com/tournament/12669'>[E2]</a>
<a href='https://online-go.com/tournament/12670'>[E3]</a>
<a href='https://online-go.com/tournament/12671'>[E4]</a>
<a href='https://online-go.com/tournament/12672'>[E5]</a>
<a href='https://online-go.com/tournament/12673'>[E6]</a>
</td>
</tr>
</table>
∗ means the games were finished by timeout or retiring.
Rules could be found <a href='https://forums.online-go.com/t/league-format-kuksu-title-tournament-rules-and-discussion/5191'>here</a>.
""" % iGroupNames[i];
a.put(['tournaments', iGroupIDs[i]], {"description": iDescription
});
print("Tournament %s with id %d updated.\n" % (iGroupNames[i], iGroupIDs[i]));
sleep(2);
# tourney id 7370
"""
iTournament = a.post | (['tournaments'],{
"id":12650,
"name":"Test Tournament 2",
"group":515,
"tournament_type":"roundrobin",
"description":"<b>Test 3</b>",
"board_size":19,
"handicap":0, #default -1 for auto
"time_start": "2015-12-01T00:00:00Z",
"time_control_parameters":{
"time_control":"fischer",
"initial_time":604800,
"max_time":604800,
"time_increment":86400
},
"rules": "korean",
"exclusivity": "invite", # open, group. default
"e | xclude_provisional": False, # default
"auto_start_on_max": True, # default
"analysis_enabled": True, #default
"settings":{
"maximum_players":10,
},
"players_start": 6, #default
"first_pairing_method": "slide", #slaughter, random, slide, strength . default
"subsequent_pairing_method": "slide", # default
"min_ranking":0,
"max_ranking":36
});
#print("Hello");
print(iTournament["id"]);
"""
#print "Tournament %s is created." % iTournament["id"];
# r= a.post (['tournaments', 12642, 'players'], app_param= {"player_id":40318} )
# print (r)
|
zubie7a/Algorithms | HackerRank/Cracking_The_Coding_Interview/Data_Structures/05_Balanced_Brackets.py | Python | mit | 1,286 | 0.004666 | # https://www.hackerrank.com/challenges/ctci-balanced-brackets
def is_matched(expression):
    """Return True when every bracket in *expression* is properly balanced.

    Scans left to right, pushing opening brackets onto a stack and popping
    when the matching closer arrives.  Fixes two defects in the original:
    extraction garbling had split two lines mid-token, and a non-bracket
    character raised KeyError whenever the stack was non-empty; such
    characters are now simply ignored (HackerRank inputs are brackets-only,
    so behavior on valid inputs is unchanged).
    """
    closer_to_opener = {
        "}": "{",
        ")": "(",
        "]": "[",
    }
    openers = frozenset(closer_to_opener.values())
    stack = []
    for ch in expression:
        if ch in openers:
            stack.append(ch)
        elif ch in closer_to_opener:
            # A closer must match the most recent unmatched opener.
            if not stack or stack[-1] != closer_to_opener[ch]:
                return False
            stack.pop()
    # Balanced only if no opener is left waiting for its closer,
    # e.g. "{{{{{((((([[[[]" is unbalanced.
    return not stack
# --- stdin driver (Python 2: raw_input/xrange/print statements) ---
# First input line: number of test cases; each following line: one
# bracket sequence to classify.
t = int(raw_input())
for i in xrange(t):
    expression = raw_input()
    # Emit the verdict in HackerRank's expected YES/NO format.
    if is_matched(expression) == True:
        print "YES"
    else:
        print "NO"
harveywwu/vnpy | vnpy/trader/gateway/tkproGateway/__init__.py | Python | mit | 242 | 0 | # encoding: UTF-8
from vnpy.trader import vtConstant
from .tkproGateway import TkproGateway

# Gateway registration metadata consumed by the vn.py engine loader.
# (This block repairs extraction garbling that split 'vnpy.trader' and
# 'gatewayQryEnabled' mid-token.)
gatewayClass = TkproGateway
gatewayName = 'TKPRO'
gatewayDisplayName = 'TKPRO'
gatewayType = vtConstant.GATEWAYTYPE_EQUITY
gatewayQryEnabled = True
dongting/sdnac | sdnac/api/rpc.py | Python | apache-2.0 | 1,086 | 0.01105 | # adapted from zmq_server_example.py in tinyrpc
import time, sys
import zmq
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.zmq import ZmqServerTransport
from tinyrpc.server import RPCServer
from tinyrpc.dispatch import RPCDispatcher
class Server(object):
    """Minimal JSON-RPC server over a ZeroMQ transport (tinyrpc).

    Exposes a single public method, ``request``, which forwards every
    incoming payload to the callback supplied at construction time.
    This block removes a stray extraction artifact ('|') that broke the
    syntax, and drops dead commented-out code.
    """

    def __init__(self, req_callback):
        """Bind tcp://127.0.0.1:8000 and serve requests.

        req_callback -- callable invoked with each incoming request; its
        return value is sent back to the client.

        NOTE(review): serve_forever() blocks, so constructing a Server
        never returns; run it in a dedicated thread/process if the caller
        needs to continue.
        """
        self.ctx = zmq.Context()
        self.dispatcher = RPCDispatcher()
        self.transport = ZmqServerTransport.create(self.ctx, 'tcp://127.0.0.1:8000')
        self.req_callback = req_callback
        self.rpc_server = RPCServer(
            self.transport,
            JSONRPCProtocol(),
            self.dispatcher
        )
        # Register `request` programmatically instead of via the
        # @dispatcher.public decorator (callback is only known here).
        self.dispatcher.public(self.request)
        self.rpc_server.serve_forever()

    def request(self, req):
        """RPC-exposed entry point: delegate to the user callback."""
        return self.req_callback(req)
|
gsidhu/Pretty_Parser | setup.py | Python | mit | 683 | 0.027818 | import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = | {"packages": ["os","bs4","urllib.request","requests","lxml"], "excludes": ["tkinter"], "include_files":["style.css"]}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
##if sys.platform == "win32":
## base = "Win32GUI"
setup( name = "Pretty Parser",
version = "1.2",
description = "Bare-minimum RSS Feed Aggregator",
options = {"build_exe": b | uild_exe_options},
executables = [Executable("PrettyParser.py", base=base)])
|
kdeldycke/smile_openerp_matrix_widget | smile_matrix_demo/smile_workload.py | Python | gpl-3.0 | 10,721 | 0.01026 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 Smile. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import random
import os
from osv import osv, fields
from smile_matrix_field.matrix_field import matrix, matrix_read_patch, matrix_write_patch, LINE_RENDERING_MODES
class smile_activity_workload(osv.osv):
_name = 'smile.activity.workload'
## Function fields
def _get_additional_line_ids(self, cr, uid, ids, name, arg, context=None):
""" Randomly pick some lines to demonstrate the Matrix's additional_lines parameter
"""
result = {}
for workload in self.browse(cr, uid, ids, context):
# Get even lines
result[workload.id] = [l.id for l in workload.line_ids][::2]
return result
def _get_employee_filter_domain(self, cr, uid, ids, name, arg, context=None):
""" Return a domain to filter employees.
The implemented rule is absolutely arbitrary and is just there to demonstrate usage of matrix's dynamic_domain_property parameter.
"""
result = {}
for workload in self.browse(cr, uid, ids, context):
# Only allow employees with IDs of the same parity of workload's start date
odd_month = datetime.datetime.strptime(workload.start_date, '%Y-%m-%d').date().month % 2
employee_ids = [i for i in self.pool.get('smile.activity.employee').search(cr, uid, [], context=context) if odd_month ^ (not i % 2)]
result[workload.id] = [('id', 'in', employee_ids)]
return result
## Fields definition
_columns = {
'name': fields.char('Name', size=32),
'project_id': fields.many2one('smile.activity.project', "Project", required=True),
'start_date': fields.related('project_id', 'start_date', type='date', string="Start date", readonly=True),
'end_date': fields.related('project_id', 'end_date', type='date', string="End date", readonly=True),
'date_range': fields.related('project_id', 'date_range', type='selection', string="Period date range", readonly=True),
'line_ids': fields.one2many('smile.activity.workload.line', 'workload_id', "Workload lines"),
'additional_line_ids': fields.function(_get_additional_line_ids, string="Additional lines", type='one2many', relation='smile.activity.workload.line', readonly=True, method=True),
'employee_filter': fields.function(_get_employee_filter_domain, string="Employee filter domain", type='string', readonly=True, method=True),
'matrix': matrix(
line_property='line_ids',
line_type='smile.activity.workload.line',
line_inverse_property='workload_id',
cell_property='cell_ids',
cell_type='smile.activity.workload.cell',
cell_inverse_property='line_id',
cell_value_property='quantity',
cell_date_property='date',
date_range_property='date_range',
date_format='%m/%y',
navigation = True,
navigation_size = 12,
highlight_date = datetime.date(datetime.date.today().year, datetime.date.today().month, 1),
line_rendering_dynamic_property = 'line_rendering',
increment_values = [-1, 0.0, 2.71, 3.14],
#tree_definition = [
#{ 'line_property': 'profile_id',
#'resource_type': 'smile.activity.profile',
#'domain': [('name', 'not in', ['Consultant', 'Expert'])],
#},
#{ 'line_property': 'employee_id',
#'resource_type': 'smile.activity.employee',
#'dynamic_domain_property': 'employee_filter',
#},
#],
# XXX 3-level resource test
tree_definition = [
{ 'line_property': 'profile_id',
'resource_type': 'smile.activity.profile',
},
{ 'line_property': 'employee_id',
'resource_type': 'smile.activity.employee',
},
{ 'line_property': 'workload_id',
'resource_type': 'smile.activity.workload',
},
],
additional_columns=[
{'label': "Productivity", 'line_property': 'productivity_index', 'hide_value': True},
{'label': "Performance" , 'line_property': 'performance_index' , 'hide_tree_totals': True},
],
#additional_line_property='additional_line_ids',
column_totals_warning_threshold=None,
non_editable_tree = True,
css_classes=['workload'],
title="Workload lines",
custom_js = open(os.path.join(os.path.dirname(__file__), 'custom.js'), 'r').read(),
),
}
## Native methods
@matrix_read_patch
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return super(smile_activity_workload, self).read(cr, uid, ids, fields, context, load)
@matrix_write_patch()
def write(self, cr, uid, ids, vals, context=None):
return super(smile_activity_workload, self).write(cr, uid, ids, vals, context)
## Custom methods
    def modal_window_view(self, cr, uid, ids, context=None):
        """Return an act_window action that reopens this workload's form view
        in a modal window (target='new')."""
        return {
            'name':"View current form in modal window",
            'type': 'ir.actions.act_window',
            'res_model': 'smile.activity.workload',
            'view_type': 'form',
            'view_mode': 'form',
            # get_object_reference() returns a (model, id) pair; it is reversed
            # here so the id comes first -- NOTE(review): confirm the action
            # actually expects the (id, name) order.
            'view_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'smile_matrix_demo', 'view_smile_activity_workload_form')[::-1],
            # Open on the first record of the current selection.
            'res_id': ids[0],
            'nodestroy': False,
            'target': 'new',
            'context': context,
            'toolbar': False,
        }
smile_activity_workload()
class smile_activity_workload_line(osv.osv):
_name = 'smile.activity.workload.line'
## Function fields
def _get_random_int(self, cr, uid, ids, name, arg, context=None):
""" Get a random number between 0 and 100
"""
result = {}
for line in self.browse(cr, uid, ids, context):
result[line.id] = random.randrange(0, 100)
return result
## Fields definition
_columns = {
'name': fields.related('employee_id', 'name', type='char', string='Name', size=32, readonly=True),
'line_rendering': fi | elds.selection(LINE_RENDERING_MODES, 'Line rendering mode', select=True, required=True),
'workload_id': fields.many2one('smile.activity.workload', "Workload", required=True, ondelete='cascade'),
'profile_id': fields.many2one('smile.activity.profile', "Profile", required=False),
'employee_id': fields.many2one('smile.activity.employee', "Employee", required=False),
'cell_ids': fiel | ds.one2many('smile.activity.workload.cell', 'line_id', "Cells"),
'performance_index': fields.function(_get_random_int, string="Performance index", type='float', readonly=True, method=True),
'productivity_index': fields.function(_get_random_int, string="Productivity index", type='float', readonly=True, method=True),
}
_defaults = {
'line_rendering': 'selection',
}
## Native method |
trunca/enigma2 | lib/python/OPENDROID/OPD_panel.py | Python | gpl-2.0 | 35,438 | 0.028105 | from Plugins.Plugin import PluginDescriptor
from Screens.PluginBrowser import *
from Screens.Ipkg import Ipkg
from Screens.HarddiskSetup import HarddiskSetup
from Components.ProgressBar import ProgressBar
from Components.SelectionList import SelectionList
from Screens.NetworkSetup import *
from enigma import *
from Screens.Standby import *
from Screens.LogManager import *
from Screens.MessageBox import MessageBox
from Plugins.SystemPlugins.SoftwareManager.Flash_online import FlashOnline
from Components.ActionMap import ActionMap, NumberActionMap, HelpableActionMap
from Screens.Screen import Screen
from Screens.TaskView import JobView
from Components.Task import Task, Job, job_manager, Condition
from GlobalActions import globalActionMap
from Screens.ChoiceBox import ChoiceBox
from Tools.BoundFunction import boundFunction
from Tools.LoadPixmap import LoadPixmap
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN, SCOPE_PLUGINS
from Components.MenuList import MenuList
from Components.FileList import FileList
from Components.Label import Label
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.config import ConfigSubsection, ConfigInteger, ConfigText, getConfigListEntry, ConfigSelection, ConfigIP, ConfigYesNo, ConfigSequence, ConfigNumber, NoSave, ConfigEnableDisable, configfile
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Button import Button
from Components.ActionMap import ActionMap
from Components.SystemInfo import SystemInfo
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from OPENDROID.OscamSmartcard import *
from enigma import eConsoleAppContainer
from Tools.Directories import fileExists
from Tools.Downloader import downloadWithProgress
from boxbranding import getBoxType, getMachineName, getMachineBrand, getBrandOEM
from enigma import getDesktop
from Screens.InputBox import PinInput
import string
from random import Random
import os
import sys
import re, string
font = 'Regular;16'
import ServiceReference
import time
import datetime
inOPD_panel = None
config.softcam = ConfigSubsection()
config.softcam.actCam = ConfigText(visible_width=200)
config.softcam.actCam2 = ConfigText(visible_width=200)
config.softcam.waittime = ConfigSelection([('0',_("dont wait")),('1',_("1 second")), ('5',_("5 seconds")),('10',_("10 seconds")),('15',_("15 seconds")),('20',_("20 seconds")),('30',_("30 seconds"))], default='15')
if os.path.isfile('/usr/lib/enigma2/python/Plugins/Extensions/MultiQuickButton/plugin.pyo') is True:
try:
from Plugins.Extensions.MultiQuickButton.plugin import *
except:
pass
from OPENDROID.BluePanel import *
from OPENDROID.CronManager import *
from OPENDROID.ScriptRunner import *
from OPENDROID.MountManager import *
from OPENDROID.SwapManager import Swap, SwapAutostart
from OPENDROID.SoftwarePanel import SoftwarePanel
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen, RestoreScreen, BackupSelection, getBackupPath, getBackupFilename
import gettext
def _(txt):
    """Translate *txt* using the "OPD_panel" gettext domain, falling back to
    the process-wide default domain when no domain-specific translation exists.
    """
    t = gettext.dgettext("OPD_panel", txt)
    if t == txt:
        # Python 2 print statement: note the fallback for debugging catalogs.
        print "[OPD_panel] fallback to default translation for", txt
        t = gettext.gettext(txt)
    return t
def command(comandline, strip=1):
    """Run *comandline* through a shell and return its captured stdout.

    Output is captured by redirecting it to /tmp/command.txt and reading the
    file back.  With strip=1 (the default) every line is whitespace-stripped;
    a single trailing newline is always removed from the result.  If the
    output file was never created, the original command string is returned.

    NOTE(review): the command is passed to os.system() unquoted — never feed
    this function untrusted input (shell injection risk).
    """
    # Reconstructed from a corrupted dump ("comand | line", "text[-1:] | ==").
    os.system(comandline + " >/tmp/command.txt")
    text = ""
    if os.path.exists("/tmp/command.txt"):
        outfile = open("/tmp/command.txt", "r")
        if strip == 1:
            for line in outfile:
                text = text + line.strip() + '\n'
        else:
            for line in outfile:
                text = text + line
            if text[-1:] != '\n':
                text = text + "\n"
        outfile.close()
        # If there is one (or a last) line, remove its linefeed.
        if text[-1:] == '\n':
            text = text[:-1]
        comandline = text
        os.system("rm /tmp/command.txt")
    return comandline
boxversion = getBoxType()
machinename = getMachineName()
machinebrand = getMachineBrand()
OEMname = getBrandOEM()
OPD_panel_Version = 'OPD PANEL V1.4 (By OPD-Team)'
print "[OPD_panel] machinebrand: %s" % (machinebrand)
print "[OPD_panel] machinename: %s" % (machinename)
print "[OPD_panel] oem name: %s" % (OEMname)
print "[OPD_panel] boxtype: %s" % (boxversion)
panel = open("/tmp/OPD_panel.ver", "w")
panel.write(OPD_panel_Version + '\n')
panel.write("Machinebrand: %s " % (machinebrand)+ '\n')
panel.write("Machinename: %s " % (machinename)+ '\n')
panel.write("oem name: %s " % (OEMname)+ '\n')
panel.write("Boxtype: %s " % (boxversion)+ '\n')
panel.close()
ExitSave = "[Exit] = " +_("Cancel") +" [Ok] =" +_("Save")
class ConfigPORT(ConfigSequence):
    """Config element holding a single network port number (1-65535)."""
    def __init__(self, default):
        # 'seperator' (sic) is the keyword name defined by the enigma2 API.
        ConfigSequence.__init__(self, seperator = ".", limits = [(1,65535)], default = default)
def main(session, **kwargs):
    """Plugin entry point: open the OPD panel screen in the given session."""
    session.open(OPD_panel)
def Apanel(menuid, **kwargs):
    """Menu hook: contribute the OPD_panel entry, but only to the main menu."""
    if menuid != "mainmenu":
        return []
    return [(_("OPD_panel"), main, "OPD_panel", 3)]
def Plugins(**kwargs):
    """Enigma2 plugin registry: menu entry, autostart hooks, extensions entry."""
    return [
        PluginDescriptor(name='OPD_panel', description='OPD_panel GUI 16/5/2016', where=PluginDescriptor.WHERE_MENU, fnc=Apanel),
        # camstart / SwapAutostart are triggered at session start and autostart.
        PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART], fnc=camstart),
        PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART], fnc=SwapAutostart),
        PluginDescriptor(name='OPD_panel', description='OPD_panel GUI 16/5/2016', where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=main)]
MENU_SKIN = '<screen position="center,center" size="950,470" title="OPD Panel - Main Menu" >\n\t<ePixmap pixmap="/usr/lib/enigma2/python/OPENDROID/icons/redlogo.png" position="0,380" size="950,84" alphatest="on" zPosition="1"/>\n\t<ePixmap pixmap="/usr/lib/enigma2/python/OPENDROID/icons/opendroid_info.png" position="510,11" size="550,354" alphatest="on" zPosition="1"/>\n\t\t<widget source="global.CurrentTime" render="Label" position="450, 340" size="500,24" font="Regular;20" foregroundColor="#FFFFFF" halign="right" transparent="1" zPosition="5">\n\t\t<convert type="ClockToText">>Format%H:%M:%S</convert>\n\t</widget>\n\t<eLabel backgroundColor="#56C856" position="0,330" size="950,1" zPosition="0" />\n <widget name="Mlist" position="70,110" size="705,260" itemHeight="50" scrollbarMode="showOnDemand" transparent="1" zPosition="0" />\n\t<widget name="label1" position="10,340" size="490,25" font="Regular;20" transparent="1" foregroundColor="#f2e000" halign="left" />\n</screen>'
CONFIG_SKIN = '<screen position="center,center" size="600,440" title="PANEL Config" >\n\t<widget name="config" position="10,10" size="580,377" enableWrapAround="1" scrollbarMode="showOnDemand" />\n\t<widget name="labelExitsave" position="90,410" size="420,25" halign="center" font="Regular;20" transparent="1" foregroundColor="#f2e000" />\n</screen>'
INFO_SKIN = '<screen name="OPD_panel" position="center,center" size="730,400" title="OPD_panel" >\n\t<widget name="label2" position="0,10" size="730,25" font="Regular;20" transparent="1" halign="center" foregroundColor="#f2e000" />\n\t<widget name="label1" position="10,45" size="710,350" font="Console;20" zPosition="1" backgroundColor="#251e1f20" transparent="1" />\n</screen>'
INFO_SKIN2 = '<screen name="OPD_panel" position="center,center" size="530,400" title="OPD_panel" backgroundColor="#251e1f20">\n\t<widget name="label1" position="10,50" size="510,340" font="Regular;15" zPosition="1" backgroundColor="#251e1f20" transparent="1" />\n</screen>'
class PanelList(MenuList):
if (getDesktop(0).size().width() == 1920):
def __init__(self, list, font0 = 38, font1 = 28, itemHeight = 60, enableWrapAround = True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", font0))
self.l.setFont(1, gFont("Regular", font1))
self.l.setItemHeight(itemHeight)
else:
def __init__(self, list, font0 = 24, font1 = 16, itemHeight = 50, enableWrapAround = True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", font0))
|
cea-hpc/shine | lib/Shine/Configuration/ModelFile.py | Python | gpl-2.0 | 24,131 | 0.000622 | # Copyright (C) 2010-2014 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""
ModelFile handles ModelFile configuration file manipulation.
You must declare each supported field and syntax:
model = ModelFile()
model.add_element('name', check='string')
...
You can create your own types.
class MyElement(SimpleElement):
def _validate(self, data):
if len(data) > 10:
raise ModelFileValueError("Data too long")
model.add_custom('title', MyElement())
You can load and save from disk.
model = ModelFile()
model.load("/tmp/modelfile")
model.save("/tmp/modelfile.copy")
"""
import re
import copy
from ClusterShell.NodeSet import RangeSet
class ModelFileValueError(Exception):
    """Raised when a bad value is supplied while adding data to an element or
    while parsing a model file."""
class SimpleElement(object):
    """
    A data storing class for ModelFile.
    It handles the checking and storing of a value based on checking-type.
    It could be inherited to implement custom type for ModelFile.
    """

    def __init__(self, check, default=None, values=None):
        # check:   validation type ('digit', 'boolean', 'string', 'enum', 'path')
        # default: value returned by get() when no content has been set
        # values:  allowed values; only meaningful for the 'enum' check
        self._content = None
        self._check = check
        self._default = default
        self._values = values or []

    def emptycopy(self):
        """Return a new empty copy of this element, with the same attributes."""
        return type(self)(self._check, self._default, self._values)

    def copy(self):
        """Return a deep copy of an SimpleElement."""
        return copy.deepcopy(self)

    # Readers

    def get(self, default=None):
        """
        Return, by priority, one of those: the element content or the element
        default or the provided default value.
        """
        if self._content is not None:
            return self._content
        elif self._default is not None:
            return self._default
        else:
            return default

    def content(self, default=None):
        """For SimpleElement, behave like get()."""
        return self.get(default)

    def __iter__(self):
        # Yield the raw content (possibly None); keeps the iteration protocol
        # uniform with MultipleElement.
        yield self._content

    def __str__(self):
        return str(self._content)

    def __len__(self):
        # 1 when a value has been set, 0 otherwise (a default does not count).
        return int(self._content is not None)

    def key(self):
        """
        Unique identifier used for comparison in MultipleElement list.
        By default this is self.
        """
        return self

    def __hash__(self):
        # Hash mirrors __eq__: class, content and check type.
        return hash(self.__class__) ^ hash(self._content) ^ hash(self._check)

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        return self._content == other._content and self._check == other._check

    def as_dict(self):
        """Helper method for ModelFile.as_dict().
        Return the same content as get().
        """
        return self.get()

    def diff(self, other):
        """
        Compare this SimpleElement with another one and return a 3-tuple that
        reads as (added, changed, removed):
          - elements equal          -> (empty, empty, empty)
          - only *other* has data   -> (copy of other, empty, empty)
          - only *self* has data    -> (empty, empty, copy of self)
          - both set but different  -> (empty, copy of other, empty)
        """
        if self == other:
            return self.emptycopy(), self.emptycopy(), self.emptycopy()
        elif len(self) == 0 and len(other) == 1:
            return other.copy(), self.emptycopy(), self.emptycopy()
        elif len(self) == 1 and len(other) == 0:
            return self.emptycopy(), self.emptycopy(), self.copy()
        else:
            return self.emptycopy(), other.copy(), self.emptycopy()

    # Setters

    def replace(self, data):
        """Replace the content with `data'."""
        self.clear()
        self.add(data)

    def clear(self):
        """Clear the element content."""
        self._content = None

    def _validate(self, value):
        """
        Check that @value is valid regarding to SimpleElement check type.
        This function will convert @value to the type correspoding to the check
        type, and returns it.
        It raises a ModelFileValueError is check type is not respected.
        """
        if self._check == 'digit':
            # Hexadecimal literals ("0x...") are accepted for digits.
            try:
                if str(value)[0:2] == "0x":
                    retval = int(value, base=16)
                else:
                    retval = int(value)
            except ValueError as error:
                raise ModelFileValueError(str(error))
            return retval
        elif self._check == 'boolean':
            if value.lower() in ('yes', 'true', '1'):
                return True
            elif value.lower() in ('no', 'false', '0'):
                return False
            else:
                raise ModelFileValueError("'%s' not a boolean value" % value)
        elif self._check == 'string':
            if not isinstance(value, str):
                raise ModelFileValueError("'%s' not a string" % value)
            return str(value)
        elif self._check == 'enum':
            # Comparison is done on the string form; the canonical declared
            # value (not the raw input) is returned.
            if str(value) not in [str(val) for val in self._values]:
                msg = "%s not in %s" % (value, self._values)
                raise ModelFileValueError(msg)
            return [val for val in self._values if str(val) == str(value)].pop()
        elif self._check == 'path':
            if not re.match(r"^\/([\.\w:-]+/)*[\.\w:-]+/?$", value):
                raise ModelFileValueError("'%s' is not a valid path" % value)
            return value
        else:
            raise TypeError("Check type: %s is unmanaged" % self._check)

    def add(self, value):
        """Validate and set the SimpleElement value.
        Raises a ModelFileValueError if called twice. See MultipleElement if
        you need multiple values.
        """
        # Simple element could not 'add' several times. See MultipleElement.
        if self._content is not None:
            raise KeyError("Content already set to '%s'" % self._content)
        # Check value has a valid content.
        self._content = self._validate(value)

    def parse(self, data):
        """Parse, validate and set the SimpleElement value.
        See add() for more details.
        """
        self.add(data)
def _changify(newobj, oldobj):
"""
Store comparison information into `newobj' based on `oldobj'.
MultipleElement can have different type of elements, based on different
classes. These classes have no common super class.
MultipleElement.diff() method returns a list of changed objects. For
convenience, those objects will be polymorphic and will contain their
update information and also the old ones.
"""
setattr(newobj, 'old', oldobj)
setattr(newobj, 'chgkeys', set())
# For convenience, pre-compute the list of modified keys
for data in newobj.old.diff(newobj):
newobj.chgkeys.update(data)
# If needed, we can also add some methods dynamically.
# This function could evolve to a generic class which will be used by
# SimpleElement, MultipleElement and ModelFile if needed.
return newobj
class MultipleElement(object):
"""
This is a container over a list of non-multiple element, like SimpleElement
or ModelFile.
    It uses the provided instance, at init, as a reference for all the data that
it will have to manage.
"""
def __init__(self, orig_elem, fold=False):
self. | _origelem = orig_elem
self._elements = []
self.fold = fold
def emptycopy(self):
"""Return a new empty |
zmetcalf/Triple-Draw-Deuce-to-Seven-Lowball-Limit | triple_draw_poker/model/HandDetails.py | Python | gpl-2.0 | 1,109 | 0.000902 | from triple_draw_poker.model.Pot import Pot
class HandDetails:
    """Mutable per-hand state for a triple-draw lowball game.

    Tracks the pot, the raise count, the current street, whether play is in a
    draw phase, the dealt hands and the position of the next card in the deck.
    (`Pot` comes from the module-level import of triple_draw_poker.model.Pot.)
    """

    def __init__(self):
        self.pot = Pot()
        self.raised = 0                 # raises made on the current street
        self.street = 0                 # 0-based betting round index
        self.number_of_streets = 4
        self.in_draw = False            # True while players are drawing cards
        self.hands = []
        self.dealt_cards_index = 0      # index of the next undealt card

    def getDealtCardsIndex(self):
        # BUG FIX: the original returned the bare name `dealt_cards_index`
        # (NameError at runtime) instead of the instance attribute.
        return self.dealt_cards_index

    def getHands(self):
        return self.hands

    def getPot(self):
        return self.pot

    def getRaised(self):
        return self.raised

    def getStreet(self):
        return self.street

    def getStreetPremium(self):
        """Betting unit multiplier: 2 on early streets (0-2), 1 afterwards."""
        if self.street < 3:
            return 2
        return 1

    def getNumberOfStreets(self):
        return self.number_of_streets

    def getInDraw(self):
        return self.in_draw

    def setDealtCardsIndex(self, index):
        self.dealt_cards_index = index

    def addHand(self, hand):
        self.hands.append(hand)

    def incrementRaised(self):
        # Reconstructed from a corrupted dump ("incrementRaised | (self)").
        self.raised += 1

    def incrementStreet(self):
        self.street += 1

    def changeInDraw(self):
        """Toggle between the betting phase and the drawing phase."""
        self.in_draw = not self.in_draw
EternityForest/KaithemAutomation | kaithem/src/plugins/startup/DLNARenderPlugin/__init__.py | Python | gpl-3.0 | 2,277 | 0.004392 | from mako.lookup import TemplateLookup
from src import devices, alerts, scheduling, messagebus, workers
import subprocess
import os
import mako
import time
import threading
import logging
import weakref
import base64
import traceback
import shutil
import socket
import uuid
from src import widgets
logger = logging.Logger("plugins.dlnarender")
templateGetter = TemplateLookup(os.path.dirname(__file__))
defaultSubclassCode = """
class CustomDeviceType(DeviceType):
pass
"""
class DLNARenderAgent(devices.Device):
    """Kaithem device wrapping a `gmediarender` DLNA renderer process.

    The renderer's audio goes to JACK via gstreamer's jackaudiosink, and the
    process is (re)started whenever the JACK server announces it is up.
    """
    deviceTypeName = 'DLNARenderAgent'
    readme = os.path.join(os.path.dirname(__file__), "README.md")
    defaultSubclassCode = defaultSubclassCode
    description = "Create an instance of gmediarender to recieve media. Audio is piped to JACK."

    def close(self):
        devices.Device.close(self)
        try:
            with self.plock:
                self.closed = True
                self.process.terminate()
        except Exception:
            # BUG FIX: was a bare `except:` — narrowed so KeyboardInterrupt and
            # SystemExit are not swallowed.  Best effort: the subprocess may
            # never have been started.
            print(traceback.format_exc())

    def __del__(self):
        self.close()

    def __init__(self, name, data):
        devices.Device.__init__(self, name, data)
        self.closed = False
        self.plock = threading.RLock()
        try:
            def f(*a):
                # (Re)spawn gmediarender unless the device has been closed.
                with self.plock:
                    if self.closed:
                        return
                    x = ['gmediarender', '--gstout-videosink', 'glimagesink', '--gstout-audiopipe',
                         'jackaudiosink slave-method=0 port-pattern="jkhjkjhkhkhkhkjhk" client-name=' + data.get("device.advertiseName", 'DLNA')]
                    x += ['-f', data.get("device.advertiseName",
                                         socket.gethostname()) or socket.gethostname()]
                    x += ['-u', data.get("device.uuid",
                                         str(uuid.uuid4())) or str(uuid.uuid4())]
                    self.process = subprocess.Popen(x)
            # Reconstructed from a corrupted dump ("self.r | estart = f").
            self.restart = f
            messagebus.subscribe("/system/jack/started", f)
            f()
        except Exception:
            # BUG FIX: was a bare `except:`.
            self.handleException()

    def getManagementForm(self):
        return templateGetter.get_template("manageform.html").render(data=self.data, obj=self)
devices.deviceTypes["DLNARenderAgent"] = DLNARenderAgent
|
jupito/dwilib | dwi/tools/pmapinfo.py | Python | mit | 3,191 | 0 | #!/usr/bin/python3
"""Print information about pmaps."""
import numpy as np
import dwi.conf
import dwi.files
from dwi.files import Path
import dwi.image
import dwi.mask
import dwi.util
# Registry of printable statistics: maps an info key (selectable with -k) to a
# function of a dwi.image.Image returning that value.
lambdas = dict(
    # file identity
    path=lambda x: x.info['path'],
    name=lambda x: x.info['path'].name,
    stem=lambda x: x.info['path'].stem,
    # array layout and geometry
    type=lambda x: x.dtype,
    shape=lambda x: shorten(x.shape),
    size=lambda x: x.size,
    mbb=lambda x: shorten(','.join(f'{slc.start:03d}-{slc.stop:03d}' for slc in
                                   x.mbb())),
    mbbshape=lambda x: shorten(x[x.mbb()].shape),
    spacing=lambda x: shorten(x.spacing),
    centroid=lambda x: ','.join(str(int(round(y))) for y in x.centroid()),
    # element counts, absolute and as ratios of total size
    finite=lambda x: np.count_nonzero(np.isfinite(x)),
    nonzero=lambda x: np.count_nonzero(x),
    neg=lambda x: np.count_nonzero(x < 0),
    rfinite=lambda x: lambdas['finite'](x) / x.size,
    rnonzero=lambda x: lambdas['nonzero'](x) / x.size,
    rneg=lambda x: lambdas['neg'](x) / x.size,
    # basic NaN-ignoring statistics
    sum=lambda x: np.nansum(x),
    mean=lambda x: np.nanmean(x),
    std=lambda x: np.nanstd(x),
    var=lambda x: np.nanvar(x),
    min=lambda x: np.nanmin(x),
    median=lambda x: np.nanmedian(x),
    max=lambda x: np.nanmax(x),
    five=lambda x: shorten(dwi.util.fivenums(x)),
    # structure and data-quality checks
    regs=lambda x: dwi.mask.nregions(x.squeeze()),
    errors=lambda x: len(x.info['attrs'].get('errors', ())),
    ce16=lambda x: cast_errors(x, np.float16),
    ce32=lambda x: cast_errors(x, np.float32),
)
def cast_errors(a, dtype):
    """Count the finite elements of *a* that do not survive a cast to *dtype*,
    i.e. that fail numpy.isclose() against their original value."""
    finite = a[np.isfinite(a)]
    still_close = np.count_nonzero(np.isclose(finite, finite.astype(dtype)))
    return finite.size - still_close
def shorten(o):
    """Stringify *o* and strip every whitespace character (ndarrays are first
    converted to plain lists for a compact rendering)."""
    value = list(o) if isinstance(o, np.ndarray) else o
    return ''.join(str(value).split())
def parse_args():
    """Build the command-line interface for this tool and parse sys.argv."""
    epilog = 'Available keys: {}'.format(','.join(sorted(lambdas.keys())))
    parser = dwi.conf.get_parser(description=__doc__, epilog=epilog)
    parser.add('path', nargs='+', type=Path,
               help='input pmap files')
    parser.add('-p', '--params', nargs='*',
               help='parameters')
    parser.add('-m', '--masks', metavar='MASKFILE', nargs='+',
               help='mask files')
    parser.add('-k', '--keys', default='shape,path',
               help='comma-separated keys for specifiying requested info')
    parser.add('--ofs', default='\t',
               help='output field separator')
    return parser.parse_args()
def main():
    """Read each pmap named on the command line, optionally apply the unified
    mask, and print the requested info fields (one line per file)."""
    args = parse_args()
    # Reconstructed from a corrupted dump ("keys | =", "dwi.image.Imag | e").
    keys = args.keys.split(',')
    if args.masks:
        mask = dwi.util.unify_masks(dwi.files.read_mask(x) for x in args.masks)
    for path in args.path:
        try:
            pmap = dwi.image.Image.read(path, params=args.params)
        except Exception as e:
            # Keep going on unreadable inputs; report and move on.
            print('Could not read image: {}: {}'.format(path, e))
            continue
        if args.masks:
            pmap.apply_mask(mask)
        # Verbose mode prefixes each value with its key.
        fmt = '{k}={v}' if args.verbose else '{v}'
        fields = (fmt.format(k=x, v=lambdas[x](pmap)) for x in keys)
        print(*fields, sep=args.ofs)


if __name__ == '__main__':
    main()
|
atsheehan/pypogs | pypogs/game_container.py | Python | gpl-3.0 | 1,838 | 0.001088 | from pygame.locals import *
from pypogs import player_area
from pypogs import render
class GameContainer(object):
    """Hosts the per-player play areas and routes render/tick/input to them.

    When ``_is_online`` is set the authoritative game state lives on a server:
    local areas are not ticked and input events are forwarded instead (the
    networking methods below are still stubs).
    """

    def __init__(self, world, dimensions):
        self._player_areas = []
        self._world = world
        self._dimensions = dimensions
        self._is_online = False

    def render(self, screen):
        if not self._world.in_game():
            return
        for area in self._player_areas:
            area.render(screen)

    def tick(self):
        if not self._world.in_game():
            return
        if self._is_online:
            self._check_for_server_update()
        else:
            for area in self._player_areas:
                area.tick()

    def handle_event(self, event):
        if not self._world.in_game():
            return
        if event.type == KEYDOWN:
            self._handle_key_event(event.key)
        elif event.type == JOYBUTTONDOWN:
            self._handle_joy_button_down_event(event.button)
        if self._is_online:
            self._send_input_to_server(event)
        else:
            # Reconstructed from a corrupted dump ("area | .handle_event").
            for area in self._player_areas:
                area.handle_event(event)

    def _handle_key_event(self, key):
        # Escape always returns to the menu.
        if key == K_ESCAPE:
            self._world.switch_to_menu()

    def _handle_joy_button_down_event(self, button):
        if button == 4:
            self._world.switch_to_menu()

    def start_new_game(self, player_count):
        """Reset state and lay out one PlayerArea per player."""
        del self._player_areas[:]
        posn = render.GamePositions(self._dimensions[0], self._dimensions[1],
                                    player_count)
        for player_id in range(player_count):
            new_area = player_area.PlayerArea(posn, player_id)
            self._player_areas.append(new_area)

    def _check_for_server_update(self):
        # Networking not implemented yet.
        pass

    def _send_input_to_server(self, event):
        # BUG FIX: handle_event() calls this with the event, but the original
        # signature took no argument and raised TypeError in online mode.
        pass
|
metaperture/autoargs | examples/usage.py | Python | bsd-3-clause | 857 | 0.011669 | import autoargs
def main(arg1: int, arg2: int, *, val=10, val2: int):
    """Sum the two positional arguments with the keyword-only values
    (``val`` defaults to 10, ``val2`` is required)."""
    subtotal = arg1 + arg2
    return subtotal + val + val2
def example(a, b: int, *others, d: float, e=10):
    """Here is my docstring"""
    # Reconstructed from a corrupted dump that split the docstring quotes
    # ('"" | "Here is my docstring"""').  Returns the stringified locals so
    # callers can inspect how arguments were bound.
    return str(locals())
import numpy
@cmdable
def f2(x: int, y: int, *z: int, op: {'sum', 'mul'}='mul'):
    "aggregate x, y, and z's by mul or sum"
    all_vals = [x, y] + list(z)
    print(all_vals)
    if op == 'sum':
        return sum(all_vals)
    elif op == 'mul':
        # BUG FIX: numpy.product was deprecated and removed in NumPy 2.0;
        # numpy.prod is the supported spelling with identical results.
        return numpy.prod(all_vals)
@cmdable
def f1(x: int, y: int, *z: int):
    "agg x, y, and z's by sum"
    # Delegates to f2 with the summing aggregator preselected.
    return f2(x, y, *z, op='sum')
@cmdable
def f3(*, op):
    # Dispatcher: return the aggregation command matching *op*
    # ('sum' -> f1, 'mul' -> f2); any other value yields None implicitly.
    if op == "sum":
        return f1
    elif op == "mul":
        return f2
if __name__ == "__main__":
#result = autoargs.autorun(main)
result = autoargs.autorun(example)
print(result)
|
kemaswill/keras | tests/keras/wrappers/test_scikit_learn.py | Python | mit | 4,769 | 0.000629 | import pytest
import numpy as np
from keras.utils.test_utils import get_test_data
from keras.utils import np_utils
from keras import backend as K
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
input_dim = 5
hidden_dims = 5
nb_train = 100
nb_test = 50
nb_class = 3
batch_size = 32
nb_epoch = 1
verbosity = 0
optim = 'adam'
loss = 'categorical_crossentropy'
np.random.seed(42)
(X_train, y_train), (X_test, y_test) = get_test_data(
nb_train=nb_train, nb_test=nb_test, input_shape=(input_dim,),
classification=True, nb_class=nb_class)
def build_fn_clf(hidden_dims):
    """Build and compile a small ReLU/softmax classifier for KerasClassifier."""
    model = Sequential()
    model.add(Dense(input_dim, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(hidden_dims))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(optimizer='sgd', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def test_clasify_build_fn():
    """KerasClassifier constructed from a plain build function. ('clasify' sic)"""
    clf = KerasClassifier(
        build_fn=build_fn_clf, hidden_dims=hidden_dims,
        batch_size=batch_size, nb_epoch=nb_epoch)
    assert_classification_works(clf)
def test_clasify_class_build_fn():
    """KerasClassifier constructed from a callable (instance) build function."""
    class ClassBuildFnClf(object):
        # Callable object standing in for a build function.
        def __call__(self, hidden_dims):
            return build_fn_clf(hidden_dims)
    clf = KerasClassifier(
        build_fn=ClassBuildFnClf(), hidden_dims=hidden_dims,
        batch_size=batch_size, nb_epoch=nb_epoch)
    assert_classification_works(clf)
def test_clasify_inherit_class_build_fn():
    """KerasClassifier subclass acting as its own build function (build_fn=None)."""
    class InheritClassBuildFnClf(KerasClassifier):
        def __call__(self, hidden_dims):
            return build_fn_clf(hidden_dims)
    clf = InheritClassBuildFnClf(
        build_fn=None, hidden_dims=hidden_dims,
        batch_size=batch_size, nb_epoch=nb_epoch)
    assert_classification_works(clf)
def assert_classification_works(clf):
    """Fit, score, predict and predict_proba must all behave sanely for *clf*."""
    clf.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch)
    score = clf.score(X_train, y_train, batch_size=batch_size)
    assert np.isscalar(score) and np.isfinite(score)
    preds = clf.predict(X_test, batch_size=batch_size)
    assert preds.shape == (nb_test, )
    # every predicted label must be a known class index
    for prediction in np.unique(preds):
        assert prediction in range(nb_class)
    proba = clf.predict_proba(X_test, batch_size=batch_size)
    assert proba.shape == (nb_test, nb_class)
    # each predict_proba row must be a probability distribution
    assert np.allclose(np.sum(proba, axis=1), np.ones(nb_test))
def build_fn_reg(hidden_dims=50):
    """Build and compile a small ReLU/linear regressor for KerasRegressor."""
    model = Sequential()
    model.add(Dense(input_dim, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(hidden_dims))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(optimizer='sgd', loss='mean_absolute_error',
                  metrics=['accuracy'])
    return model
def test_regression_build_fn():
    """KerasRegressor constructed from a plain build function."""
    reg = KerasRegressor(
        build_fn=build_fn_reg, hidden_dims=hidden_dims,
        batch_size=batch_size, nb_epoch=nb_epoch)
    assert_regression_works(reg)
def test_regression_class_build_fn():
    """KerasRegressor constructed from a callable (instance) build function."""
    class ClassBuildFnReg(object):
        # Callable object standing in for a build function.
        def __call__(self, hidden_dims):
            return build_fn_reg(hidden_dims)
    reg = KerasRegressor(
        build_fn=ClassBuildFnReg(), hidden_dims=hidden_dims,
        batch_size=batch_size, nb_epoch=nb_epoch)
    assert_regression_works(reg)
def test_regression_inherit_class_build_fn():
    """KerasRegressor subclass acting as its own build function (build_fn=None)."""
    class InheritClassBuildFnReg(KerasRegressor):
        def __call__(self, hidden_dims):
            return build_fn_reg(hidden_dims)
    reg = InheritClassBuildFnReg(
        build_fn=None, hidden_dims=hidden_dims,
        batch_size=batch_size, nb_epoch=nb_epoch)
    assert_regression_works(reg)
def assert_regression_works(reg):
    """Fit, score and predict must all behave sanely for the regressor *reg*."""
    reg.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch)
    score = reg.score(X_train, y_train, batch_size=batch_size)
    assert np.isscalar(score) and np.isfinite(score)
    preds = reg.predict(X_test, batch_size=batch_size)
    assert preds.shape == (nb_test, )
if __name__ == '__main__':
pytest.main([__file__])
# Usage of sklearn's grid_search
# from sklearn import grid_search
# parameters = dict(hidden_dims = [20, 30], batch_size=[64, 128], nb_epoch=[2], verbose=[0])
# classifier = Inherit_class_build_fn_clf()
# clf = grid_search.GridSearchCV(classifier, parameters)
# clf.fit(X_train, y_train)
# parameters = dict(hidden_dims = [20, 30], batch_size=[64, 128], nb_epoch=[2], verbose=[0])
# regressor = Inherit_class_build_fn_reg()
# reg = grid_search.GridSearchCV(regressor, parameters, scoring='mean_squared_error', n_jobs=1, cv=2, verbose=2)
# reg.fit(X_train_reg, y_train_reg)
|
viaregio/cartridge | cartridge/shop/checkout.py | Python | bsd-2-clause | 8,239 | 0.000607 | """
Checkout process utilities.
"""
from django.contrib.auth.models import SiteProfileNotAvailable
from django.utils.translation import ugettext as _
from django.template.loader import get_template, TemplateDoesNotExist
from mezzanine.conf import settings
from mezzanine.utils.email import send_mail_template
from cartridge.shop.models import Order
from cartridge.shop.utils import set_shipping, set_tax, sign
class CheckoutError(Exception):
    """Raised by billing/shipping and payment handlers for problems such as an
    invalid shipping address or an unsuccessful payment."""
def default_billship_handler(request, order_form):
    """
    Default billing/shipping handler - called when the first step in
    the checkout process with billing/shipping address fields is
    submitted. Implement your own and specify the path to import it
    from via the setting ``SHOP_HANDLER_BILLING_SHIPPING``.

    This function will typically contain any shipping calculation
    where the shipping amount can then be set using the function
    ``cartridge.shop.utils.set_shipping``. The Cart object is also
    accessible via ``request.cart``
    """
    # Flat-rate shipping, unless an earlier step flagged the order as free.
    if not request.session.get("free_shipping"):
        settings.use_editable()
        set_shipping(request, _("Flat rate shipping"),
                     settings.SHOP_DEFAULT_SHIPPING_VALUE)
def default_tax_handler(request, order_form):
    """
    Default tax handler - called immediately after the handler defined
    by ``SHOP_HANDLER_BILLING_SHIPPING``. Implement your own and
    specify the path to import it from via the setting
    ``SHOP_HANDLER_TAX``. This function will typically contain any tax
    calculation where the tax amount can then be set using the function
    ``cartridge.shop.utils.set_tax``. The Cart object is also
    accessible via ``request.cart``
    """
    # Tax-exempt by default: record a zero "Tax" amount on the session.
    settings.use_editable()
    set_tax(request, _("Tax"), 0)
def default_payment_handler(request, order_form, order):
    """No-op payment handler.

    Called when the final checkout step (payment information) is submitted.
    Point the ``SHOP_HANDLER_PAYMENT`` setting at your own implementation to
    integrate a payment gateway; raise
    ``cartridge.shop.checkout.CheckoutError("error message")`` when the
    payment is unsuccessful.
    """
    return None
def default_order_handler(request, order_form, order):
    """No-op order handler.

    Called once the order is complete and contains its final data.  Point the
    ``SHOP_HANDLER_ORDER`` setting at your own implementation to hook in any
    post-order processing.
    """
    return None
def initial_order_data(request, form_class=None):
    """
    Return the initial data for the order form, trying the following in
    order:
    - request.POST which is available when moving backward through the
      checkout steps
    - current order details in the session which are populated via each
      checkout step, to support user leaving the checkout entirely and
      returning
    - last order made by the user, via user ID or cookie
    - matching fields on an authenticated user and profile object
    """
    from cartridge.shop.forms import OrderForm
    initial = {}
    if request.method == "POST":
        initial = dict(request.POST.items())
        try:
            initial = form_class.preprocess(initial)
        except (AttributeError, TypeError):
            # form_class has no preprocess method, or isn't callable.
            pass
        # POST on first step won't include the "remember" checkbox if
        # it isn't checked, and it'll then get an actual value of False
        # when it's a hidden field - so we give it an empty value when
        # it's missing from the POST data, to persist it not checked.
        initial.setdefault("remember", "")
    if not initial:
        # Look for a previous order.
        if "order" in request.session:
            return request.session["order"]
        lookup = {}
        # NOTE(review): is_authenticated is called as a method here
        # (pre-Django-1.10 API) - confirm against the project's Django
        # version before upgrading.
        if request.user.is_authenticated():
            lookup["user_id"] = request.user.id
        # The "remember" cookie presumably holds "<signature>:<key>";
        # only trust the key when the signature matches - TODO confirm
        # the format against where the cookie is set.
        remembered = request.COOKIES.get("remember", "").split(":")
        if len(remembered) == 2 and remembered[0] == sign(remembered[1]):
            lookup["key"] = remembered[1]
        if lookup:
            previous = Order.objects.filter(**lookup).values()[:1]
            if len(previous) > 0:
                initial.update(previous[0])
    if not initial and request.user.is_authenticated():
        # No previous order data - try and get field values from the
        # logged in user. Check the profile model before the user model
        # if it's configured. If the order field name uses one of the
        # billing/shipping prefixes, also check for it without the
        # prefix. Finally if a matching attribute is callable, call it
        # for the field value, to support custom matches on the profile
        # model.
        user_models = [request.user]
        try:
            user_models.insert(0, request.user.get_profile())
        except SiteProfileNotAvailable:
            pass
        for order_field in OrderForm._meta.fields:
            check_fields = [order_field]
            for prefix in ("billing_detail_", "shipping_detail_"):
                if order_field.startswith(prefix):
                    check_fields.append(order_field.replace(prefix, "", 1))
            for user_model in user_models:
                for check_field in check_fields:
                    user_value = getattr(user_model, check_field, None)
                    if user_value:
                        if callable(user_value):
                            # Support computed matches on the profile model.
                            try:
                                user_value = user_value()
                            except TypeError:
                                continue
                        if not initial.get(order_field):
                            initial[order_field] = user_value
    # Set initial value for "same billing/shipping" based on
    # whether both sets of address fields are all equal.
    shipping = lambda f: "shipping_%s" % f[len("billing_"):]
    if any([f for f in OrderForm._meta.fields if f.startswith("billing_") and
            shipping(f) in OrderForm._meta.fields and
            initial.get(f, "") != initial.get(shipping(f), "")]):
        initial["same_billing_shipping"] = False
    return initial
def send_order_email(request, order):
    """
    Email the customer a receipt for a successfully completed order.
    """
    settings.use_editable()
    context = {
        "order": order,
        "request": request,
        "order_items": order.items.all(),
    }
    # Order details override the base keys on collision.
    context.update(order.details_as_dict())
    try:
        get_template("shop/email/order_receipt.html")
    except TemplateDoesNotExist:
        receipt_template = "email/order_receipt"
    else:
        # Project still overrides the legacy template location.
        receipt_template = "shop/email/order_receipt"
        from warnings import warn
        warn("Shop email receipt templates have moved from "
             "templates/shop/email/ to templates/email/")
    send_mail_template(settings.SHOP_ORDER_EMAIL_SUBJECT,
                       receipt_template,
                       settings.SHOP_ORDER_FROM_EMAIL,
                       order.billing_detail_email,
                       context=context,
                       fail_silently=settings.DEBUG,
                       addr_bcc=settings.SHOP_ORDER_EMAIL_BCC or None)
# Set up some constants for identifying each checkout step.
CHECKOUT_STEPS = [{"template": "billing_shipping", "url": "details",
"title": _("Details")}]
CHECKOUT_STEP_FIRST = CHECKOUT_STEP_PAYMENT = CH | ECKOUT_STEP_LAST = 1
if settings.SHOP_CHECKOUT_STEPS_SPLIT:
CHECKOUT_STEPS[0].update({"url": "billing-shipping",
"title": _("Address")})
if settings.SHOP_PAYMENT_STEP_ENABLED:
CHECKOUT_STEPS.append({"template": "payment", "url": "payment",
"title": _("Payment")})
CHE | CKOUT_STEP_PAYMENT = CHECKOUT_STEP_LAST = 2
if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION:
CHECKOUT_STEPS.append({"template": "confirmation", "url": "confirmation",
|
D4wN/brickv | src/build_data/windows/OpenGL/raw/GL/NV/texture_compression_vtc.py | Python | gpl-2.0 | 487 | 0.00616 | '''OpenGL extension NV.texture_compression_vtc
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
i | mport ctypes
EXTENSION_NAME = 'GL_NV_texture_compression_vtc'
_DEPRECATED = False
def | glInitTextureCompressionVtcNV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
TOTVS/mdmpublic | couchbase-cli/lib/python/pump_bfd2.py | Python | bsd-2-clause | 1,621 | 0.008637 | impor | t pump
import pump_bfd
class BFDSinkEx(pump_bfd.BFDSink):
de | f __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(pump_bfd.BFDSink, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.mode = getattr(opts, "mode", "diff")
self.init_worker(pump_bfd.BFDSink.run)
@staticmethod
def check_spec(source_bucket, source_node, opts, spec, cur):
pump.Sink.check_spec(source_bucket, source_node, opts, spec, cur)
seqno, dep, faillover_log, snapshot_markers = pump_bfd.BFD.find_seqno(opts, spec,
source_bucket['name'],
source_node['hostname'],
getattr(opts, "mode", "diff"))
if 'seqno' in cur:
cur['seqno'][(source_bucket['name'], source_node['hostname'])] = seqno
else:
cur['seqno'] = {(source_bucket['name'], source_node['hostname']): seqno}
if 'failoverlog' in cur:
cur['failoverlog'][(source_bucket['name'], source_node['hostname'])] = faillover_log
else:
cur['failoverlog'] = {(source_bucket['name'], source_node['hostname']): faillover_log}
if 'snapshot' in cur:
cur['snapshot'][(source_bucket['name'], source_node['hostname'])] = snapshot_markers
else:
cur['snapshot'] = {(source_bucket['name'], source_node['hostname']): snapshot_markers} |
mfergie/errorless | errorless.py | Python | gpl-2.0 | 3,992 | 0.003006 | #!/usr/bin/env python
import sys
import re
import subprocess
import cmd
error_regexps = [
"error:",
"warning:",
]
print_error_format = "{id}) {summary}"
class Error():
    """A single compiler diagnostic (error or warning) and its output lines."""

    def __init__(self, id, error_type, match_position):
        self.id = id                          # 1-based ordinal shown to the user
        self.type = error_type                # index into error_regexps
        self.match_position = match_position  # column where the regexp matched
        self.lines = []                       # raw compiler output lines

    def summary(self):
        """Return a one-line description: "<id>) <first line up to the match>"."""
        cutoff = self.match_position + len(error_regexps[self.type])
        return "{id}) {summary}".format(id=self.id,
                                        summary=self.lines[0][:cutoff])
class CommandLoop(cmd.Cmd):
    """Interactive shell for browsing compiler errors and warnings."""

    prompt = "(errorless) "

    def __init__(self, compile_fn):
        cmd.Cmd.__init__(self)
        # Keep the compile callback so "make" can re-run it later.
        self.compile_fn = compile_fn
        self.errors = compile_fn()

    def do_list(self, line):
        """list
        List all errors and warnings.
        """
        list_errors(self.errors)

    def do_show(self, line):
        """show [error no]
        Show all information for a particular error.
        """
        try:
            # int() raises ValueError on bad input; indexing raises
            # IndexError on an out-of-range error number.
            print_error(self.errors[int(line) - 1])
        except ValueError:
            print("Syntax: show <error number>")
        except IndexError:
            print("Error doesn't exist.")

    def do_make(self, line):
        """make
        Re-run compilation.
        """
        self.errors = self.compile_fn()

    def do_quit(self, line):
        """
        Exit program.
        """
        return True

    def do_EOF(self, line):
        """
        Exit program.
        """
        return True
def list_errors(errors):
    """Print the one-line summary of every collected error."""
    for err in errors:
        print(err.summary())
def print_error(error):
    """Print an error's header followed by every captured output line."""
    header = "Error: {}".format(error.id)
    print(header)
    for text in error.lines:
        print(text)
def capture_compiler_output(compiler_shell_command):
    """
    Runs the compiler command passed as command line argument and returns
    a boolean indicating whether the compilation was successful (i.e. the
    subprocess returned 0) as well as the resulting lines.
    Returns
    -------
    The lines (note, doesn't return success yet.)
    """
    # shell=True: the command is a single shell string assembled by the
    # caller; it is expected to be the user's own compiler invocation.
    compile_process = subprocess.Popen(
        compiler_shell_command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = compile_process.communicate()
    # NOTE(review): splitting on the str '\n' assumes Python 2, where
    # pipe output is str; under Python 3 stderr_data is bytes and this
    # line would raise TypeError - confirm the intended interpreter.
    lines = stderr_data.split('\n')
    print("stdout:")
    print(stdout_data)
    print("stderr:")
    print(stderr_data)
    # Earlier incremental-read implementation, kept for reference:
    # lines = []
    # while compile_process.poll() is None:
    #     stdout_line = compile_process.stdout.readline()
    #     if stdout_line != '':
    #         print(stdout_line)
    #     stderr_line = compile_process.stderr.readline()
    #     if stderr_line != '':
    #         print(stderr_line)
    #         lines.append(stderr_line)
    return lines
def parse_errors(compiler_output):
errors = []
regexp_objects = [re.compile(error_regexp) for error_regexp in error_regexps]
for line_ind, line in enumerate(compiler_output):
for errtype_ind, regexp_object in enumerate(regexp_objects):
match_object = regexp_object.search(line)
if match_object is not None:
errors.append(
Error(
len(errors)+1,
errtype_ind,
match_object.start()
)
)
if len(errors) > 0:
| errors[-1].lines.append(line)
return err | ors
def main():
    """Entry point: treat all CLI arguments as the compiler command to wrap."""
    compiler_shell_command = ' '.join(sys.argv[1:])
    def compile_fn():
        # BUG FIX: the original wrote print("Executing {}").format(...),
        # which under Python 3 calls .format() on print()'s return value
        # (None) and raises AttributeError; format the string first.
        print("Executing {}".format(compiler_shell_command))
        compiler_output = capture_compiler_output(compiler_shell_command)
        errors = parse_errors(compiler_output)
        return errors
    CommandLoop(compile_fn).cmdloop()
if __name__ == "__main__":
    main()
|
gdelnegro/django-translation-server | translation_server/models.py | Python | mit | 3,854 | 0.004411 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class TranslationType(models.Model):
    """Category of translation entries; field labels/help texts are themselves
    translation tags (DTSM*/DTST*)."""
    created = models.DateTimeField(_('DTSM1'), auto_now_add=True, null=True, blank=True, help_text=_('DTST1'))
    updated = models.DateTimeField(_('DTSM2'), auto_now=True, null=True, blank=True, help_text=_('DTST2'))
    tag = models.CharField(_('DTSM3'), help_text=_('DTST3'), max_length=20, unique=True)
    name = models.TextField(_('DTSM4'), help_text=_('DTST4'))
    has_auxiliary_text = models.BooleanField(_('DTSM5'), help_text=_('DTST5'), default=True)
    # NOTE(review): default=False on a CharField stores the string
    # representation of False - confirm this is intended rather than
    # default='' or default=None.
    auxiliary_tag = models.CharField(_('DTSM6'), help_text=_('DTST6'), max_length=20, unique=True, blank=True,
                                     default=False, null=True)
    class Meta:
        verbose_name = _('DTSMT1')
        verbose_name_plural = _('DTSMTP1')
    def __str__(self):
        return "%s - %s" % (self.tag, self.name)
    def __unicode__(self):
        return "%s - %s" % (self.tag, self.name)
class Translation(models.Model):
created = models.DateTimeField(_('DTSM1'), auto_now_add=True, null=True, blank=True, help_text=_('DTST1'))
updated = models.DateTimeField(_('DTSM2'), auto_now=True, null=True, blank=True, help_text=_('DTST2'))
type = models.ForeignKey(TranslationType, on_delete=None, related_name="translation_translation_type",
verbose_name=_('DTSM7'), help_text=_('DTST7'))
tag = models.CharField(_('DTSM8'), help_text=_('DTST8'), max_length=20, unique=True)
text = models.TextField(_('DTSM9'), help_text=_('DTST9'))
auxiliary_tag = models.CharField(_('DTSM10'), help_text=_('DTST10'), max_length=20, blank=True, null=True)
auxiliary_text = models.TextField(_('DTSM11'), help_text=_('DTST11'), blank=True, null=True)
migration_created | = models.BooleanField(_('DTSM12'), help_text=_('DTST12'), default=False)
class Meta:
verbose_name = _('DTSMT2')
verbose_name_plural = _('DTSMTP2')
def __str__(self):
return "%s" % self.tag
|
def __unicode__(self):
return "%s" % self.tag
# @receiver(post_save, sender=Translation, dispatch_uid="update_stock_count")
# def update_translation(sender, instance, **kwargs):
# from django.core.management import call_command
# call_command('make_translation')
class LastTranslationTag(object):
    """Look up the highest-numbered existing translation tag matching a prefix."""
    # Prefix to search for, e.g. "DTSM"; set in __init__.
    translation_tag = None
    def __init__(self, translation_tag, *args, **kwargs):
        self.translation_tag = translation_tag
    def return_last_tag(self):
        """Return {'result': {...last tag info...}} or {'result': {}} when
        no prefix was given or nothing matches."""
        if self.translation_tag and len(self.translation_tag) > 0:
            from django.db import connection
            # todo: add suport to multiple databases
            # SECURITY(review): translation_tag is interpolated directly
            # into raw SQL (LIKE pattern) - a malicious prefix could
            # inject SQL.  The regexp_replace/::int syntax is
            # PostgreSQL-specific.  Confirm inputs are trusted.
            query = "SELECT tag FROM %(translation_table)s WHERE tag LIKE '%(translation_tag)s%%' ORDER BY NULLIF(regexp_replace(TAG, E'\\\\D', '', 'g'), '')::int DESC LIMIT 1" % {
                'translation_tag': self.translation_tag,
                'translation_table': Translation._meta.db_table
            }
            cursor = connection.cursor()
            try:
                cursor.execute(query)
            except Exception as err:
                raise err
            else:
                result = []
                # LIMIT 1: at most one row; keep the tag string itself.
                for row in cursor.fetchall():
                    result = row[0]
                if result:
                    import re
                    tag = Translation.objects.get(tag=result)
                    return dict(result=dict(last_tag=result, last_id=re.findall("(\d+)", result)[0], type=tag.type.name,
                                            has_auxiliary_text=tag.type.has_auxiliary_text,
                                            auxiliary_tag=tag.type.auxiliary_tag, tag=tag.type.tag))
                else:
                    return dict(result=dict())
        else:
            return dict(result=dict())
|
xskylarx/skytube-web | CursoDjango/CursoDjango/settings/local.py | Python | lgpl-3.0 | 265 | 0.003774 | __author__ = 'soporte'
from .base import *
DEBUG = True
T | EMPLATE_DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': Path.join(BASE_DIR, 'db.sqlite3'),
}
}
STATI | C_URL = '/static/' |
legnaleurc/tornado | tornado/test/util.py | Python | apache-2.0 | 4,446 | 0.001125 | from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on a | n external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCP | ython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile
# time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
    """Returns a local port number that will refuse all connections.
    Return value is (cleanup_func, port); the cleanup function
    must be called to free the port to be reused.
    """
    # On travis-ci, port numbers are reassigned frequently. To avoid
    # collisions with other tests, we use an open client-side socket's
    # ephemeral port number to ensure that nothing can listen on that
    # port.
    server_socket, port = bind_unused_port()
    server_socket.setblocking(1)
    client_socket = socket.socket()
    client_socket.connect(("127.0.0.1", port))
    # Accept and immediately drop the connection; closing the listening
    # socket afterwards means nothing can listen on the client's
    # ephemeral port while client_socket stays open.
    conn, client_addr = server_socket.accept()
    conn.close()
    server_socket.close()
    # client_socket.close is the cleanup callable that releases the port.
    return (client_socket.close, client_addr[1])
def exec_test(caller_globals, caller_locals, s):
    """Execute ``s`` in a given context and return the result namespace.

    Used to define functions for tests in particular python
    versions that would be syntax errors in older versions.
    """
    # Everything visible to the caller is "global" from the perspective
    # of the code defined in s.
    merged_globals = dict(caller_globals, **caller_locals)  # type: ignore
    defined_names = {}
    exec(textwrap.dedent(s), merged_globals, defined_names)
    return defined_names
def is_coverage_running():
    """Return whether coverage is currently running.
    """
    if 'coverage' not in sys.modules:
        return False
    tracer = sys.gettrace()
    if tracer is None:
        return False
    _missing = object()
    # The tracer may be a plain function (use its __module__) or an
    # instance of a tracer class (fall back to the class's module).
    mod = getattr(tracer, '__module__', _missing)
    if mod is _missing:
        mod = getattr(tracer.__class__, '__module__', _missing)
        if mod is _missing:
            return False
    return mod.startswith('coverage')
def subTest(test, *args, **kwargs):
    """Compatibility shim for unittest.TestCase.subTest.

    Usage: ``with tornado.test.util.subTest(self, x=x):``
    """
    native = getattr(test, 'subTest', None)  # py34+
    if native is None:
        # Older unittest: substitute a do-nothing context manager.
        native = contextlib.contextmanager(lambda *a, **kw: (yield))
    return native(*args, **kwargs)
|
bhagatyj/algorithms | dataStructures/priorityQ/c/priorityQTest.py | Python | mit | 1,399 | 0.042173 | # Input:
# N
# U:p,q
# U:x,y
# .....
# U:a,b
# C:a,y
# Output:
# TRUE/FALSE
import os
from subprocess import Popen, PIPE
import time
count = 0
def printline():
    """Print an 80-character '=' separator line (Python 2 print syntax)."""
    x = ''
    for i in range(80):
        x = x + "="
    print x
def singleTest(qn, ansExp):
    """Feed ``qn`` to ./exe on stdin and require its stdout to equal ``ansExp``.

    Increments the module-level test counter and raises Exception on a
    mismatch (Python 2: pipe data are plain str).
    """
    global count
    child = Popen("./exe", stdin=PIPE, stdout=PIPE)
    child.stdin.write(qn)
    child.stdin.close()
    #print "Waiting for read"
    #time.sleep(1)
    ansGot = child.stdout.read()
    printline()
    count = count + 1
    print "Test number: " + str(count)
    # print "Test input : \n" + qn
    # print "Expected Answer :\n" + ansExp
    # print "Got this Answer :\n" + ansGot
    if (ansExp != ansGot):
        raise Exception('Test failed')
    else:
        print("Passed")
def smallDigitTests():
    """Run fixed small-input cases, then a 256-element descending sequence
    that the priority queue must emit in ascending order."""
    singleTest('4\n34\n2\n45\n', '2\n4\n34\n45\n')
    singleTest('4\n34\n\n2\n45\n', '4\n2\n34\n45\n')
    qn = ""
    ans = ""
    # NOTE(review): this local ``count`` shadows the module-level test
    # counter used by singleTest - presumably intentional, but confirm.
    count = 256
    for i in range(count):
        qn = qn + ("%d\n" %(count-1-i))
        ans = ans + ("%d\n" %i)
    singleTest(qn, ans)
def runTests():
    """Run the full test suite and finish with a separator line."""
    smallDigitTests()
    printline()
def compileCode(source):
printline();
print "Compiling %s" % source
ret = os.system('gcc -g -Wall %s priorityQMain.c -o exe' %source )
if (ret != 0):
raise Exception("D | id not compile")
def cleanup():
    """Remove the compiled test binary (POSIX shell ``rm``)."""
    os.system('rm exe')
def test():
    """Compile each listed implementation, run the suite against it,
    and remove the binary afterwards (resets the global counter per source)."""
    global count
    sources = ["priorityQ_Impl1.c"]
    for source in sources:
        print
        count = 0
        compileCode(source)
        runTests()
        cleanup()
te | st()
|
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/kernel/clientabc.py | Python | apache-2.0 | 2,081 | 0.004325 | """Abstract base class for kernel clients"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import abc
#-----------------------------------------------------------------------------
# Main kernel client class
#-----------------------------------------------------------------------------
class KernelClientABC(object):
"""KernelManager ABC.
The docstrings for this class can be found in the base implementation:
`IPython.kernel.client.KernelClient`
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def kernel(self):
pass
@abc.abstractproperty
def shell_channel_class(self):
pass
@abc.abstractproperty
def iopub_channel_class(self):
pass
@abc.abstractproperty
def hb_channel_class(self):
pass
@abc.abstractproperty
def stdin_channel_class(sel | f):
pass
#--------------------------------------------------------------------------
# Channel management methods
#--------------------------------------------------------------------------
@abc.abstractmethod
def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
pass
@abc.abstractmethod
def stop_channels(self):
pass
@abc.abstractproperty
def channels_running(self):
pass
@abc.abstractproperty
def s | hell_channel(self):
pass
@abc.abstractproperty
def iopub_channel(self):
pass
@abc.abstractproperty
def stdin_channel(self):
pass
@abc.abstractproperty
def hb_channel(self):
pass
|
chhao91/pysal | pysal/spreg/utils.py | Python | bsd-3-clause | 24,433 | 0.002865 | """
Tools for different procedure estimations
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, \
Pedro V. Amaral pedro.amaral@asu.edu, \
David C. Folch david.folch@asu.edu, \
Daniel Arribas-Bel darribas@asu.edu"
import numpy as np
from scipy import sparse as SP
import scipy.optimize as op
import numpy.linalg as la
from pysal import lag_spatial
import copy
class RegressionPropsY:

    """
    Helper class that adds common regression properties, derived from the
    dependent variable ``y``, to any regression class that inherits it.
    Takes no parameters; see BaseOLS for example usage.

    Attributes
    ----------
    mean_y : float
             Mean of the dependent variable
    std_y  : float
             Standard deviation of the dependent variable
    """

    @property
    def mean_y(self):
        # Computed lazily and memoized in the shared cache dict.
        try:
            return self._cache['mean_y']
        except KeyError:
            self._cache['mean_y'] = np.mean(self.y)
        return self._cache['mean_y']

    @property
    def std_y(self):
        # Sample standard deviation (ddof=1), memoized.
        try:
            return self._cache['std_y']
        except KeyError:
            self._cache['std_y'] = np.std(self.y, ddof=1)
        return self._cache['std_y']
class RegressionPropsVM:

    """
    Helper class that adds common variance-related regression properties
    to any regression class that inherits it.  Takes no parameters; see
    BaseOLS for example usage.

    Attributes
    ----------
    utu     : float
              Sum of the squared residuals
    sig2n   : float
              Sigma squared with n in the denominator
    sig2n_k : float
              Sigma squared with n-k in the denominator
    vm      : array
              Variance-covariance matrix (kxk)
    """

    @property
    def utu(self):
        # Residual sum of squares, memoized in the shared cache dict.
        try:
            return self._cache['utu']
        except KeyError:
            self._cache['utu'] = np.sum(self.u ** 2)
        return self._cache['utu']

    @property
    def sig2n(self):
        try:
            return self._cache['sig2n']
        except KeyError:
            self._cache['sig2n'] = self.utu / self.n
        return self._cache['sig2n']

    @property
    def sig2n_k(self):
        try:
            return self._cache['sig2n_k']
        except KeyError:
            self._cache['sig2n_k'] = self.utu / (self.n - self.k)
        return self._cache['sig2n_k']

    @property
    def vm(self):
        try:
            return self._cache['vm']
        except KeyError:
            self._cache['vm'] = np.dot(self.sig2, self.xtxi)
        return self._cache['vm']
def get_A1_het(S):
    """
    Build A1 as in Arraiz et al [Arraiz2010]_:

    .. math::

        A_1 = W' W - diag(w'_{.i} w_{.i})

    Parameters
    ----------
    S        : csr_matrix
               PySAL W object converted into Scipy sparse matrix

    Returns
    -------
    csr_matrix
               A1 matrix in scipy sparse format
    """
    gram = S.T * S
    nrows, ncols = S.get_shape()
    # Subtract the diagonal of W'W, leaving only off-diagonal terms.
    diag = SP.spdiags([gram.diagonal()], [0], nrows, ncols).asformat('csr')
    return gram - diag
def get_A1_hom(s, scalarKP=False):
    """
    Build A1 for the spatial error GM estimation with homoscedasticity as
    in Drukker et al. [Drukker2011]_ (p. 9).

    .. math::

        A_1 = \{1 + [n^{-1} tr(W'W)]^2\}^{-1} \[W'W - n^{-1} tr(W'W) I\]

    Parameters
    ----------
    s        : csr_matrix
               PySAL W object converted into Scipy sparse matrix
    scalarKP : boolean
               Flag to include scalar corresponding to the first moment
               condition as in Drukker et al. (Defaults to False)

    Returns
    -------
    csr_matrix
               A1 matrix in scipy sparse format
    """
    n = float(s.shape[0])
    wpw = s.T * s
    twpw = np.sum(wpw.diagonal())
    # FIX: build the scaled identity from the *integer* dimension.  The
    # original passed the float ``n`` to SP.eye, which newer scipy/numpy
    # versions reject; the resulting matrix (tr(W'W)/n on the diagonal)
    # is identical.
    e = SP.identity(s.shape[0], format='csr') * (twpw / n)
    num = wpw - e
    if not scalarKP:
        return num
    # Scalar from the first moment condition.
    den = 1. + (twpw / n) ** 2.
    return num / den
def get_A2_hom(s):
    """
    Build A2 for the spatial error GM estimation with homoscedasticity as
    in Anselin (2011) [Anselin2011]_: the symmetrized weights matrix.

    .. math::

        A_2 = \dfrac{(W + W')}{2}

    Parameters
    ----------
    s        : csr_matrix
               PySAL W object converted into Scipy sparse matrix

    Returns
    -------
    csr_matrix
               A2 matrix in scipy sparse format
    """
    return (s + s.T) * 0.5
def _moments2eqs(A1, s, u):
    '''
    Helper to compute G and g in a system of two equations as in
    the heteroskedastic error models from Drukker et al. [Drukker2011]_
    ...

    Parameters
    ----------
    A1          : scipy.sparse.csr
                  A1 matrix as in the paper, different deppending on whether
                  it's homocedastic or heteroskedastic model
    s           : W.sparse
                  Sparse representation of spatial weights instance
    u           : array
                  Residuals. nx1 array assumed to be aligned with w

    Attributes
    ----------
    moments     : list
                  List of two arrays corresponding to the matrices 'G' and
                  'g', respectively.
    '''
    n = float(s.shape[0])
    # Precompute the two matrix-vector products used throughout.
    A1u = A1 * u
    wu = s * u
    # g vector: quadratic forms u'A1u and u'Wu, scaled by 1/n.
    g1 = np.dot(u.T, A1u)
    g2 = np.dot(u.T, wu)
    g = np.array([[g1][0][0], [g2][0][0]]) / n
    # G matrix: derivatives of the moment conditions, scaled by 1/n.
    G11 = np.dot(u.T, ((A1 + A1.T) * wu))
    G12 = -np.dot((wu.T * A1), wu)
    G21 = np.dot(u.T, ((s + s.T) * wu))
    G22 = -np.dot(wu.T, (s * wu))
    G = np.array([[G11[0][0], G12[0][0]], [G21[0][0], G22[0][0]]]) / n
    return [G, g]
def optim_moments(moments_in, vcX=np.array([0])):
"""
Optimization of moments
...
Parameters
----------
moments : Moments
Instance of gmm_utils.moments_het with G and g
vcX : array
Optional. 2x2 array with the Variance-Covariance matrix to be used as
weights in the optimization (applies Cholesky
decomposition). Set empty by default.
Returns
-------
x, f, d : tuple
x -- position of the minimum
f -- value of func at the minimum
d -- dictionary of information from routine
d['warnflag'] is
0 if converged
1 if too many function evaluations
2 if stopped for another reason, given in d['task']
d['grad'] is the gradient at the minimum (should be 0 ish)
d['funcalls'] is the number of function calls made
"""
moments = copy.deepcopy(moments_in)
if vcX.any():
Ec = np.transpose(la.cholesky(la.inv(vcX)))
moments[0] = np.dot(Ec, moments_in[0])
moments[1] = np.dot(Ec, moments_in[1])
scale = np.min([[np.min(moments[0]), np.min(moments[1])]])
moments[0], moments[1] = moments[0] / scale, moments[1] / scale
if moments[0].shape[0] == 2:
optim_par = lambda par: foptim_par(
np.array([[float(par[0]), float(par[0]) ** 2.]]).T, moments)
start = [0.0]
bounds = [(-1.0, 1.0)]
if moments[0].shape[0] | == 3:
optim_par = lambda par: foptim_par(
np.array([[float(par[0]), float(par[0]) ** 2., float(par[1])]]).T, moments)
start = [0.0, 0.0]
bounds = [(-1.0, 1.0), (0.0, None)]
lambdaX = op.fmin_l_bfgs_b(
optim_par, start, approx_grad=True, bounds=bounds)
return lambdaX[0][0]
| def foptim_par(par, moments):
"""
Preparation of the function of moments for minimization
...
Parameters
----------
lambdapar : float
Spatial autoregressive parameter
moments : list
List of Moments with G (moments[0]) and g (moments[1])
Returns
-------
minimum : float
sum of square r |
TiagoBras/audio-clip-extractor | setup.py | Python | mit | 1,840 | 0.000543 | try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
import os
def version():
    """Read and return the stripped contents of the VERSION file.

    Raises IOError with a helpful message when the file is missing.
    (The original placed the ``raise`` after an unconditional
    ``return`` inside the ``with`` block, making it unreachable, so a
    missing file surfaced as a bare IOError from ``open`` instead.)
    """
    path = os.path.abspath('VERSION')
    if not os.path.exists(path):
        raise IOError("Error: 'VERSION' file not found.")
    with open(path) as f:
        return f.read().strip()
VERSION = version()
setup(
name='audioclipextractor',
version=VERSION,
description='Easily extract multiple clips from audio files',
long_description=open(os.path.abspath('README.md')).read(),
long_description_content_type='text/markdown',
license='MIT',
author='Tiago Bras',
author_email='tiagodsbras@gmail.com',
download_url='https://github.com/TiagoBras/audio-clip-extractor/tarball/v%s' % VERSION,
url='https://github.com/TiagoBras/audio-clip-extractor',
packages=find_packages(exclude=[]),
include_package_data=True,
classifiers= | [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
| 'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Utilities'
],
entry_points='''
[console_scripts]
ace=audioclipextractor.scripts.main:cli
''',
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/mpl_toolkits/mplot3d/proj3d.py | Python | bsd-2-clause | 6,988 | 0.019176 | # 3dproj.py
#
"""
Various transforms used for by the 3D code
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
import numpy as np
import numpy.linalg as linalg
def line2d(p0, p1):
    """
    Return the coefficients (a, b, c) of the 2D line a*x + b*y + c = 0
    through ``p0`` and ``p1`` (only their x, y components are used).
    """
    x0, y0 = p0[:2]
    x1, y1 = p1[:2]
    if x0 == x1:
        # Vertical line: -x + x1 = 0
        a = -1
        b = 0
        c = x1
    elif y0 == y1:
        # Horizontal line: y - y1 = 0
        a = 0
        b = 1
        c = -y1
    else:
        # General two-point form.  BUG FIX: ``b`` was (x0 - x1), for
        # which a*x + b*y + c does not vanish at either endpoint; the
        # correct coefficient is (x1 - x0).
        a = (y0-y1)
        b = (x1-x0)
        c = (x0*y1 - x1*y0)
    return a, b, c
def line2d_dist(l, p):
    """
    Distance from the line ``l`` (tuple of coefficients a, b, c of
    a*x + b*y + c = 0) to the point ``p``.
    """
    a, b, c = l
    x0, y0 = p
    norm = np.sqrt(a**2 + b**2)
    return abs(a*x0 + b*y0 + c) / norm
def line2d_seg_dist(p1, p2, p0):
    """distance(s) from the segment p1-p2 to point(s) p0

    p0[0] = x(s), p0[1] = y(s); the foot of the perpendicular is
    p = p1 + u*(p2-p1), with u clamped to [0, 1] so it stays inside
    the segment.
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    px = np.asarray(p0[0]) - p1[0]
    py = np.asarray(p0[1]) - p1[1]
    # Projection parameter onto the infinite line, clipped to the segment.
    u = np.clip((px*dx + py*dy) / float(abs(dx**2 + dy**2)), 0, 1)
    return np.sqrt((px - u*dx)**2 + (py - u*dy)**2)
def test_lines_dists():
    """Visual demo: plot a segment and circles whose radii are the
    point-to-segment distances (requires matplotlib/pylab)."""
    import pylab
    ax = pylab.gca()
    xs, ys = (0,30), (20,150)
    pylab.plot(xs, ys)
    points = list(zip(xs, ys))
    p0, p1 = points
    # xs/ys are reused here for the query points.
    xs, ys = (0,0,20,30), (100,150,30,200)
    pylab.scatter(xs, ys)
    # NOTE(review): the first assignment is immediately overwritten by
    # the vectorized call below - presumably a leftover scalar check.
    dist = line2d_seg_dist(p0, p1, (xs[0], ys[0]))
    dist = line2d_seg_dist(p0, p1, np.array((xs, ys)))
    for x, y, d in zip(xs, ys, dist):
        c = Circle((x, y), d, fill=0)
        ax.add_patch(c)
    pylab.xlim(-200, 200)
    pylab.ylim(-200, 200)
    pylab.show()
def mod(v):
    """3d vector length"""
    x, y, z = v[0], v[1], v[2]
    return np.sqrt(x*x + y*y + z*z)
def world_transformation(xmin, xmax,
                         ymin, ymax,
                         zmin, zmax):
    """Return the 4x4 matrix mapping the world box to the unit cube."""
    dx = xmax - xmin
    dy = ymax - ymin
    dz = zmax - zmin
    return np.array([[1.0/dx, 0, 0, -xmin/dx],
                     [0, 1.0/dy, 0, -ymin/dy],
                     [0, 0, 1.0/dz, -zmin/dz],
                     [0, 0, 0, 1.0]])
def test_world():
    """Smoke-test: print the world transform for a sample bounding box."""
    M = world_transformation(100, 120, -100, 100, 0.1, 0.2)
    print(M)
def view_transformation(E, R, V):
    """
    Return the 4x4 view matrix for eye point ``E``, reference (look-at)
    point ``R`` and up vector ``V``: a rotation into the camera basis
    (u, v, n) composed with a translation by ``-E``.
    """
    n = (E - R)
    ## new
    # n /= mod(n)
    # u = np.cross(V,n)
    # u /= mod(u)
    # v = np.cross(n,u)
    # Mr = np.diag([1.]*4)
    # Mt = np.diag([1.]*4)
    # Mr[:3,:3] = u,v,n
    # Mt[:3,-1] = -E
    ## end new
    ## old
    # Orthonormal camera basis: n looks from R to E, u = V x n, v = n x u.
    n = n / mod(n)
    u = np.cross(V, n)
    u = u / mod(u)
    v = np.cross(n, u)
    Mr = [[u[0],u[1],u[2],0],
          [v[0],v[1],v[2],0],
          [n[0],n[1],n[2],0],
          [0, 0, 0, 1],
          ]
    #
    Mt = [[1, 0, 0, -E[0]],
          [0, 1, 0, -E[1]],
          [0, 0, 1, -E[2]],
          [0, 0, 0, 1]]
    ## end old
    return np.dot(Mr, Mt)
def persp_transformation(zfront, zback):
    """Return the 4x4 perspective projection matrix for the given
    front/back clip planes."""
    depth = zfront - zback
    a = (zfront + zback) / depth
    b = -2 * (zfront * zback) / depth
    return np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, a, b],
                     [0, 0, -1, 0]
                     ])
def proj_transform_vec(vec, M):
    """Apply projection matrix ``M`` to homogeneous vector(s) ``vec``
    and perform the perspective divide."""
    projected = np.dot(M, vec)
    w = projected[3]
    # clip here..
    return projected[0]/w, projected[1]/w, projected[2]/w
def proj_transform_vec_clip(vec, M):
    """
    Apply projection matrix ``M`` to homogeneous vector(s) ``vec``,
    perform the perspective divide, and also return a boolean
    visibility mask ``tis``.
    """
    vecw = np.dot(M, vec)
    w = vecw[3]
    # clip here..
    txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
    # Inside the unit square in (pre-divide) x/y?
    tis = (vecw[0] >= 0) * (vecw[0] <= 1) * (vecw[1] >= 0) * (vecw[1] <= 1)
    # FIX: np.sometrue was removed in NumPy 2.0; np.any is the documented
    # equivalent.
    if np.any(tis):
        # NOTE(review): this deliberately widens the mask to y < 1
        # whenever any point passes, mirroring the original behavior -
        # confirm against callers before changing.
        tis = vecw[1] < 1
    return txs, tys, tzs, tis
def inv_transform(xs, ys, zs, M):
    """Map projected coordinates back through the inverse of ``M``."""
    Minv = linalg.inv(M)
    hvec = vec_pad_ones(xs, ys, zs)
    back = np.dot(Minv, hvec)
    try:
        # Dehomogenize; tolerate overflow from degenerate w values.
        back = back / back[3]
    except OverflowError:
        pass
    return back[0], back[1], back[2]
def vec_pad_ones(xs, ys, zs):
    """
    Stack ``xs``, ``ys``, ``zs`` with a row (or scalar) of ones to form
    homogeneous coordinates.  Accepts numpy arrays, plain sequences, or
    scalars - distinguished by the exceptions raised below.
    """
    try:
        try:
            # numpy-array inputs: use .shape to size the ones row.
            vec = np.array([xs,ys,zs,np.ones(xs.shape)])
        except (AttributeError,TypeError):
            # plain sequences: fall back to len().
            vec = np.array([xs,ys,zs,np.ones((len(xs)))])
    except TypeError:
        # scalars: no len() either; pad with a single 1.
        vec = np.array([xs,ys,zs,1])
    return vec
def proj_transform(xs, ys, zs, M):
    """
    Transform the points by the projection matrix
    """
    return proj_transform_vec(vec_pad_ones(xs, ys, zs), M)
def proj_transform_clip(xs, ys, zs, M):
    """Project the points (xs, ys, zs) with matrix `M` and also return
    the clipping mask.

    Returns (txs, tys, tzs, tis).
    """
    return proj_transform_vec_clip(vec_pad_ones(xs, ys, zs), M)
# Backwards-compatible alias for the default projection helper.
transform = proj_transform
def proj_points(points, M):
    """Project an iterable of (x, y, z) points with `M` and regroup the
    result into a list of per-point tuples."""
    return list(zip(*proj_trans_points(points, M)))
def proj_trans_points(points, M):
    """Project an iterable of (x, y, z) points with matrix `M`."""
    xs, ys, zs = zip(*points)
    return proj_transform(xs, ys, zs, M)
def proj_trans_clip_points(points, M):
    """Project an iterable of (x, y, z) points with `M`, returning the
    clipping mask as well."""
    xs, ys, zs = zip(*points)
    return proj_transform_clip(xs, ys, zs, M)
def test_proj_draw_axes(M, s=1):
    """Draw the projected origin and x/y/z axis segments of length `s`."""
    import pylab
    xs, ys, zs = [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, s]
    txs, tys, tzs = proj_transform(xs, ys, zs, M)
    # First projected point is the origin; the rest are the axis tips.
    pts = list(zip(txs, tys))
    origin = pts[0]
    lines = [(origin, pts[1]), (origin, pts[2]), (origin, pts[3])]
    axes = pylab.gca()
    axes.add_collection(LineCollection(lines))
    for x, y, label in zip(txs, tys, ['o', 'x', 'y', 'z']):
        pylab.text(x, y, label)
def test_proj_make_M(E=None):
    """Build a sample projection matrix; `E` optionally overrides the
    default eye point."""
    # ``E = E or default`` would raise ValueError for multi-element
    # ndarrays (ambiguous truth value) and would also discard an
    # all-zero eye point, so test explicitly against None instead.
    if E is None:
        E = np.array([1, -1, 2]) * 1000
    R = np.array([1, 1, 1]) * 100   # look-at point
    V = np.array([0, 0, 1])         # up vector
    viewM = view_transformation(E, R, V)
    perspM = persp_transformation(100, -100)
    M = np.dot(perspM, viewM)
    return M
def test_proj():
    """Project a sample cube, draw it and label its corners."""
    import pylab
    M = test_proj_make_M()
    corner_order = [0, 1, 2, 3, 0, 4, 5, 6, 7, 4]
    ts = ['%d' % i for i in corner_order]
    xs = [0, 1, 1, 0, 0, 0, 1, 1, 0, 0]
    ys = [0, 0, 1, 1, 0, 0, 0, 1, 1, 0]
    zs = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    xs, ys, zs = [np.array(v) * 300 for v in (xs, ys, zs)]
    test_proj_draw_axes(M, s=400)
    txs, tys, tzs = proj_transform(xs, ys, zs, M)
    ixs, iys, izs = inv_transform(txs, tys, tzs, M)
    pylab.scatter(txs, tys, c=tzs)
    pylab.plot(txs, tys, c='r')
    for x, y, label in zip(txs, tys, ts):
        pylab.text(x, y, label)
    pylab.xlim(-0.2, 0.2)
    pylab.ylim(-0.2, 0.2)
    pylab.show()
def rot_x(V, alpha):
    """Rotate homogeneous vector `V` by angle `alpha` (radians) about
    the x axis and return the rotated vector."""
    cosa, sina = np.cos(alpha), np.sin(alpha)
    # Last row must be [0, 0, 0, 1] so the homogeneous w component is
    # preserved; a zero row would collapse every result to w == 0.
    M1 = np.array([[1, 0, 0, 0],
                   [0, cosa, -sina, 0],
                   [0, sina, cosa, 0],
                   [0, 0, 0, 1]])
    return np.dot(M1, V)
def test_rot():
    """Print two sample vectors rotated 30 degrees about the x axis."""
    for vec in ([1, 0, 0, 1], [0, 1, 0, 1]):
        print(rot_x(vec, np.pi / 6))
# Run the interactive projection demo when executed as a script.
if __name__ == "__main__":
    test_proj()
|
Inspq/ansible | test/units/modules/identity/keycloak/test_keycloak_group.py | Python | gpl-3.0 | 7,158 | 0.013412 | import collections
import os
import unittest
from ansible.modules.identity.keycloak.keycloak_group import *
class KeycloakGroupTestCase(unittest.TestCase):
    """Integration tests for the keycloak_group Ansible module.

    They exercise create/update/delete against a live Keycloak server
    expected at http://localhost:18081 with admin/admin credentials.
    """

    def _module_args(self, name, include_roles=True):
        """Build the standard module argument dict for group *name*."""
        args = {
            "username": "admin",
            "password": "admin",
            "realm": "master",
            "url": "http://localhost:18081",
            "name": name,
            "attributes": {
                "attr1": ["value1"],
                "attr2": ["value2"]
            },
            "state": "present",
            "force": False
        }
        if include_roles:
            args["realmRoles"] = ["uma_athorization"]
            args["clientRoles"] = {"master-realm": ["manage-users"]}
        return args

    def _assert_group_matches(self, results, expected):
        """Assert the returned group facts echo the submitted args."""
        self.assertEqual(results["ansible_facts"]["group"]["name"], expected["name"], "name: " + results["ansible_facts"]["group"]["name"] + " : " + expected["name"])
        self.assertDictEqual(results["ansible_facts"]["group"]["attributes"], expected["attributes"], "attributes: " + str(results["ansible_facts"]["group"]["attributes"]) + " : " + str(expected["attributes"]))
        # clientRoles/realmRoles round-trip assertions are intentionally
        # disabled: the server normalises them, so exact comparison fails.

    def test_create_group(self):
        """Creating a new group reports changed and echoes its data."""
        to_create = self._module_args("test1")
        results = group(to_create)
        print(str(results))
        self.assertTrue(results['changed'])
        self._assert_group_matches(results, to_create)

    def test_group_not_changed(self):
        """Re-submitting an identical group reports no change."""
        to_keep = self._module_args("test2")
        group(to_keep)
        results = group(to_keep)
        self.assertFalse(results['changed'])
        self._assert_group_matches(results, to_keep)

    def test_group_modify_force(self):
        """force=True re-creates an unchanged group and reports changed."""
        to_force = self._module_args("test3")
        group(to_force)
        to_force["force"] = True
        results = group(to_force)
        self.assertTrue(results['changed'])
        self._assert_group_matches(results, to_force)

    def test_modify_group(self):
        """Changing the attributes of an existing group is applied."""
        to_change = self._module_args("test4")
        group(to_change)
        to_change["attributes"] = {
            "attr3": ["value3"]
        }
        results = group(to_change)
        self.assertTrue(results['changed'])
        self._assert_group_matches(results, to_change)

    def test_delete_group(self):
        """state=absent deletes a previously created group."""
        to_delete = self._module_args("test1", include_roles=False)
        group(to_delete)
        to_delete["state"] = "absent"
        results = group(to_delete)
        self.assertTrue(results['changed'])
        self.assertEqual(results['stdout'], 'deleted', 'group has been deleted')
|
openattic/openattic | backend/ceph/migrations/0005_cephpool_percent_used.py | Python | gpl-2.0 | 479 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db | import models, migrations
class Migration(migrations.Migration):
    """Add the nullable, non-editable ``percent_used`` float field to
    the CephPool model."""

    dependencies = [
        ('ceph', '0004_rm_models_based_on_storageobj'),
    ]

    operations = [
        migrations.AddField(
            model_name='cephpool',
            name='percent_used',
            field=models.FloatField(default=None, editable=False, blank=True),
            preserve_default=True,
        ),
    ]
|
TomTranter/OpenPNM | openpnm/algorithms/TransientNernstPlanck.py | Python | mit | 2,989 | 0 | from openpnm.algorithms import TransientReactiveTransport
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class TransientNernstPlanck(TransientReactiveTransport):
    r"""
    A subclass of GenericTransport to perform steady and transient simulations
    of pure diffusion, advection-diffusion and advection-diffusion with
    migration.
    """

    def __init__(self, settings={}, phase=None, ion='', **kwargs):
        # Default settings (including GUI hints); the caller-supplied
        # ``settings`` dict overrides these below and is never mutated.
        def_set = {'phase': None,
                   'quantity': 'pore.concentration.'+ion,
                   'conductance': 'throat.ad_dif_mig_conductance.'+ion,
                   'ion': ion,
                   'gui': {'setup': {'phase': None,
                                     'quantity': '',
                                     'conductance': '',
                                     'ion': '',
                                     't_initial': None,
                                     't_final': None,
                                     't_step': None,
                                     't_output': None,
                                     't_tolerance': None,
                                     't_scheme': ''},
                           'set_IC': {'values': None},
                           'set_rate_BC': {'pores': None,
                                           'values': None},
                           'set_value_BC': {'pores': None,
                                            'values': None},
                           'set_source': {'pores': None,
                                          'propname': ''}
                           }
                   }
        super().__init__(**kwargs)
        self.settings.update(def_set)
        self.settings.update(settings)
        if phase is not None:
            self.setup(phase=phase)

    def setup(self, phase=None, quantity='', conductance='', ion='',
              t_initial=None, t_final=None, t_step=None, t_output=None,
              t_tolerance=None, t_precision=None, t_scheme='', **kwargs):
        """Copy every explicitly supplied argument into ``self.settings``.

        String arguments are applied when non-empty; numeric/time
        arguments when not None.  Extra keyword arguments are merged in
        as-is.
        """
        if phase:
            self.settings['phase'] = phase.name
        if quantity:
            self.settings['quantity'] = quantity
        if conductance:
            self.settings['conductance'] = conductance
        if ion:
            self.settings['ion'] = ion
        if t_initial is not None:
            self.settings['t_initial'] = t_initial
        if t_final is not None:
            self.settings['t_final'] = t_final
        if t_step is not None:
            self.settings['t_step'] = t_step
        if t_output is not None:
            self.settings['t_output'] = t_output
        if t_tolerance is not None:
            self.settings['t_tolerance'] = t_tolerance
        if t_precision is not None:
            self.settings['t_precision'] = t_precision
        if t_scheme:
            self.settings['t_scheme'] = t_scheme
        self.settings.update(kwargs)
|
FrancoisRheaultUS/dipy | doc/tools/apigen.py | Python | bsd-3-clause | 17,957 | 0.000223 | """
Attempt to generate templates for module reference with Sphinx
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module_with_import``
script.
Notes
-----
This parsing is based on the import and introspection of modules.
Previously functions and classes were found by parsing the text of .py files.
Extension modules should be discovered and included as well.
This is a modified version of a script originally shipped with the PyMVPA
project, then adapted for use first in NIPY and then in skimage. PyMVPA
is an MIT-licensed project.
"""
# Stdlib imports
import os
import re
from inspect import getmodule
from importlib import import_module
from types import BuiltinFunctionType, FunctionType
from inspect import ismethod
# suppress print statements (warnings for empty files)
DEBUG = True
class ApiDocWriter(object):
    """ Class for automatic detection and parsing of API docs
    to Sphinx-parsable reST format"""
    # reST heading underline characters by nesting depth; in practice
    # only the first two levels are used when writing module pages.
    rst_section_levels = ['*', '=', '-', '~', '^']
    def __init__(self,
                 package_name,
                 rst_extension='.txt',
                 package_skip_patterns=None,
                 module_skip_patterns=None,
                 other_defines=True
                 ):
        """ Initialize package for parsing
        Parameters
        ----------
        package_name : string
            Name of the top-level package.  *package_name* must be the
            name of an importable package
        rst_extension : string, optional
            Extension for reST files, default '.txt'
        package_skip_patterns : None or sequence of {strings, regexps}
            Sequence of strings giving URIs of packages to be excluded
            Operates on the package path, starting at (including) the
            first dot in the package path, after *package_name* - so,
            if *package_name* is ``sphinx``, then ``sphinx.util`` will
            result in ``.util`` being passed for searching by these
            regexps.  If is None, gives default. Default is:
            ['\\.tests$']
        module_skip_patterns : None or sequence
            Sequence of strings giving URIs of modules to be excluded
            Operates on the module name including preceding URI path,
            back to the first dot after *package_name*.  For example
            ``sphinx.util.console`` results in the string to search of
            ``.util.console``
            If is None, gives default. Default is:
            ['\\.setup$', '\\._']
        other_defines : {True, False}, optional
            Whether to include classes and functions that are imported in a
            particular module but not defined there.
        """
        # Mutable defaults are filled in here rather than the signature.
        if package_skip_patterns is None:
            package_skip_patterns = ['\\.tests$']
        if module_skip_patterns is None:
            module_skip_patterns = ['\\.setup$', '\\._']
        # NOTE: this assignment goes through the ``package_name``
        # property setter, which imports the package and resets state.
        self.package_name = package_name
        self.rst_extension = rst_extension
        self.package_skip_patterns = package_skip_patterns
        self.module_skip_patterns = module_skip_patterns
        self.other_defines = other_defines
    def get_package_name(self):
        # Getter backing the ``package_name`` property.
        return self._package_name
    def set_package_name(self, package_name):
        """ Set package_name
        >>> docwriter = ApiDocWriter('sphinx')
        >>> import sphinx
        >>> docwriter.root_path == sphinx.__path__[0]
        True
        >>> docwriter.package_name = 'docutils'
        >>> import docutils
        >>> docwriter.root_path == docutils.__path__[0]
        True
        """
        # It's also possible to imagine caching the module parsing here
        self._package_name = package_name
        root_module = self._import(package_name)
        # The last path entry wins for namespace packages with several roots.
        self.root_path = root_module.__path__[-1]
        # Invalidate the record of modules written by any previous run.
        self.written_modules = None
    # Expose get/set_package_name as the ``package_name`` property so
    # that assigning it re-imports the package and resets cached state.
    package_name = property(get_package_name, set_package_name, None,
                            'get/set package_name')
def _import(self, name):
""" Import namespace package """
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def _get_object_name(self, line):
""" Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
>>> docw | riter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
"""
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
""" Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
"""
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace(self.package_name + '.', '')
path = path.replace('.', os.path.sep)
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
""" Convert directory path to uri """
package_dir = self.package_name.replace('.', os.path.sep)
relpath = dirpath.replace(self.root_path, package_dir)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
""" Parse module defined in *uri* """
filename = self._uri2path(uri)
if filename is None:
print(filename, 'erk')
# nothing that we could handle here.
return ([], [])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f)
f.close()
return functions, classes
def _parse_module_with_import(self, uri):
"""Look for functions and classes in an importable module.
Parameters
----------
uri : str
The name of the module to be parsed. This module needs to be
importable.
Returns
-------
functions : list of str
A list of (public) function names in the module.
classes : list of str
A list of (public) class names in the module.
"""
mod = import_module(uri)
# find all public objects in the module.
obj_strs = [obj for obj in dir(mod) if not obj.startswith('_')]
functions = []
classes = []
for obj_str in obj_strs:
# find the actual object from its string representation
if obj_str not in mod.__dict__:
continue
obj = mod.__dict__[obj_str]
# Check if function / class defined in module
if not self.other_defines and not getmodule(obj) == mod:
continue
# figure out if obj is a function or class
if (hasattr(obj, 'func_name') or
isinstance(obj, BuiltinFunctionType) or
ismeth |
jackdpage/pylux | pylux/lib/tagger.py | Python | gpl-3.0 | 1,597 | 0.002505 | # Generates some automatic tags based on the value of other tags or information
# available.
import math
from pylux import reference
def tag_fixture_colour(fixture):
    """Set fixture.data['colour'] from the fixture's gel name.

    Unknown gels and fixtures without a gel both fall back to 'White'.
    """
    if 'gel' in fixture.data:
        try:
            fixture.data['colour'] = reference.gel_colours[fixture.data['gel']]
        except KeyError:
            # Gel name not present in the reference lookup table.
            fixture.data['colour'] = 'White'
    else:
        fixture.data['colour'] = 'White'
def tag_fixture_rotation(fixture):
    """Derive fixture.data['rotation'] from its position and focus point.

    When position/focus data is missing (e.g. a moving head has no focus
    values), default the rotation to 0 so the fixture points in its
    default orientation -- unless a rotation tag already exists, in
    which case it is kept as-is (it may have been set manually).
    """
    data = fixture.data
    if all(key in data for key in ('posX', 'posY', 'focusX', 'focusY')):
        dx = float(data['focusX']) - float(data['posX'])
        dy = float(data['focusY']) - float(data['posY'])
        data['rotation'] = 90 - math.degrees(math.atan2(dy, dx))
    elif 'rotation' not in data:
        data['rotation'] = 0
def tag_fixture_patch(doc, fixture):
    """Record the patch address of the fixture's first function, if any."""
    if fixture.functions:
        first_function = fixture.functions[0]
        fixture.data['patch_start'] = doc.get_function_patch(first_function)
def tag_fixture_all(doc, fixture):
    """Apply every automatic tag, including document-dependent ones."""
    tag_fixture_all_doc_independent(fixture)
    tag_fixture_patch(doc, fixture)
def tag_fixture_all_doc_independent(fixture):
    """Apply the automatic tags that need no document context."""
    tag_fixture_colour(fixture)
    tag_fixture_rotation(fixture)
|
douban/code | vilya/views/api/gists.py | Python | bsd-3-clause | 2,398 | 0 | # -*- coding: utf-8 -*-
import json
from vilya.libs import api_errors
from vilya.models.gist import (
Gist, gist_detail, PRIVILEGE_PUBLIC, PRIVILEGE_SECRET)
from vilya.views.api.gist import GistUI
from vilya.views.api.utils import json_body
# Quixote export list: sub-paths served by this module.
_q_exports = ['starred']
@json_body
def _q_index(request):
    """GET: list gists; POST: create a gist.

    POST accepts JSON data (preferred) or legacy form variables and
    responds 201 with the new gist's detail, or 422 when no file data
    was supplied.  GET lists the current user's gists, or a small
    discovery list for anonymous requests.
    """
    if request.method == 'POST':
        # The JSON body takes precedence over the legacy form variable.
        desc = request.data.get('description') or request.get_form_var(
            'description', '')
        # DEPRECATED, will be removed in future; use JSON to post data.
        file_names = request.get_form_var('file_name', '')
        file_contents = request.get_form_var('file_contents', '')
        if not request.data.get('public'):
            is_public = PRIVILEGE_SECRET
        else:
            is_public = PRIVILEGE_PUBLIC
        # A JSON ``files`` mapping overrides the form variables entirely.
        files = request.data.get('files')
        if files:
            file_names = []
            file_contents = []
            for file_name, file in files.iteritems():
                file_names.append(file_name)
                file_contents.append(file.get("content"))
        if file_names and file_contents:
            user = request.user
            # Fall back to the anonymous pseudo-user when not logged in.
            user_id = user and user.username or Gist.ANONYMOUS
            gist = Gist.add(desc, user_id, is_public,
                            file_names, file_contents)
            ret = gist_detail(gist, include_forks=True)
            request.response.set_status(201)
            return json.dumps(ret)
        else:
            raise api_errors.UnprocessableEntityError
    if request.user:
        gists = Gist.gets_by_owner(request.user.username, start=request.start)
    else:
        gists = Gist.discover('discover', start=request.start, limit=5)
    ret = [gist_detail(g) for g in gists]
    request.response.set_status(200)
    return json.dumps(ret)
def starred(request):
    """Return the JSON list of gists starred by the current user.

    Anonymous requests get an empty list.
    """
    user = request.user
    if user:
        gists = Gist.stars_by_user(user.username, start=request.start)
        ret = [gist_detail(g) for g in gists]
    else:
        # Previously ``gists`` was left undefined on this branch and the
        # unconditional comprehension below raised NameError for
        # anonymous users; build ``ret`` inside each branch instead.
        ret = []
    request.response.set_status(200)
    return json.dumps(ret)
def _q_lookup(request, item):
    """Route /gists/<id> to a GistUI when <id> is numeric; 404 otherwise."""
    if not item.isdigit():
        raise api_errors.NotFoundError("gist")
    return GistUI(request, item)
def _q_access(request):
request.response.set_content_type('application/json; charset=utf-8')
start = request.get_form_var('start', '0')
request.start = start.isdigit() and int(start) or 0
|
tsdmgz/ansible | lib/ansible/module_utils/ironware.py | Python | gpl-3.0 | 3,511 | 0.001139 | #
# Copyright (c) 2017, Paul Baker <paul@paulbaker.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, EntityCollection
from ansible.module_utils.connection import Connection, exec_command
_DEVICE_CONFIG = None
_CONNECTION = None
ironware_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallb | ack, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
}
ironware_argument_spec = {
'provider': dict(type='dict', options=ironware_provider_spec)
}
command_spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
def get_provider_argspec():
    """Return the argument spec for the ``provider`` option."""
    return ironware_provider_spec
def check_args(module):
    """Placeholder for provider argument validation (currently a no-op)."""
    pass
de | f get_connection(module):
global _CONNECTION
if _CONNECTION:
return _CONNECTION
_CONNECTION = Connection(module._socket_path)
return _CONNECTION
def to_commands(module, commands):
    """Normalise *commands* through the command spec, warning (but still
    returning) non-show commands while in check mode."""
    if not isinstance(commands, list):
        raise AssertionError('argument must be of type <list>')
    transform = EntityCollection(module, command_spec)
    commands = transform(commands)
    for item in commands:
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])
    return commands
def run_commands(module, commands, check_rc=True):
    """Execute each command over the persistent connection and return
    the decoded responses in order."""
    connection = get_connection(module)
    responses = []
    for cmd in to_commands(module, to_list(commands)):
        output = connection.get(**cmd)
        responses.append(to_text(output, errors='surrogate_then_replace'))
    return responses
def get_config(module, source='running', flags=None):
    """Fetch device configuration, caching the plain running config.

    Only the unflagged running config is cached in _DEVICE_CONFIG;
    other sources or flagged requests always hit the device.
    """
    global _DEVICE_CONFIG
    # ``source is 'running'`` compared string identity, which is an
    # implementation detail of literal interning; use equality instead.
    if source == 'running' and flags is None and _DEVICE_CONFIG is not None:
        return _DEVICE_CONFIG
    conn = get_connection(module)
    out = conn.get_config(source=source, flags=flags)
    cfg = to_text(out, errors='surrogate_then_replace').strip()
    if source == 'running' and flags is None:
        _DEVICE_CONFIG = cfg
    return cfg
def load_config(module, config):
    """Push *config* to the device over the persistent connection."""
    conn = get_connection(module)
    conn.edit_config(config)
|
stitchfix/pybossa | pybossa/sched.py | Python | agpl-3.0 | 7,613 | 0.003678 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
#import json
#from flask import Blueprint, request, url_for, flash, redirect, abort
#from flask import abort, request, make_response, current_app
from sqlalchemy.sql import text
from pybossa.model.app import App
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
from pybossa.core import db
import random
def new_task(app_id, user_id=None, user_ip=None, offset=0):
    '''Get a new task by calling the appropriate scheduler function.
    '''
    app = db.slave_session.query(App).get(app_id)
    if not app.allow_anonymous_contributors and user_id is None:
        # Anonymous contributions are disabled: return an error task.
        info = dict(
            error="This project does not allow anonymous contributors")
        return Task(info=info)
    # Map the project's configured scheduler name to its implementation.
    sched_map = {
        'default': get_depth_first_task,
        'breadth_first': get_breadth_first_task,
        'depth_first': get_depth_first_task,
        'random': get_random_task,
        'incremental': get_incremental_task}
    sched = sched_map.get(app.info.get('sched'), sched_map['default'])
    return sched(app_id, user_id, user_ip, offset=offset)
def get_breadth_first_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Return the uncompleted task with the fewest answers from others.

    Picks from the (up to 10) least-answered tasks the current user has
    not contributed to, honouring ``offset`` as an index into that
    candidate list.  Note that it **ignores** the number of answers
    limit for efficiency reasons, so some tasks may end up run more
    than is strictly needed.
    """
    if user_id and not user_ip:
        sql = text('''
        SELECT task.id, COUNT(task_run.task_id) AS taskcount FROM task
        LEFT JOIN task_run ON (task.id = task_run.task_id) WHERE NOT EXISTS
        (SELECT 1 FROM task_run WHERE app_id=:app_id AND
        user_id=:user_id AND task_id=task.id)
        AND task.app_id=:app_id AND task.state !='completed'
        group by task.id ORDER BY taskcount, id ASC LIMIT 10;
        ''')
        rows = db.slave_session.execute(sql, dict(app_id=app_id, user_id=user_id))
    else:
        if not user_ip:  # pragma: no cover
            user_ip = '127.0.0.1'
        sql = text('''
        SELECT task.id, COUNT(task_run.task_id) AS taskcount FROM task
        LEFT JOIN task_run ON (task.id = task_run.task_id) WHERE NOT EXISTS
        (SELECT 1 FROM task_run WHERE app_id=:app_id AND
        user_ip=:user_ip AND task_id=task.id)
        AND task.app_id=:app_id AND task.state !='completed'
        group by task.id ORDER BY taskcount, id ASC LIMIT 10;
        ''')
        rows = db.slave_session.execute(sql, dict(app_id=app_id, user_ip=user_ip))
    # Each row is (task_id, answer_count); only the ids are needed.
    task_ids = [row[0] for row in rows]
    if task_ids and offset < len(task_ids):
        return db.slave_session.query(Task).get(task_ids[offset])
    return None
def get_depth_first_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Return the ``offset``-th candidate task for the project, or None
    when no candidate exists at that position."""
    candidate_tasks = get_candidate_tasks(app_id, user_id, user_ip,
                                          n_answers, offset=offset)
    if candidate_tasks and offset < len(candidate_tasks):
        return candidate_tasks[offset]
    return None
def get_random_task(app_id, user_id=None, user | _ip=None, n_answers=30, off | set=0):
"""Returns a random task for the user"""
app = db.slave_session.query(App).get(app_id)
from random import choice
if len(app.tasks) > 0:
return choice(app.tasks)
else:
return None
def get_incremental_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """
    Get a new task for a given project with its last given answer.
    It is an important strategy when dealing with large tasks, as
    transcriptions.
    """
    # NOTE(review): the caller-supplied ``offset`` is ignored here --
    # candidates are always fetched with offset=0 and one is chosen at
    # random.  Confirm whether this is intentional.
    candidate_tasks = get_candidate_tasks(app_id, user_id, user_ip,
                                          n_answers, offset=0)
    total_remaining = len(candidate_tasks)
    if total_remaining == 0:
        return None
    rand = random.randrange(0, total_remaining)
    task = candidate_tasks[rand]
    # Find the most recent answer given for this task.
    q = db.slave_session.query(TaskRun)\
        .filter(TaskRun.task_id == task.id)\
        .order_by(TaskRun.finish_time.desc())
    last_task_run = q.first()
    if last_task_run:
        # Attach it so clients can continue from where others stopped.
        task.info['last_answer'] = last_task_run.info
    # TODO: As discussed in GitHub #53
    # it is necessary to create a lock in the task!
    return task
def get_candidate_tasks(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Gets all available tasks for a given project and user.

    A task qualifies when it is not completed and the current user
    (identified by id, or by IP for anonymous users) has not answered
    it yet; at most 10 tasks are returned, highest priority first.
    """
    rows = None
    if user_id and not user_ip:
        # Authenticated user: exclude tasks they already answered.
        query = text('''
        SELECT id FROM task WHERE NOT EXISTS
        (SELECT task_id FROM task_run WHERE
        app_id=:app_id AND user_id=:user_id AND task_id=task.id)
        AND app_id=:app_id AND state !='completed'
        ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = db.slave_session.execute(query, dict(app_id=app_id, user_id=user_id))
    else:
        # Anonymous user: identify by IP, defaulting to localhost.
        if not user_ip:
            user_ip = '127.0.0.1'
        query = text('''
        SELECT id FROM task WHERE NOT EXISTS
        (SELECT task_id FROM task_run WHERE
        app_id=:app_id AND user_ip=:user_ip AND task_id=task.id)
        AND app_id=:app_id AND state !='completed'
        ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = db.slave_session.execute(query, dict(app_id=app_id, user_ip=user_ip))
    tasks = []
    for t in rows:
        tasks.append(db.slave_session.query(Task).get(t.id))
    return tasks
|
tradej/pykickstart-old | tests/commands/url.py | Python | gpl-2.0 | 3,282 | 0.004875 | #
# Martin Gracik <mgracik@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission o | f Red Hat, Inc.
#
import unittest
from tests.baseclass import *
class FC3_TestCase(CommandTest):
    """Tests for the FC3 version of the url command."""
    def runTest(self):
        # pass: --url is accepted and normalised with quotes
        self.assert_parse("url --url=http://domain.com", "url --url=\"http://domain.com\"\n")
        # fail
        # missing required option --url
        self.assert_parse_error("url", KickstartValueError)
        # --url given without a value
        self.assert_parse_error("url --url", KickstartParseError)
class F13_TestCase(FC3_TestCase):
    """Tests for the F13 url command (adds --proxy)."""
    def runTest(self):
        # run FC3 test case
        FC3_TestCase.runTest(self)
        # pass: --proxy is accepted alongside --url
        self.assert_parse("url --url=http://someplace/somewhere --proxy=http://wherever/other",
                          "url --url=\"http://someplace/somewhere\" --proxy=\"http://wherever/other\"\n")
        # fail
        # --proxy is only valid for url, and requires a value
        self.assert_parse_error("cdrom --proxy=http://someplace/somewhere", KickstartParseError)
        self.assert_parse_error("url --url=http://someplace/somewhere --proxy", KickstartParseError)
        # --proxy without --url is still missing the required option
        self.assert_parse_error("url --proxy=http://someplace/somewhere", KickstartValueError)
class F14_TestCase(F13_TestCase):
def runTest(self):
# run FC6 test case
F13_TestCase.runTest(self)
# pass
self.assert_parse("url --url=https://someplace/somewhere --noverifyssl",
"url --url=\"https://someplace/somewhere\" --noverifyssl\n")
# fail
self.assert_parse_error("cdrom --noverifyssl", KickstartParseError)
class F18_TestCase(F14_TestCase):
def runTest(self):
# run F14 test case.
F14_TestCase.runTest(self)
# pass
self.assert_parse("url --mirrorlist=http://www.wherever.com/mirror",
"url --mirrorlist=\"http://www.wherever.com/mirror\"\n")
# fail
# missing one of required options --url or --mirrorlist
self.assert_parse_error("url", KickstartValueError)
self.assert_parse_error("url --mirrorlist", KickstartParseError)
# It's --url, not --baseurl.
self.assert_parse_error("url --baseurl=www.wherever.com", KickstartParseError)
# only one of --url or --mirrorlist may be specified
self.assert_parse_error("url --url=www.wherever.com --mirrorlist=www.wherever.com",
KickstartValueError)
if __name__ == "__main__":
unittest.main()
|
SKIRT/PTS | modeling/component/component.py | Python | agpl-3.0 | 19,107 | 0.002355 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.component.component Contains the ModelingComponent class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import ABCMeta
# Import astronomical modules
from astropy.units import Unit
# Import the relevant PTS classes and modules
from ...core.basics.configurable import Configurable
from ...core.tools import introspection
from ...core.tools import filesystem as fs
from ...core.filter.broad import BroadBandFilter
from ...core.basics.configuration import Configuration
from ..core.history import ModelingHistory
from ..core.commands import ModelingCommands
from ..core.environment import GalaxyModelingEnvironment, SEDModelingEnvironment, ImagesModelingEnvironment
from ...core.tools.utils import lazyproperty
from ...core.tools import parsing
# -----------------------------------------------------------------
class ModelingComponent(Configurable):
"""
This class...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(ModelingComponent, self).__init__(*args, **kwargs)
# The modeling configuration file
self.config_file_path = None
# The modeling environemnt
self.environment = None
# PTS directories
self.kernels_path = None
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(ModelingComponent, self).setup(**kwargs)
# Determine the path to the modeling configuration file
self.config_file_path = fs.join(self.config.path, "modeling.cfg")
# Check for the presence of the configuration file
if not fs.is_file(self.config_file_path): raise ValueError("The current working directory (" + self.config.path + ") is not a radiative transfer modeling directory (the configuration file is missing)")
# Determine the path to the kernels user directory
self.kernels_path = fs.join(introspection.pts_user_dir, "kernels")
# Create the modeling environment
if self.is_galaxy_modeling: self.environment = GalaxyModelingEnvironment(self.config.path)
elif self.is_sed_modeling: self.environment = SEDModelingEnvironment(self.config.path)
elif self.is_images_modeling: self.environment = ImagesModelingEnvironment(self.config.path)
# -----------------------------------------------------------------
@property
def history_file_path(self):
return self.environment.history_file_path
# -----------------------------------------------------------------
@property
def commands_file_path(self):
return self.environment.commands_file_path
# -----------------------------------------------------------------
@property
def fit_path(self):
return self.environment.fit_path
# -----------------------------------------------------------------
@property
def analysis_path(self):
return self.environment.analysis_path
# -----------------------------------------------------------------
@property
def reports_path(self):
return self.environment.reports_path
# -----------------------------------------------------------------
@property
def visualisation_path(self):
return self.environment.visualisation_path
# -----------------------------------------------------------------
@property
def plot_path(self):
return self.environment.plot_path
# -----------------------------------------------------------------
@property
def log_path(self):
return self.environment.log_path
# -----------------------------------------------------------------
@property
def config_path(self):
return self.environment.config_path
# -----------------------------------------------------------------
@property
def show_path(self):
return self.environment.show_path
# -----------------------------------------------------------------
@property
def build_path(self):
return self.environment.build_path
# -----------------------------------------------------------------
@property
def html_path(self):
return self.environment.html_path
# -----------------------------------------------------------------
@property
def object_name(self):
return self.modeling_configuration.name
# -----------------------------------------------------------------
@lazyproperty
def observed_sed(self):
# Return the observed SED
if self.is_galaxy_modeling: return self.environment.observed_sed
elif self.is_sed_modeling: return self.environment.observed_sed
else: raise ValueError("Observed SED is not defined for modeling types other than 'galaxy' or 'sed'")
# -----------------------------------------------------------------
@lazyproperty
def truncated_sed(self):
if not self.is_galaxy_modeling: raise RuntimeError("Something went wrong")
return self.environment.truncated_sed
# -----------------------------------------------------------------
@lazyproperty
def observed_sed_path(self):
# Return the correct path
if self.is_galaxy_modeling: return self.environment.observed_sed_path
elif self.is_sed_modeling: return self.environment.sed_path
else: raise ValueError("Obser | ved SED not defined for modeling types other than 'galax | y' or 'sed'")
# -----------------------------------------------------------------
@lazyproperty
def truncated_sed_path(self):
if not self.is_galaxy_modeling: raise RuntimeError("Something went wrong")
return self.environment.truncated_sed_path
# -----------------------------------------------------------------
def observed_flux(self, fltr, unit=None, add_unit=True):
"""
This function ...
:param fltr:
:param unit:
:param add_unit:
:return:
"""
return self.observed_sed.photometry_for_filter(fltr, unit=unit, add_unit=add_unit)
# -----------------------------------------------------------------
@lazyproperty
def observed_filters(self):
return self.observed_sed.filters()
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_names(self):
return [str(fltr) for fltr in self.observed_filters]
# -----------------------------------------------------------------
@lazyproperty
def observed_filter_wavelengths(self):
return [fltr.wavelength for fltr in self.observed_filters]
# -----------------------------------------------------------------
@lazyproperty
def sed_filters(self):
return self.observed_sed.filters()
# -----------------------------------------------------------------
@lazyproperty
def sed_filter_names(self):
return [str(fltr) for fltr in self.sed_filters]
# -----------------------------------------------------------------
@lazyproperty
def sed_filter_wavelengths(self):
return [fltr.pivot for fltr in self.sed_filters]
# -----------------------------------------------------------------
@lazyproperty
def modeling_configuration(self):
"""
This function ...
:return:
|
gchrupala/reimaginet | imaginet/defn/audiovis_rhn.py | Python | mit | 5,671 | 0.011462 | from funktional.layer import Layer, Dense, StackedGRU, StackedGRUH0, Convolution1D, \
Embedding, OneHot, clipped_rectify, sigmoid, steeper_sigmoid, tanh, CosineDistance,\
last, softmax3d, params, Attention
from funktional.rhn import StackedRHN0
import funktional.context as context
from funktional.layer import params
import imaginet.task as task
from funktional.util import autoassign
import funktional.util as util
from funktional.util import orthogonal, xavier, uniform
import theano.tensor as T
import theano
import zipfile
import numpy
import StringIO
import json
import cPickle as pickle
from theano.tensor.shared_randomstreams import RandomStreams
from imaginet.simple_data import vector_padder
class Encoder(Layer):
def __init__(self, size_vocab, size, depth=1, recur_depth=1,
filter_length=6, filter_size=64, stride=2, drop_i=0.75 , drop_s=0.25, residual=False, seed=1):
autoassign(locals())
self.Conv = Convolution1D(self.size_vocab, self.filter_length, self.filter_size, stride=self.stride)
self.RHN = StackedRHN0(self.filter_size, self.size, depth=self.depth, recur_depth=self.recur_depth,
drop_i=self.drop_i, drop_s=self.drop_s, residual=self.residual, seed=self.seed)
def params(self):
return params(self.Conv, self.RHN)
| def __call__(self, input):
return self.RHN(self.Conv(input))
class Visual(task.Task):
def __init__(self, config):
autoassign(locals())
self.margin_size = config.get('margin_size', 0.2)
self.updater = util.Adam(max_nor | m=config['max_norm'], lr=config['lr'])
self.Encode = Encoder(config['size_vocab'],
config['size'],
filter_length=config.get('filter_length', 6),
filter_size=config.get('filter_size', 1024),
stride=config.get('stride', 3),
depth=config.get('depth', 1),
recur_depth=config.get('recur_depth',1),
drop_i=config.get('drop_i', 0.75),
drop_s=config.get('drop_s', 0.25),
residual=config.get('residual', False),
seed=config.get('seed', 1))
self.Attn = Attention(config['size'], size=config.get('size_attn', 512))
self.ImgEncoder = Dense(config['size_target'], config['size'])
self.inputs = [T.ftensor3()]
self.target = T.fmatrix()
def compile(self):
task.Task.compile(self)
self.encode_images = self._make_encode_images()
self.conv_states = self._make_conv_states()
def params(self):
return params(self.Encode, self.Attn, self.ImgEncoder)
def __call__(self, input):
return util.l2norm(self.Attn(self.Encode(input)))
# FIXME HACK ALERT
def cost(self, i, s_encoded):
if self.config['contrastive']:
i_encoded = util.l2norm(self.ImgEncoder(i))
return util.contrastive(i_encoded, s_encoded, margin=self.margin_size)
else:
raise NotImplementedError
def args(self, item):
return (item['audio'], item['target_v'])
def _make_representation(self):
with context.context(training=False):
rep = self.Encode(*self.inputs)
return theano.function(self.inputs, rep)
def _make_pile(self):
with context.context(training=False):
rep = self.Encode.RHN.intermediate(self.Encode.Conv(*self.inputs))
return theano.function(self.inputs, rep)
def _make_conv_states(self):
with context.context(training=False):
states = self.Encode.Conv(*self.inputs)
return theano.function(self.inputs, states)
def _make_encode_images(self):
images = T.fmatrix()
with context.context(training=False):
rep = util.l2norm(self.ImgEncoder(images))
return theano.function([images], rep)
def encode_sentences(model, audios, batch_size=128):
"""Project audios to the joint space using model.
For each audio returns a vector.
"""
return numpy.vstack([ model.task.predict(vector_padder(batch))
for batch in util.grouper(audios, batch_size) ])
def layer_states(model, audios, batch_size=128):
"""Pass audios through the model and for each audio return the state of each timestep and each layer."""
lens = (numpy.array(map(len, audios)) + model.config['filter_length']) // model.config['stride']
rs = [ r for batch in util.grouper(audios, batch_size) for r in model.task.pile(vector_padder(batch)) ]
return [ r[-l:,:,:] for (r,l) in zip(rs, lens) ]
def conv_states(model, audios, batch_size=128):
"""Pass audios through the model and for each audio return the state of each timestep at the convolutional layer."""
lens = (numpy.array(map(len, audios)) + model.config['filter_length']) // model.config['stride']
rs = [ r for batch in util.grouper(audios, batch_size) for r in model.task.conv_states(vector_padder(batch)) ]
return [ r[-l:,:] for (r,l) in zip(rs, lens) ]
def encode_images(model, imgs, batch_size=128):
"""Project imgs to the joint space using model.
"""
return numpy.vstack([ model.task.encode_images(batch)
for batch in util.grouper(imgs, batch_size) ])
def symbols(model):
return model.batcher.mapper.ids.decoder
|
quantumlib/OpenFermion-Cirq | openfermioncirq/contrib/__init__.py | Python | apache-2.0 | 675 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Licen | se at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Li | cense for the specific language governing permissions and
# limitations under the License.
"""Package for contributions.
Any contributions not ready for full production can be put in a subdirectory in
this package.
"""
|
shoyer/numpy | numpy/distutils/exec_command.py | Python | bsd-3-clause | 10,919 | 0.003114 | """
exec_command
Implements exec_command function that is (almost) equivalent to
commands.getstatusoutput function but on NT, DOS systems the
returned status is actually correct (though, the returned status
values may be different by a factor). In addition, exec_command
takes keyword arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <pearu@cens.ioc.ee>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
======== ============ =================================================
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
posix darwin Darwin 7.2.0, Python 2.3
nt win32 Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt win32 Windows 98, Python 2.1.1. Idle 0.8
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
nt win32 Windows XP, Python 2.3.3
======== ============ =================================================
Known bugs:
* Tests, that send messages to stderr, fail when executed from MSYS prompt
because the messages are lost at some point.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['exec_command', 'find_executable']
import os
import sys
import subprocess
import locale
import warnings
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
def filepath_from_subprocess_output(output):
"""
Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
Inherited from `exec_command`, and possibly incorrect.
"""
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
output = output.decode(mylocale, errors='replace')
output = output.replace('\r\n', '\n')
# Another historical oddity
if output[-1:] == '\n':
output = output[:-1]
# stdio uses bytes in python 2, so to avoid issues, we simply
# remove all non-ascii characters
if sys.version_info < (3, 0):
output = output.encode('ascii', errors='replace')
return output
def forward_bytes_to_stdout(val):
"""
Forward bytes from a subprocess call to the console, without attempting to
decode them.
The assumption is that the subprocess call already returned bytes in
a suitable encoding.
"""
if sys.version_info.major < 3:
# python 2 has binary output anyway
sys.stdout.write(val)
elif hasattr(sys.stdout, 'buffer'):
# use the underlying binary output if there is one
sys.stdout.buffer.write(val)
elif hasattr(sys.stdout, 'encoding'):
# round-trip the encoding if necessary
sys.stdout.write(val.decode(sys.stdout.encoding))
else:
# make a best-guess at the encoding
sys.stdout.write(val.decode('utf8', errors='replace'))
def temp_file_name():
# 2019-01-30, 1.17
warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt', 'dos']:
fdir, fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW', 'PYTHON')
pythonexe = os.path.join(fdir, fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH', os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt', 'dos', 'os2']:
fn, ext = os.path.splitext(exe)
extra_suffixes = ['.exe', '.com', '.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.info('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {name: os.environ.get(name) for name in names}
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name, value in env.items():
os.environ[name] = value or ''
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
.. deprecated:: 1.17
Use subprocess.Popen instead
Parameters
----------
command : str
A concatenated string of executable and arguments.
execute_in : str
Before running command ``cd execute_in`` and after ``cd -``.
use_shell : {bool, None}, optional
If True, execute ``sh -c command``. Default None (True)
use_tee : {bool, None}, optional
If True use tee. Default None (True)
Returns
-------
res : str
Both stdout and stderr messages.
Notes
-----
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
# 2019-01-30, 1.17
warnings.warn('exec_command is deprecated since NumPy v1.17, use '
'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
log.debug('exec_command(%r,%s)' % (command,\
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
| exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
st = _ | exec_command(command,
|
GutiDeng/bcpy | KvDB/ImplSQLite.py | Python | gpl-3.0 | 9,116 | 0.00724 | from Interface import Interface
import sqlite3
import time
import types
import json
import os
class Implementation(Interface):
def __init__(self, connstr, *args, **kwargs):
dbfile = connstr
# automatically create parent directory
pdir = os.path.dirname(dbfile)
if not os.access(pdir, os.F_OK): os.makedirs(pdir, 0755)
self._tablename = args[0] if len(args) > 0 else 'ISLETDEFAULTTABLE'
self._conn = sqlite3.connect(dbfile, isolation_level=None)
self._cursor = self._conn.cursor()
self._list_length = kwargs.get('list_length', 2048)
# keys
def exists(self, k, f=None, format='dict'):
"""Tells whether the given k (or field f under k) exists."""
try:
return self._exists(k, f, format)
except sqlite3.OperationalError:
self._init_dict()
return self._exists(k, f, format)
def delete(self, k, f=None, format='dict'):
"""Deletes the k (or field f under k)."""
try:
self._delete(k, f, format)
except sqlite3.OperationalError:
self._init_dict()
self._delete(k, f, format)
def keys(self, format='dict'):
"""Acts as an iterator over the existing keys."""
tablename = self._get_tablename(format)
try:
for k in self._keys(format):
yield k
except sqlite3.OperationalError:
self._init_dict()
for k in self._keys(format):
yield k
def flushdb(self, format='dict'):
"""Remove all data."""
try:
tablename = self._get_tablename(format)
self._cursor.execute('''DELETE FROM %s''' % (tablename,))
except sqlite3.OperationalError:
pass
# Dict
def set(self, k, f, v=None):
"""Sets value v to the field f under k."""
if type(f) == types.DictType:
for _f, _v in f.iteritems():
self.set(k, _f, _v)
return
try:
self._set(k, f, v)
except sqlite3.OperationalError:
self._init_dict()
try:
self._set(k, f, v)
except sqlite3.OperationalError:
raise
def get(self, k, f=None, v=None):
"""Gets the value of field f under k, or v when absence."""
try:
return self._get(k, f, v)
except sqlite3.OperationalError:
self._init_dict()
try:
return self._get(k, f, v)
except sqlite3.OperationalError:
raise
# List
def append(self, k, v):
"""Appends v to the list under k."""
try:
return self._append(k, v)
except sqlite3.OperationalError:
self._init_list()
try:
return self._append(k, v)
except sqlite3.OperationalError:
raise
def pop(self, k, v=None):
"""Removes and return the last value from list k, or v when the list is empty."""
try:
return self._pop(k, v)
except sqlite3.OperationalError:
self._init_list()
try:
return self._pop(k, v)
except sqlite3.OperationalError:
raise
def prepend(self, k, v):
"""Prepends v to the list under k."""
try:
return self._prepend(k, v)
except sqlite3.OperationalError:
self._init_list()
try:
return self._prepend(k, v)
except sqlite3.OperationalError:
raise
def shift(self, k, v=None):
"""Removes and return the first value from list k, or v when the list is empty."""
try:
return self._shift(k, v)
except sqlite3.OperationalError:
self._init_list()
try:
return self._shift(k, v)
except sqlite3.OperationalError:
raise
############################################
# Implementations
def _exists(self, k, f=None, format='dict'):
"""Tells whether the given k (or field f under k) exists."""
tablename = self._get_tablename(format)
if f is None:
self._cursor.execute('''SELECT k FROM %s WHERE k=?''' % (tablename,), (k,))
return True if self._cursor.fetchone() else False
else:
self._cursor.execute('''SELECT k FROM %s WHERE k=? AND f=?''' % (tablename,), (k, f))
return True if self._cursor.fetchone() else False
def _delete(self, k, f=None, format='dict'):
"""Deletes the k (or field f under k)."""
tablename = self._get_tablename(format)
if f is None:
self._cursor.execute('''DELETE FROM %s WHERE k=?''' % (tablename,), (k,))
else:
self._cursor.execute('''DELETE FROM %s WHERE k=? AND f=?''' % (tablename,), (k, f))
def _keys(self, format='dict'):
"""Acts as an iterator over the existing keys."""
tablename = self._get_tablename(format)
cursor = self._conn.cursor()
cursor.execute('''SELECT DISTINCT k FROM %s''' % (tablename,))
while True:
row = cursor.fetchone()
if not row: break
yield row[0] # row[k]
# return Iterator
def _set(self, k, f, v):
self._cursor.execute('''INSERT OR REPLACE INTO %s VALUES (?, ?, ?)''' % (self._get_tablename(),), (k, f, v))
def _get(self, k, f=None, v=None):
"""Gets the value of field f under k, or v when absence."""
tablename = self._get_tablename()
if f is not None:
self._cursor.execute('''SELECT v FROM %s WHERE k=? AND f=?''' % (tablename,), (k, f))
row = self._cursor.fetchone()
return row[0] if row else v
else:
res = {}
self._cursor.execute('''SELECT f, v FROM %s WHERE k=?''' % (tablename,), (k, ))
while True:
row = self._cursor.fetchone()
if not row: break
res[row[0]] = row[1]
return res
def _append(self, k, v):
"""Appends v to the list under k."""
tablename = self._get_tablename('list')
self._cursor.execute('''INSERT INTO %s VALUES (?, ?, ?)''' % (tablename,), (k, time.time(), v))
self._cursor.execute('''DELETE FROM {tb} WHERE t < (SELECT t FROM {tb} ORDER BY t LIMIT 1 OFFSET {offset})\
'''.format(tb=tablename, offset=self._list_length))
def _pop(self, k, v):
tablename = self._get_tablename('list')
self._cursor.execute('''SELECT t, v FROM %s WHERE k=? ORDER BY t DESC LIMIT 1''' % (tablename,), (k,))
row = self._cursor.fetchone()
if row:
t, v = row[0], row[1]
self._cursor.execute('''DELETE FROM %s WHERE k=? AND t=?''' % (tablename,), (k, t))
return v
else:
return v
def _prepend(self, k, v):
"""Prepends v to the list under k."""
tablename = self._get_tablename('list')
self._cursor.execute('''INSERT INTO %s VALUES (?, (SELECT MIN(t)-1 FROM %s), ?)''' % (tablename, tablename), (k, v))
self._cursor.execute('''DELETE FROM {tb} WHERE t < (SELECT t FROM {tb} ORDER BY t LIMIT 1 OFFSET {offset})\
'''.format(tb=tablename, offset=self._list_length))
def _shift(self, k, v):
tablename = self._get_tablename('list')
self._cursor.execute('''SELECT t, v FROM %s WHERE k=? ORDER BY t LIMIT 1''' % (tablename,), (k | ,))
row = self._cursor.fetchone()
if row:
t, v = row[0], row[1]
self._cursor.execute('''DELETE FROM %s WHE | RE k=? AND t=?''' % (tablename,), (k, t))
return v
else:
return v
def _init_dict(self, force=False):
tablename = self._get_tablename()
if force:
self._cursor.execute('''DROP TABLE IF EXISTS %s''' % (tablename,))
self._cursor.execute('''CREATE TABLE IF |
True-Demon/gitkali | gitkali.py | Python | gpl-3.0 | 5,391 | 0.004823 | #!/usr/bin/python3
"""
Written by: True Demon
The non-racist Kali repository grabber for all operating systems.
Git Kali uses Offensive Security's package repositories and their generous catalog
of extremely handy penetration testing tools. This project is possible because
of Offensive Security actually sticking to good practices and keeping their
packages well-organized, so thanks OffSec! :)
#TryHarder
"""
# TODO: Finish Install Script
# TODO: Categorize tool searches
# TODO: Categorization of repos is a big task to be done later
# TODO: Include package management
import argparse
import packmgr as packager
from utils import * # includes sys, os
prog_info = "GIT Kali Project"
__author__ = "True Demon"
__winstall__ = "C:\\ProgramFiles\\GitKali\\" # Default package installation directory for Windows
__linstall__ = "/usr/share" # Default package installation directory for Linux
__install__ = "" # Used to store default install directory based on OS
try:
if os.name == 'posix':
__install__ = __linstall__
if os.getuid():
print("You need to be root to install packages. Try again as sudo.")
sys.exit()
elif os.name == 'nt':
__install__ = __winstall__
from ctypes import windll
if not windll.shell32.IsUserAnAdmin():
print("You must be an administrator to install packages. Please run from an escalated cmd.")
else:
sys.stderr("Could not detect your privileges / operating system. "
"This script only supports Linux (Posix) and Windows (nt) systems.")
except OSError:
sys.stderr("Unknown Operating System detected. You must have invented this one yourself! Teach me, Senpai!")
exit()
except ImportError as e:
sys.stderr("Invalid or missing libraries: \n%s" % e)
def search(search_word):
# search function for valid packages to install
found = []
with open('kali-packages.lst', 'r') as file:
packages = file.readlines()
for p in packages:
if search_word in p.split()[0]:
found.append(p.split()[0])
if not len(found):
print(Symbol.fail + " Could not find any matching packages")
return None
print("Found packages: ")
print(' '.join(found))
def check_install_dir(install_dir=__install__):
if os.path.exists(install_dir):
try:
os.chdir(install_dir)
if os.getcwd() != install_dir:
print("Something went wrong. We can't get to your installation directory: %s" % install_dir)
sys.exit()
except OSError:
print("Somehow, you broke it. Dunno how ya did it, but a bug report would be mighty handy to figure out how!")
sys.exit(-1)
def main():
parser = argparse.ArgumentParser(prog='gitkali.py', description='The apt-like Kali package installer for Linux',
epilog=prog_info, formatter_class=argparse.RawTextHelpFormatter)
parser._positionals.title = "Commands"
parser.add_argument("command", choices=["search", "install", "update", "upgrade"],
help="search : search package list for compatible packages\n" +
"install : install specified package\n" +
"update : update package lists\n" +
"upgrade : upgrade kali packages\n\n"
)
parser.add_argument("packages", action='store', metavar='package', nargs='*', help="package(s) to upgrade/install")
parser.add_argument("-d", "--directory", action='store', default=__install__,
help="Alternate installation directory")
args = parser.parse_args()
packages = [str(p) for p in args.packages] # Converts args.package(tuple) to list of strings for ease of use
args.directory = os.path.abspath(args.directory)
if args.command == 'search':
packager.check_kali_packages()
for p in packages:
search(p)
elif args.command == 'update':
packager.get_updates()
exit()
elif args.command == 'upgrade':
packager.upgrade(packages, args.directory)
elif args.command == 'install':
if len(packages) == 0 :
print("No packages given")
if '*' in packages:
# NEVER EVER EVE | R EVER EEEEEEEVVVVVEEEEEEEEEEEERRRRRRRRRRR DO THIS!!!
# TODO: EVENTUALLY...build a way for this to work safely...
| packager.install_all(args.directory)
if args.directory != __install__: # Usually /usr/share/
check_install_dir(args.directory) # Check that the directory exists
warn_non_standard_dir(args.directory) # Warn the user that this is not advised
response = input("Do you wish to proceed?: [y/N]") # Confirm decision
if response.upper() != 'Y':
exit()
packages_to_install = packager.get_local_packages(packages)
# Returns a dictionary ex: {package_name: package_url}
for p in packages_to_install:
print("Proceeding with install: ", p)
packager.install(p, packages_to_install[p], args.directory) # install(package_name, url, into directory)
if __name__ == "__main__":
main()
|
PiRSquared17/emacs-freex | test_regex.py | Python | gpl-3.0 | 465 | 0.008602 | #!/usr/bi | n/python
import os, re
aliases = os.listdir('/Users/greg/Documents/freex/')
aliases = [a.lower() for a in aliases if '.freex' in a]
aliases.sort(reverse=True)
aliases = [re.escape(a.lower()) for a in aliases]
aliasRegexpStr = '\\b'+'\\b|\\b'.join(aliases)+'\\b'
aliasRegexpStr = aliasRegexpStr.replace('\\ ', ' ?\\\n? *')
aliasRegexpStr = aliasRegexpStr.replace('\\ ', ' ?\\\n? *')
impLinkRegexp = re.compile(aliasRegexpStr,re.IGNORECASE| | re.MULTILINE)
|
Gitweijie/first_project | networking_cisco/backwards_compatibility.py | Python | apache-2.0 | 5,581 | 0 | # Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from types import ModuleType
from distutils.version import StrictVersion
from neutron.plugins.ml2.drivers import type_tunnel
from neutron import version
# Some constants and verifier functions have been deprecated but are still
# used by earlier releases of neutron. In order to maintain
# backwards-compatibility with stable/mitaka this will act as a translator
# that passes constants and functions according to version number.
NEUTRON_VERSION = StrictVersion(str(version.version_info))
NEUTRON_NEWTON_VERSION = StrictVersion('9.0.0')
NEUTRON_OCATA_VERSION = StrictVersion('10.0.0')
NEUTRON_PIKE_VERSION = StrictVersion('11.0.0')
n_c = __import__('neutron.common.constants', fromlist=['common.constants'])
constants = __import__('neutron_lib.constants', fromlist=['constants'])
if NEUTRON_VERSION >= NEUTRON_NEWTON_VERSION:
from neutron.conf import common as base_config
from neutron_lib.api import validators
is_attr_set = validators.is_attr_set
validators = validators.validators
n_c_attr_names = getattr(n_c, "_mg__my_globals", None)
else:
from neutron.api.v2 import attributes
from neutron.common import config as base_config
n_c_attr_names = n_c.my_globals
is_attr_set = attributes.is_attr_set
validators = attributes.validators
setattr(constants, 'ATTR_NOT_SPECIFIED', getattr(attributes,
'ATTR_NOT_SPECIFIED'))
if NEUTRON_VERSION >= NEUTRON_OCATA_VERSION:
from neutron.db.models import agent as agent_model
from neutron.db.models import l3 as l3_models
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib.api import extensions
from neutron_lib.db import model_base
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from neutron_lib.utils import helpers as common_utils
try:
from neutron import context
except ImportError:
from neutron_lib import context
get_plugin = directory.get_plugin
n_c_attr_names = dir(n_c)
HasProject = model_base.HasProject
VXLAN_TUNNEL_TYPE = type_tunnel.ML2TunnelTypeDriver
Agent = agent_model.Agent
RouterPort = l3_models.RouterPort
Router = l3_models.Router
def get_context():
return context.Context()
def get_db_ref(context):
return context
def get_tunnel_session(context):
return context.session
def get_novaclient_images(nclient):
return nclient.glance
else:
from neutron.api import extensions # noqa
from neutron.common import utils as common_utils # noqa
from neutron import context
from neutron.db import agents_db
from neutron.db import api as db_api
from neutron.db import l3_db
from neutron.db import model_base # noqa
from neutron.db import models_v2
from neutron.extensions import portbindings # noqa
from neutron.extensions import providernet # noqa
from neutron import manager
from neutron.plugins.common import constants as svc_constants
from neutron.services import service_base # noqa
def get_plugin(service=None):
if service is None:
return manager.NeutronManager.get_plugin()
else:
return manager.NeutronManager.get_service_plugins().get(service)
HasProject = models_v2.HasTenant
setattr(constants, 'L3', getattr(svc_constants, 'L3_ROUTER_NAT'))
VXLAN_TUNNEL_TYPE = type_tunnel.TunnelTypeDriver
Agent = agents_db.Agent
RouterPort = l3_db.RouterPort
Router = l3_db.Router
def get_context():
return None
def get_db_ref(context):
return db_api.get_session()
def get_tunnel_session(context):
return contex | t
def get_novaclient_images(nclient):
return nclient.images
if NEUTRON_VERSION >= NEUTRON_PIKE_VERSION:
from neutron.conf.agent import common as config
else:
from neutron.agent.common import config # noqa
core_opts = base_config.core_opts
# Bring in the union of all constants in neutron.common.constants
# and neutron_lib.constants. Handle any duplicates by using the
# values | in neutron_lib.
#
# In the plugin code, replace the following imports:
# from neutron.common import constants
# from neutron_lib import constants
# with (something like this):
# from networking_cisco import backward_compatibility as bc
# Then constants are referenced as shown in this example:
# port['devide_owner'] = bc.constants.DEVICE_OWNER_ROUTER_INTF
ignore = frozenset(['__builtins__', '__doc__', '__file__', '__name__',
'__package__', '__path__', '__version__'])
for attr_name in n_c_attr_names:
attr = getattr(n_c, attr_name)
if attr_name in ignore or isinstance(attr, ModuleType):
continue
else:
setattr(constants, attr_name, attr)
del n_c, ignore, attr_name, attr
|
joebowen/ChannelWorm | channelworm/fitter/__init__.py | Python | mit | 71 | 0.070423 | __all | __ = ["Initiator","Simulator","Evaluator","M | odelator","Validator"] |
jimyx17/jimh | lib/MultipartPostHandler.py | Python | gpl-3.0 | 3,642 | 0.005491 | #!/usr/bin/python
####
# 06/2010 Nic Wolfe <nic@wolfeden.ca>
# 02/2006 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
import urllib
import urllib2
import mimetools, mimetypes
import os, sys
# Controls how sequences are uncoded. If true, elements may be give | n multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not N | one and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) in (file, list, tuple):
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
@staticmethod
def multipart_encode(vars, files, boundary = None, buffer = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buffer is None:
buffer = ''
for(key, value) in vars:
buffer += '--%s\r\n' % boundary
buffer += 'Content-Disposition: form-data; name="%s"' % key
buffer += '\r\n\r\n' + value + '\r\n'
for(key, fd) in files:
# allow them to pass in a file or a tuple with name & data
if type(fd) == file:
name_in = fd.name
fd.seek(0)
data_in = fd.read()
elif type(fd) in (tuple, list):
name_in, data_in = fd
filename = os.path.basename(name_in)
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buffer += '--%s\r\n' % boundary
buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
buffer += 'Content-Type: %s\r\n' % contenttype
# buffer += 'Content-Length: %s\r\n' % file_size
buffer += '\r\n' + data_in + '\r\n'
buffer += '--%s--\r\n\r\n' % boundary
return boundary, buffer
https_request = http_request |
nicholasmalaya/arcanus | disputatio/routines/drag_polars/90.py | Python | mit | 2,860 | 0.016783 | import numpy as np
import matplotlib.pyplot as plt
def poly_print(c,name):
    """Pretty-print polynomial coefficients *c* as a gnuplot-style
    quoted expression labelled *name*, e.g.
    ``lift = 'c0*x^0 + c1*x^1 + ... + cN*x^N'``.

    c    -- sequence of coefficients, ordered by ascending power of x
    name -- label printed before the expression (e.g. 'lift' or 'drag')

    Always returns 0.  NOTE: Python 2 print-statement syntax.

    NOTE(review): when len(c) == 1 the "last step" branch wins over the
    "first step" branch, so the leading "name = '" is never printed.
    """
    #
    # prints coefficients for lift or drag functions
    #
    p=0  # current power of x
    print ''
    for item in c:
        # last step: close the quoted expression; the trailing comma
        # suppresses the newline (Python 2 print semantics)
        if(p == len(c)-1):
            print ' '+str(item)+'*x^'+str(p)+"'",
        # first step: open the expression with the name and a quote
        elif(p==0):
            print name +" = '"+str(item)+'*x^'+str(p)+" + "
            p=p+1
        # middle steps
        else:
            print ' '+str(item)+'*x^'+str(p)+" + "
            p=p+1
    print ''
    return 0
def reader(fl):
    """Read a two-column data file and return parallel float lists.

    Each line is expected to look like ``"<angle>, <value>"``:
    whitespace-separated fields, where the first field may carry a
    trailing comma (it is stripped before conversion).

    fl -- path of the file to read

    Returns a tuple ``(angle, cd)`` of two parallel lists of floats.
    """
    angle = []
    cd = []
    # Bug fix: the original had a stray bare name ``o`` after open(),
    # which raised NameError as soon as reader() was called.  A context
    # manager also guarantees the file is closed on error.
    with open(fl, "r") as fo:
        for line in fo:
            two_var = line.split()
            # first token may end with a comma, e.g. "12.5,"
            angle.append(float(two_var[0].split(',')[0]))
            cd.append(float(two_var[1]))
    return angle, cd
# -----------------------
# main function
# -----------------------
#
# http://docs.scipy.org/doc/numpy/reference/routines.polynomials.poly1d.html
#
# grab original data
#
f1 = "cd_90.dat"
f2 = "cl_90.dat"
anglecd, cd = reader(f1)
anglecl, cl = reader(f2)
#
# plot interpolated function
#
# theta := ((t+pi/2)%pi)-pi/2
# lift = ' if ( abs ( theta )< pi / 24 , theta * 9 , sin ( 2 * theta ) ) '
# drag = if(abs(theta)<pi/24,0.005+theta*theta*81/25, 1-0.8cos(2*theta))
#
#rad = np.linspace(0.0, 360.0)
#
anglecd = -1 + 2*np.array(anglecd)/360.0
anglecl = -1 + 2*np.array(anglecl)/360.0
#
# interpolate!
# interp1d(x, y, kind='cubic')
#
#from scipy.interpolate import interp1d
from numpy.polynomial import polynomial as P
inter_cd,stat_cd = P.polyfit(anglecd, cd, 16,full=True)
inter_cl,stat_cl = P.polyfit(anglecl, cl, 16,full=True)
rad = np.linspace(-0.99, 0.99)
t = (rad+np.pi)%np.pi - np.pi/2.0
#anglei = (rad+1)*180.
anglei = rad
#cdi = np.cos(2 | * np.pi * rad) * np.exp(-rad)
#cli = np.cos(2 * np.pi * rad)
#cli = np.where(abs(t) > np.pi/24., t, np.sin(2*t))
#cdi = np.where(abs(t) > np.pi/24., t*t*81/25., 1-0.8*np.cos(2*t))
pol | y_print(inter_cl,'lift')
poly_print(inter_cd,'drag')
#
# plot!
#
sz=25
plt.subplot(2, 1, 1)
plt.plot(anglecd, cd, 'ko-',label='COMSOL Data')
plt.plot(anglei, P.polyval(anglei,inter_cd), color='blue',label='Interpolant')
#plt.title(r'Coefficients of Drag/Lift as functions of $\alpha$')
#plt.subtitle(r'Coefficients of Drag/Lift as functions of $\alpha$')
#plt.xlabel(r'$\alpha$')
plt.ylabel(r'$C_d$',size=sz)
plt.legend()
plt.xlim([-1,1])
#plt.xlim([-np.pi,np.pi])
plt.ylim([-0.1,3.5])
plt.subplot(2, 1, 2)
plt.plot(anglecl, cl, 'ko-',label='COMSOL Data')
plt.plot(anglei, P.polyval(anglei,inter_cl), color='blue',label='Interpolant')
plt.ylabel(r'$C_l$',size=sz)
plt.xlabel(r'$\alpha$',size=sz)
plt.legend()
plt.xlim([-1,1])
plt.ylim([-3,3])
plt.savefig("90.png")
#
# nick
# 3/30/15
#
|
meerkat-cv/annotator-supreme | annotator_supreme/views/view_tools.py | Python | mit | 5,904 | 0.003557 | from flask import request, abort, session
from functools import wraps
import logging
import urllib.request as urllib2
import numpy as np
import cv2
import random
from annotator_supreme.views import error_views
from io import StringIO
from PIL import Image
from annotator_supreme import app
import os
import base64
def read_image_from_stream(stream):
    """Decode an image from a binary file-like object.

    stream -- object with a ``read()`` method returning the raw bytes
              of an encoded image (PNG/JPEG/...).

    Returns the decoded BGR image as a numpy array.
    Raises Exception('Invalid image file from stream') when the bytes
    cannot be decoded into a non-empty image.
    """
    try:
        arr = np.asarray(bytearray(stream.read()), dtype=np.uint8)
        image = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    except Exception:
        # narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed
        raise Exception('Invalid image file from stream')
    # cv2.imdecode signals failure by returning None rather than raising;
    # the original relied on image.shape then raising AttributeError
    if image is None:
        raise Exception('Invalid image file from stream')
    height, width = image.shape[:2]
    if height <= 0 or width <= 0:
        raise Exception('Invalid image file from stream')
    return image
def read_image_fro | m_url(url):
req = urllib2.Request(url, headers={'User-Agent' : "VirtualMakeup-API"})
res = urllib2.urlopen(req)
if res.getcode() != 200:
raise Exception('Invalid status code '+str(res | .getcode())+' from image url')
else:
return read_image_from_stream(res)
def read_image_b64(base64_string):
    """Decode a base64-encoded image string into a BGR numpy image.

    base64_string -- the image bytes encoded as base64 (str or bytes).

    Returns the decoded image array, or None when OpenCV cannot decode
    the payload (cv2.imdecode returns None on failure).
    """
    dec = base64.b64decode(base64_string)
    # np.fromstring is deprecated for binary input; np.frombuffer is the
    # documented replacement and avoids an extra copy
    npimg = np.frombuffer(dec, dtype=np.uint8)
    cvimg = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
    return cvimg
def image_to_dict(image):
    """Serialize an annotated image object into a plain dictionary.

    The 'anno' key holds one dict per bounding box; the remaining keys
    mirror the image's scalar attributes.
    """
    annotations = [
        {
            'labels': box.labels,
            'left': box.left,
            'top': box.top,
            'right': box.right,
            'bottom': box.bottom,
            'ignore': box.ignore,
        }
        for box in image.bboxes
    ]
    return {
        'anno': annotations,
        'dataset_name': image.dataset_name,
        'name': image.name,
        'phash': image.phash,
        'category': image.category,
        'partition': image.partition,
        'fold': image.fold,
        'last_modified': image.last_modified,
    }
def parse_content_type(request):
    """
    This function is used to extract the content type from the header.

    Returns 'application/json' or 'multipart/form-data', whichever the
    request's Content-Type header contains.
    Raises error_views.InvalidParametersError when the header is missing
    or matches neither supported type.
    """
    json_type = 'application/json'
    data_type = 'multipart/form-data'
    try:
        content_type = request.headers['content-type']
    except KeyError:
        # narrowed from a bare ``except:``; a missing header key raises
        # KeyError on Flask/werkzeug header objects
        raise error_views.InvalidParametersError('No Content-Type provided')
    lower_content_type = content_type.lower()
    if json_type in lower_content_type:
        return json_type
    if data_type in lower_content_type:
        return data_type
    raise error_views.InvalidParametersError('Invalid Content-Type')
def get_param_from_request(request, label):
    """
    Extract the field *label* from a POST or GET request.

    Returns a tuple ``(ok: bool, error: str, value)`` -- value is None
    whenever ok is False.
    """
    method = request.method
    if method == 'GET':
        value = request.args.get(label)
        if value is None:
            return (False, "No " + label + " in GET params", None)
        return (True, "", value)
    if method != 'POST':
        return (False, "Invalid request method", None)
    content_type = parse_content_type(request)
    if content_type == "multipart/form-data":
        if label not in request.form:
            return (False, "No " + label + " provided in form-data request", None)
        return (True, "", request.form[label])
    if content_type == 'application/json':
        try:
            input_params = request.get_json(True)
        except BaseException:  # matches the original bare ``except:``
            return (False, 'No valid JSON present', None)
        if label not in input_params:
            return (False, "No " + label + " provided in json payload", None)
        return (True, "", input_params[label])
def get_image_from_request(request):
    """
    This function is used to extract the image from a POST or GET request.
    Usually it is a url of the image and, in case of the POST is possible
    to send it as a multi-part data.
    Returns a tuple with (ok:boolean, error:string, image:ndarray)
    """
    # NOTE(review): methods other than GET/POST (and a POST whose content
    # type is neither branch) fall through and return None instead of a
    # tuple -- callers that unpack the result will fail.  The bare
    # ``except:`` clauses below also swallow all errors, including
    # KeyboardInterrupt.
    if request.method == 'POST':
        content_type = parse_content_type(request)
        if content_type == "multipart/form-data":
            # Binary upload: decode straight from the uploaded file stream.
            if 'image' in request.files:
                try:
                    image = read_image_from_stream(request.files['image'])
                    return (True, '', image)
                except:
                    return (False, "Unable to read uploaded file", None)
            else:
                return (False, "No image provided in form-data request", None)
        elif content_type == 'application/json':
            try:
                input_params = request.get_json(True)
            except:
                return (False, 'No valid JSON present', None)
            # JSON payload may carry either a URL or a base-64 string;
            # 'imageUrl' takes precedence when both are present.
            if 'imageUrl' in input_params:
                image_url = input_params['imageUrl']
                try:
                    image = read_image_from_url(image_url)
                    return (True, '', image)
                except:
                    return (False, 'Unable to read image from url', None)
            elif 'imageB64' in input_params:
                image_b64 = input_params['imageB64']
                try:
                    image = read_image_b64(image_b64)
                    return (True, '', image)
                except:
                    return (False, 'Unable to read base 64 image', None)
            else:
                return (False, 'Image url or base 64 string not informed', None)
    elif request.method == 'GET':
        # GET requests can only reference the image by URL.
        if request.args.get('imageUrl') == None:
            return (False, 'Image url not informed', None)
        else:
            image_url = request.args.get('imageUrl')
            try:
                image = read_image_from_url(image_url)
                return (True, '', image)
            except:
                return (False, 'Unable to read image from url', None)
richard-fisher/repository | system/base/libffi/actions.py | Python | gpl-2.0 | 355 | 0.019718 |
#!/usr/bin/python
from pisi.actionsapi i | mport shelltools, get, autotools, pisitools
def setup():
autotools.configure ("--prefix=/usr\
--disable-static")
def build():
    # Compile libffi with the Makefiles produced by setup()'s configure run.
    autotools.make ()
def install():
    """Install the built tree into the package staging dir and ship docs."""
    # Stage the compiled files under the PiSi install directory.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    # Package the documentation files alongside the library.
    pisitools.dodoc("README", "ChangeLog", "LICENSE")
|
sirech/deliver | deliver/converter/simple.py | Python | mit | 5,674 | 0.001586 | import email
import re
from cStringIO import StringIO
from email.charset import add_charset, Charset, QP
from email.generator import Generator
from email.header import decode_header, Header
from utils import to_unicode
# Globally replace base64 with quoted-printable
add_charset('utf-8', QP, QP, 'utf-8')
class UnicodeMessage(object):
'''
Wrapper around a email.message.Message, that allows to interact
with the message using decoded unicode strings.
Part of the interface to Message is supported. The interface
methods return normal unicode strings, with the email-specific
encoding parts removed.
The underlying message might be transformed by this class and should not
be used elsewhere.
'''
def __init__(self, msg):
'''
Create a message that is fully utf-8 encoded.
msg is the original message.
'''
if not isinstance(msg, email.message.Message):
raise TypeError('msg is not a Message')
self._msg = msg
charset = msg.get_content_charset() or 'utf-8'
self._body_charset = Charset(input_charset=charset)
assert self._body_charset.header_encoding in [None, QP]
assert self._body_charset.body_encoding in [None, QP]
if not self._msg.has_key('Subject'):
self._msg.add_header('Subject', '')
def __str__(self):
return self.as_string()
@property
def id(self):
return self['Message-Id']
def as_string(self):
"""
Returns the message as a string encoded with utf-8, avoiding the escaping
of 'From' lines.
"""
io = StringIO()
g = Generator(io, False) # second argument means "should I mangle From?"
g.flatten(self._msg)
return io.getvalue()
# Delegate to Message
def __getitem__(self, name):
'''Get a header value, from the message, decoded and as a
unicode string.
If the header does not exist, None is returned'''
value = self._msg[name]
if value is None:
return None
return u''.join(to_unicode(*tupl) for tupl in decode_header(value))
def replace_header(self, name, value):
'''Forwards the call to replace_header.
name the id of the header. If it does not exist yet, it is
newly created. This behavior is different from the standard
message.
value is passed as a unicode string. This method tries to
avoid encoding the value with a Header (i.e when the value is
an ascii string).
'''
assert isinstance(value, unicode)
try:
header = value.encode('ascii')
except UnicodeEncodeError:
header = Header(value.encode('utf-8'), 'UTF-8').encode()
if self._msg.has_key(name):
self._msg.replace_header(name, header)
else:
self._msg.add_header(name, header)
def get_payload(self, i=None, decode=False):
'''
Forwards the call to get_payload.
Instances of the type email.message.Message are wrapped as a
UnicodeMessage. Strings are returned as unicode.
'''
payload = self._msg.get_payload(i, decode)
if isinstance(payload, list):
return [UnicodeMessage(msg) for msg in payload]
elif isinstance(payload, email.message.Message):
return UnicodeMessage(payload)
elif isinstance(payload, str):
return to_unicode(payload, self._msg.get_content_charset())
return payload
def get_clean_payload(self, forbidden_words):
'''
Gets a text payload, with the given forbidden words replaced.
forbidden_words a dictionary containing pairs of
(word_to_replace, replacement).
'''
assert isinstance(forbidden_words, dict)
payload = self.get_payload(decode=True)
assert isinstance(payload, unicode)
payload = payload.split('\n')
return '\n'.join(
' '.join(self._clean_word(word, forbidden_words) for word in line.split(' '))
for line in payload)
def _clean_word(self, word, forbidden_words):
'''
Returns a replacement if the given word is in the forbidden
words dictionary. Otherwise, the word is returned unchanged.
The word is striped of punctuation (i.e. period, asterisks)
and converted to lower for the comparison.
'''
punctuation = '.!?*()\'"[]-_+=:;<>,/'
match = word.lower().strip(punctuation)
if match in forbidden_words:
replacement = forbidden_words[match]
word = re.sub(match, replacement, word, flags=re.IGNORECASE)
return word
def set_payload(self, payload):
'''
Forwards the call to set_payload.
If the payload is text, it is passed as a unicode string. Text
is encoded again before being passed. The content encoding is
changed to quoted printable to avoid encoding
incompatibilities.
'''
assert not isinstance(payload, str)
if isinstance(payload, unicode):
self.replace_header('Content-Transfer-Encoding', u'quoted-printable')
payload = self._body_charset.body_encode(
payload.encode(self._body_charset.input_charset), convert=False)
self._msg.set_payload(payload)
from email.Iterators import walk
def __getattr__(self, name):
return getattr(self._msg, n | ame | )
|
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 2/instances/11_0_wikiflow_1sh_1s_annot/longestsession_8/ConfigDB_Longest_8.py | Python | gpl-3.0 | 983 | 0.014242 | HOST = "wfSciwoncWiki:enw1989@172.31.29.101:27001,172.31.29.102:27001,172.31.29.103:27001,172.31.29.104:27001,172.31.29.105:27001,172.31.29.106:27001,172.31.29.107:27001,172.31.29.108:27001,172.31.29.109:27001/?authSource=admin"
P | ORT = ""
USER = ""
PASSWORD = ""
DATABASE = "wiki"
READ_PREFERENCE = "primary"
COLLECTION_INPUT = "user_sessions"
COLLECTION_OUTPUT = "top_sessions"
PREFIX_COLUMN = "w_"
ATTRIBUTES = ["duration", "start time", "end time", "contributor_username", "edition_counts"]
SORT = ["duration", "end time"]
OPERATION_TYPE = "GROUP_BY_FIXED_WINDOW"
COLUMN = " | end time"
VALUE = [(1236381526, 1238973525),(1238973526, 1241565525),(1241565526, 1244157525),(1244157526, 1246749525),(1246749526, 1249341525),(1249341526, 1251933525),(1251933526, 1254525525),(1254525526, 1257113925),(1257113926, 1259705925),(1259705926, 1262297925),(1262297926, 1264889925),(1264889926, 1265098299)]
INPUT_FILE = "user_info.csv"
OUTPUT_FILE = "top_sessions_8.csv"
|
funkyfuture/inxs | tests/test_cli.py | Python | agpl-3.0 | 515 | 0.001942 | from pathlib import Path
from inxs.cli import main as _main
from tests import equal_documents
def main(*args):
    """Invoke the inxs CLI entry point, converting Path args to strings."""
    normalized = tuple(str(a) if isinstance(a, Path) else a for a in args)
    _main(normalized)
# TODO case-study with this use-case
def test_mods_to_tei(datadir):
main("--inplace", datadir / "mods_to_tei.py", datadir / "mods_to_tei.xml")
assert equal_documents(datadir / "mods_to_tei.xml", datadir / "mods_to_tei_exp.xml")
|
system7-open-source/imamd | imam/imam/preset/local_preset.py | Python | agpl-3.0 | 823 | 0.010936 | """local_preset.py is imported by default_settings.py when no URL environment variable is defined.
"""
#
# Alter this skeleto | n to agree with the needs of your local environment
# Note: if you are using a URL 12-Factor configuration scheme, you will not be using this file
# important thing we do here is to import all those symbols that are defined in settin | gs.py
from ..settings import * # get most settings from ../settings.py
# or perhaps you would prefer something like:
# from staging import * # which in turn imports ../settings.py
# # # and now you can override the settings which we just got from settings.py # # # #
# for example, choose a different database...
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': 'db.sqlite',
# }
#}
# or:
#DEBUG = True
|
dribnet/dagbldr | examples/faces_vae/flying_faces_vae.py | Python | bsd-3-clause | 2,380 | 0.001261 | import argparse
import numpy as np
import os
from dagbldr.datasets import fetch_fer
from dagbldr.utils import load_checkpoint, interpolate_between_points, make_gif
parser = argparse.ArgumentParser()
parser.add_argument("saved_functions_file",
help="Saved pickle file from vae training")
parser.add_argument("--seed", "-s",
help="random seed for path calculation",
action="store", default=1979, type=int)
args = parser.parse_args()
if not os.path.exists(args.saved_functions_file):
raise ValueError("Please provide a valid path for saved pickle file!")
checkpoint_dict = load_checkpoint(args.saved_functions_file)
encode_function = checkpoint_dict["encode_function"]
decode_function = checkpoint_dict["decode_function"]
fer = fetch_fer()
data = fer["data"]
valid_indices = fer["valid_indices"]
valid_data = data[valid_indices]
mean_norm = fer["mean0"]
pca_tf = fer["pca_matrix"]
X = valid_data - mean_norm
X = np.dot(X, pca_tf.T)
random_state = np.random.RandomState(args.seed)
# number of samples
n_plot_samples = 5
# tfd dimensions
width = 48
height = 48
# Get random data samples
ind = np.arange(len(X))
random_state.shuffle(ind)
sample_X = X[ind[:n_plot_samples]]
def gen_samples(arr):
mu, | log_sig = encode_function(arr)
| # No noise at test time
out, = decode_function(mu + np.exp(log_sig))
return out
# VAE specific plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
samples = gen_samples(sample_X)
samples = np.dot(samples, pca_tf) + mean_norm
f, axarr = plt.subplots(n_plot_samples, 2)
for n, (X_i, s_i) in enumerate(zip(np.dot(sample_X, pca_tf) + mean_norm,
samples)):
axarr[n, 0].matshow(X_i.reshape(width, height), cmap="gray")
axarr[n, 1].matshow(s_i.reshape(width, height), cmap="gray")
axarr[n, 0].axis('off')
axarr[n, 1].axis('off')
plt.savefig('vae_reconstruction.png')
plt.close()
# Calculate linear path between points in space
mus, log_sigmas = encode_function(sample_X)
mu_path = interpolate_between_points(mus)
log_sigma_path = interpolate_between_points(log_sigmas)
# Path across space from one point to another
path = mu_path + np.exp(log_sigma_path)
out, = decode_function(path)
out = np.dot(out, pca_tf) + mean_norm
make_gif(out, "vae_code.gif", width, height, delay=1, grayscale=True)
|
cstbox/devel | bin/cbx-2to3.py | Python | lgpl-3.0 | 821 | 0.002439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Data filter converting CSTBox v2 event logs to v3 format.
Usage: ./cbx-2to3.py < /path/ | to/input/file > /path/to/output/file
"""
__author__ = 'Eric Pascual - CSTB (eric.pascual@cstb.fr)'
import fileinput
import json
for line in fileinput.input():
ts, var_type, var_name, value, data = line.split('\t')
# next 3 lines are specific to Actility box at home files conversion
if var_name.s | tartswith('home.'):
var_name = var_name[5:]
var_name = '.'.join((var_type, var_name))
data = data.strip().strip('{}')
if data:
pairs = data.split(',')
data = json.dumps(dict([(k.lower(), v) for k, v in (pair.split('=') for pair in pairs)]))
else:
data = "{}"
print('\t'.join((ts, var_type, var_name, value, data)))
|
velorientc/git_test7 | tests/hglib_encoding_test.py | Python | gpl-2.0 | 3,473 | 0.004607 | """Test for encoding helper functions of tortoisehg.util.hglib"""
from nose.tools import *
from tortoisehg.util import hglib
import helpers
JAPANESE_KANA_I = u'\u30a4' # Japanese katakana "i"
@helpers.with_encoding('utf-8')
def test_none():
    """None shouldn't be touched"""
    for attr in ('fromunicode', 'fromutf', 'tounicode', 'toutf'):
        convert = getattr(hglib, attr)
        assert_equals(None, convert(None))
@helpers.with_encoding('utf-8')
def test_fromunicode():
assert_equals(JAPANESE_KAN | A_I.encode('utf-8'),
hglib.fromunicode(JAPANESE_KANA_I))
@helpers.with_encoding('utf-8')
def test_fromunicode_unicodableobj():
"""fromunicode() accepts unicode-able obj like QString"""
class Unicodable(object):
def __unicode__(self):
return JAPANESE_KANA_I
assert_equals(JAPANESE_KANA_I.encode('utf-8'),
hglib.fromunicode(Unicodable()))
@helpers.with_encoding('ascii', 'utf-8')
def test_fromunicode_fallback():
assert_equals( | JAPANESE_KANA_I.encode('utf-8'),
hglib.fromunicode(JAPANESE_KANA_I))
@helpers.with_encoding('ascii')
def test_fromunicode_replace():
assert_equals('?', hglib.fromunicode(JAPANESE_KANA_I,
errors='replace'))
@helpers.with_encoding('ascii')
def test_fromunicode_strict():
assert_raises(UnicodeEncodeError,
lambda: hglib.fromunicode(JAPANESE_KANA_I))
@helpers.with_encoding('euc-jp')
def test_fromutf():
assert_equals(JAPANESE_KANA_I.encode('euc-jp'),
hglib.fromutf(JAPANESE_KANA_I.encode('utf-8')))
@helpers.with_encoding('ascii', 'euc-jp')
def test_fromutf_fallback():
assert_equals(JAPANESE_KANA_I.encode('euc-jp'),
hglib.fromutf(JAPANESE_KANA_I.encode('utf-8')))
@helpers.with_encoding('ascii')
def test_fromutf_replace():
assert_equals('?', hglib.fromutf(JAPANESE_KANA_I.encode('utf-8')))
@helpers.with_encoding('euc-jp')
def test_tounicode():
assert_equals(JAPANESE_KANA_I,
hglib.tounicode(JAPANESE_KANA_I.encode('euc-jp')))
@helpers.with_encoding('ascii', 'euc-jp')
def test_tounicode_fallback():
assert_equals(JAPANESE_KANA_I,
hglib.tounicode(JAPANESE_KANA_I.encode('euc-jp')))
@helpers.with_encoding('euc-jp')
def test_toutf():
assert_equals(JAPANESE_KANA_I.encode('utf-8'),
hglib.toutf(JAPANESE_KANA_I.encode('euc-jp')))
@helpers.with_encoding('ascii', 'euc-jp')
def test_toutf_fallback():
assert_equals(JAPANESE_KANA_I.encode('utf-8'),
hglib.toutf(JAPANESE_KANA_I.encode('euc-jp')))
@helpers.with_encoding('ascii')
def test_lossless_unicode_replaced():
l = hglib.fromunicode(JAPANESE_KANA_I, 'replace')
assert_equals('?', l)
assert_equals(JAPANESE_KANA_I, hglib.tounicode(l))
@helpers.with_encoding('euc-jp')
def test_lossless_unicode_double_mapped():
YEN = u'\u00a5' # "yen" and "back-slash" are mapped to the same code
l = hglib.fromunicode(YEN)
assert_equals('\\', l)
assert_equals(YEN, hglib.tounicode(l))
@helpers.with_encoding('ascii')
def test_lossless_utf_replaced():
u = JAPANESE_KANA_I.encode('utf-8')
l = hglib.fromutf(u)
assert_equals('?', l)
assert_equals(u, hglib.toutf(l))
@helpers.with_encoding('ascii')
def test_lossless_utf_cannot_roundtrip():
u = JAPANESE_KANA_I.encode('cp932') # bad encoding
l = hglib.fromutf(u)
assert_not_equals(u, hglib.toutf(l))
|
shobhitmishra/CodingProblems | epi_judge_python/max_safe_height.py | Python | mit | 308 | 0 | from test_framework import generic_test
def get_height(cases: int, drops: int) -> int:
    """Return the maximum number of floors that can be safely tested
    with *cases* identical cases and at most *drops* drops.

    With c cases and d drops the answer is sum_{i=1..c} C(d, i): the
    egg-drop recurrence F(c, d) = F(c, d-1) + F(c-1, d-1) + 1
    telescopes into that binomial sum.  Zero cases or zero drops test
    zero floors.
    """
    floors = 0
    term = 1  # running binomial coefficient C(drops, i)
    for i in range(1, min(cases, drops) + 1):
        term = term * (drops - i + 1) // i
        floors += term
    return floors
if __name__ == '__main__':
exit(
generic_test.generic_test_main('max_safe_height.py',
| 'max_safe_height.tsv', get_height))
|
BjerknesClimateDataCentre/QuinCe | DataPreparation/sqlite/sqlite_extractor.py | Python | gpl-3.0 | 2,156 | 0.016234 | import sys, os
import toml
import pandas as pd
import numpy as np
from DatabaseExtractor import DatabaseExtractor
def check_output_config(config):
    """Validate the output section of the configuration.

    Returns True when the configured sort column is one of the output
    columns; otherwise prints a diagnostic and returns False.
    """
    output_section = config['output']
    if output_section['sort_column'] in output_section['columns']:
        return True
    print("Sort column not in output columns list")
    return False
##########################################################
config = None
sqlite_file = None
out_file = None
try:
config_file = sys.argv[1]
with open(config_file, "r") as config_chan:
config = toml.loads(config_chan.read())
sqlite_file = sys.argv[2]
out_file = sys.argv[3]
except IndexError:
print("Usage: sqlite_extractor.py [config_file] [sqlite_file] [output_file]")
exit()
# Check configuration. Error messages printed in check function
if not check_output_config(config):
exit()
extractor = None
try:
# Initialise the database connection and check database config
extractor = DatabaseExtractor(sqlite_file, config)
# Extract all tables
all_datasets = []
for table in config['input']['tables']:
all_datasets.append(extractor.get_dataset(table['name']))
# Join and sort datasets
all_data = pd.concat(all_datasets)
all_data.sort_values(by=config['output']['sort_column'], inplace=True)
# Replace missing values
all_data.fillna(value=config['output']['empty_col_value'], inplace=True)
# Perform all mappings
if 'mappings' in config['column_mapping']:
for col_map in config['column_mapping']['mappings']:
mapped_values = []
all_data[col_map['column']] = all_data[col_map['column']].astype(str)
for source, dest in col_map['mapping']:
all_data[col_map['column']].replace(source, dest, inplace=True)
mapped_values.append(dest)
column_index = all_data.columns.get_loc(col_map['column'])
for i in range(0, len(all_data[col_map['column']])):
if all_d | ata.iloc[i, column_index] not in mapped_values:
all_data.iloc[i, column_index] = col_map['other']
# | Write the final CSV
all_data.to_csv(out_file, index=False)
finally:
if extractor is not None:
del extractor |
aglne/Solandra | scripts/get_initial_tokens.py | Python | apache-2.0 | 148 | 0.02027 | #!/usr/bin/python
import sys
def tokens(nodes):
    """Print evenly spaced initial Cassandra ring tokens for *nodes* nodes."""
    for i in range(0, nodes):
        # Floor division: tokens must be integers; `//` keeps the Python 2
        # integer-division behavior when run under Python 3.
        print(i * (2 ** 127 - 1) // nodes)
tokens(int(sys.argv[1]) | )
|
wright-group/WrightTools | tests/kit/remove_nans_1D.py | Python | mit | 875 | 0.004571 | #! /usr/bin/env python3
"""Test remove_nans_1D."""
# --- import -------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
# --- test ---------------------------------------------------------------------------------------
def test_simple():
    """remove_nans_1D should drop the NaN entries from a 1D array."""
    arr = np.arange(-4, 6, dtype=float)
    arr[arr < 0] = np.nan
    # Fix: the original asserted `a.all() == b.all()`, which compares two
    # scalars and can pass even when the arrays differ; compare element-wise.
    assert np.array_equal(wt.kit.remove_nans_1D(arr)[0],
                          np.arange(0, 6, dtype=float))
def test_multiple():
    """NaNs in any input array remove that index from every returned array."""
    arrays = [np.random.random(21) for _ in range(5)]
    arrays[0][0] = np.nan
    arrays[1][-1] = np.nan
    cleaned = wt.kit.remove_nans_1D(*arrays)
    # Two distinct indices held NaN, so every array shrinks 21 -> 19.
    for cleaned_arr in cleaned:
        assert cleaned_arr.size == 19
de | f test_list():
assert np.all(wt.kit.remove_nans_1D([np.nan, 1, 2, 3])[0] == np.array([1, 2, 3]))
if __name__ == "__main__":
    # Allow running the tests directly, without a test runner.
    test_simple()
    test_multiple()
    test_list()
|
ariovistus/pyd | examples/interpcontext/setup.py | Python | mit | 286 | 0.006993 | from pyd.support import setup, Extension, pydexe_sanity_check
# Abort early when the environment cannot build pyd executables.
pydexe_sanity_check()

projName = 'interpcontext'

# Build the interpcontext example as a single lumped D extension.
# (Repaired corrupted tokens "setu | p(" and "build_deimos | =True" in this copy.)
setup(
    name=projName,
    version='1.0',
    ext_modules=[
        Extension(projName, ['interpcontext.d'],
                  build_deimos=True, d_lump=True
                  )
    ],
)
|
zenodo/invenio | invenio/legacy/websubmit/functions/Create_Upload_Files_Interface.py | Python | gpl-2.0 | 23,083 | 0.002643 | # $Id: Revise_Files.py,v 1.37 2009/03/26 15:11:05 jerome Exp $
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit function - Displays a generic interface to upload, delete
and revise files.
To be used on par with Move_Uploaded_Files_to_Storage function:
- Create_Upload_Files_Interface records the actions performed by user.
- Move_Uploaded_Files_to_Storage execute the recorded actions.
NOTE:
=====
- Due to the way WebSubmit works, this function can only work when
      positioned at step 1 in WebSubmit admin, and
Move_Uploaded_Files_to_Storage is at step 2
FIXME:
======
- One issue: if we allow deletion or renaming, we might lose track of
a bibdoc: someone adds X, renames X->Y, and adds again another file
with name X: when executing actions, we will add the second X, and
rename it to Y
-> need to go back in previous action when renaming... or check
that name has never been used..
"""
__revision__ = "$Id$"
import os
from invenio.config import \
CFG_SITE_LANG
from invenio.base.i18n import gettext_set_language, wash_language
from invenio.legacy.bibdocfile.managedocfiles import create_file_upload_interface
def Create_Upload_Files_Interface(parameters, curdir, form, user_info=None):
"""
List files for revisions.
You should use Move_Uploaded_Files_to_Storage.py function in your
submission to apply the changes performed by users with this
interface.
@param parameters:(dictionary) - must contain:
+ maxsize: the max size allowed for uploaded files
+ minsize: the max size allowed for uploaded files
+ doctypes: the list of doctypes (like 'Main' or 'Additional')
and their description that users can choose from
when adding new files.
- When no value is provided, users cannot add new
file (they can only revise/delete/add format)
- When a single value is given, it is used as
default doctype for all new documents
Eg:
main=Main document|additional=Figure, schema. etc
('=' separates doctype and description
'|' separates each doctype/description group)
+ restrictions: the list of restrictions (like 'Restricted' or
'No Restriction') and their description that
users can choose from when adding/revising
files. Restrictions can then be configured at
the level of WebAccess.
- When no value is provided, no restriction is
applied
- When a single value is given, it is used as
                       default restriction for all documents.
- The first value of the list is used as default
                       restriction if the user is not given the
choice of the restriction. CHOOSE THE ORDER!
Eg:
=No restriction|restr=Restricted
('=' separates restriction and description
'|' separates each restriction/description group)
+ canDeleteDoctypes: the list of doctypes that users are
allowed to delete.
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canReviseDoctypes: the list of doctypes that users are
allowed to revise
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canDescribeDoctypes: the list of doctypes that users are
allowed to describe
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canCommentDoctypes: the list of doctypes that users are
allowed to comment
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canKeepDoctypes: the list of doctypes for which users can
choose to keep previous versions visible when
revising a file (i.e. 'Keep previous version'
checkbox). See also parameter 'keepDefault'.
Note that this parameter is ~ignored when
revising the attributes of a file (comment,
description) without uploading a new
file. See also parameter
Move_Uploaded_Files_to_Storage.forceFileRevision
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canAddFormatDoctypes: the list of doctypes for which users can
add new formats. If there is no value,
then no 'add format' link nor warning
about losing old formats are displayed.
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canRestrictDoctypes: the list of doctypes for which users can
choose the access restrictions when adding or
revising a file. If no value is given:
- no restriction is applied if none is defined
in the 'restrictions' parameter.
- else the *first* value of the 'restrictions'
parameter is used as default restriction.
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canRenameDoctypes: the list of doctypes that users are allowed
to rename (when revising)
Eg:
Main|Additional
('|' separated values)
Use '*' for all doctypes
+ canNameNewFiles: if user can choose the name of the files they
upload ( | 1) or not (0)
+ defaultFilenameDoctypes: Rename uploaded files to admin-chosen
values. List here the the files in
current submission directory that
| contain the names to use for each doctype.
Eg:
Main=RN|Additional=additional_filename
('=' separates doctype and file in curdir
'|' separates each doctype/file group).
If the same doctype is submitted
several times, a"-%i" suffix is added
|
rajiteh/taiga-back | taiga/mdrender/service.py | Python | agpl-3.0 | 4,927 | 0.003655 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public Licens | e as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of | the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import functools
import bleach
# BEGIN PATCH
import html5lib
from html5lib.serializer.htmlserializer import HTMLSerializer
def _serialize(domtree):
    """Serialize *domtree* back to HTML; patched in as bleach's serializer."""
    tree_walker = html5lib.treewalkers.getTreeWalker('etree')
    token_stream = tree_walker(domtree)
    html_serializer = HTMLSerializer(
        quote_attr_values=True,
        omit_optional_tags=False,
        alphabetical_attributes=True,
    )
    return html_serializer.render(token_stream)
bleach._serialize = _serialize
# END PATCH
from django.core.cache import cache
from django.utils.encoding import force_bytes
from markdown import Markdown
from .extensions.autolink import AutolinkExtension
from .extensions.automail import AutomailExtension
from .extensions.semi_sane_lists import SemiSaneListExtension
from .extensions.spaced_link import SpacedLinkExtension
from .extensions.strikethrough import StrikethroughExtension
from .extensions.wikilinks import WikiLinkExtension
from .extensions.emojify import EmojifyExtension
from .extensions.mentions import MentionsExtension
from .extensions.references import TaigaReferencesExtension
from .extensions.target_link import TargetBlankLinkExtension
# Bleach configuration
bleach.ALLOWED_TAGS += ["p", "table", "thead", "tbody", "th", "tr", "td", "h1",
"h2", "h3", "h4", "h5", "h6", "div", "pre", "span",
"hr", "dl", "dt", "dd", "sup", "img", "del", "br",
"ins"]
bleach.ALLOWED_STYLES.append("background")
bleach.ALLOWED_ATTRIBUTES["a"] = ["href", "title", "alt", "target"]
bleach.ALLOWED_ATTRIBUTES["img"] = ["alt", "src"]
bleach.ALLOWED_ATTRIBUTES["*"] = ["class", "style"]
def _make_extensions_list(project=None):
    """Build the Markdown extension stack used for all Taiga rendering.

    Project-aware extensions (wiki links, Taiga references) receive
    *project* so they can resolve links against it; the trailing strings
    name built-in python-markdown extensions.
    """
    return [AutolinkExtension(),
            AutomailExtension(),
            SemiSaneListExtension(),
            SpacedLinkExtension(),
            StrikethroughExtension(),
            WikiLinkExtension(project),
            EmojifyExtension(),
            MentionsExtension(),
            TaigaReferencesExtension(project),
            TargetBlankLinkExtension(),
            "extra",
            "codehilite",
            "sane_lists",
            "toc",
            "nl2br"]
import diff_match_patch
def cache_by_sha(func):
    """Cache ``func(project, text)`` results in the Django cache.

    The key combines the SHA-1 of the text with the project id, so the
    same text rendered for different projects is cached independently.
    """
    @functools.wraps(func)
    def _decorator(project, text):
        digest = hashlib.sha1(force_bytes(text)).hexdigest()
        cache_key = "{}-{}".format(digest, project.id)
        rendered = cache.get(cache_key)
        if rendered is None:
            rendered = func(project, text)
            # timeout=None caches forever; content-addressed keys never go stale.
            cache.set(cache_key, rendered, timeout=None)
        return rendered
    return _decorator
def _get_markdown(project):
    """Return a fresh Markdown renderer bound to *project*.

    ``extracted_data`` is filled by the mentions/references extensions
    while converting, and read back by :func:`render_and_extract`.
    """
    extensions = _make_extensions_list(project=project)
    md = Markdown(extensions=extensions)
    md.extracted_data = {"mentions": [], "references": []}
    return md
@cache_by_sha
def render(project, text):
    """Render markdown *text* to bleach-sanitized HTML (cached per project+text)."""
    md = _get_markdown(project)
    return bleach.clean(md.convert(text))
def render_and_extract(project, text):
    """Like :func:`render` but also return extracted mentions/references.

    Not cached: callers need the side-channel ``extracted_data`` from a
    fresh conversion.
    """
    md = _get_markdown(project)
    result = bleach.clean(md.convert(text))
    return (result, md.extracted_data)
class DiffMatchPatch(diff_match_patch.diff_match_patch):
    """diff_match_patch subclass with Taiga's inline-HTML diff rendering."""

    def diff_pretty_html(self, diffs):
        """Render a diff op-list as HTML with ins/del highlighting."""
        chunks = []
        for op, data in diffs:
            escaped = (data.replace("&", "&amp;")
                           .replace("<", "&lt;")
                           .replace(">", "&gt;")
                           .replace("\n", "<br />"))
            if op == self.DIFF_INSERT:
                chunks.append('<ins style="background:#e6ffe6;">%s</ins>' % escaped)
            elif op == self.DIFF_DELETE:
                chunks.append('<del style="background:#ffe6e6;">%s</del>' % escaped)
            elif op == self.DIFF_EQUAL:
                chunks.append("<span>%s</span>" % escaped)
        return "".join(chunks)
def get_diff_of_htmls(html1, html2):
    """Return inline-highlighted HTML describing the changes html1 -> html2."""
    differ = DiffMatchPatch()
    delta = differ.diff_main(html1, html2)
    # Collapse trivial char-level edits into human-meaningful chunks.
    differ.diff_cleanupSemantic(delta)
    return differ.diff_pretty_html(delta)
__all__ = ["render", "get_diff_of_htmls", "render_and_extract"]
|
GDG-JSS-NOIDA/programmr | programmr/app/migrations/0003_submission_ques_id.py | Python | gpl-3.0 | 580 | 0.001724 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-06 05:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the ``ques_ID`` foreign key (Submission -> Question)."""

    dependencies = [
        ('app', '0002_auto_20170105_0206'),
    ]

    operations = [
        migrations.AddField(
            model_name='submission',
            name='ques_ID',
            # default=1 backfills existing rows; preserve_default=False then
            # drops the default from the final schema.
            # (Repaired corrupted token "defau | lt=1" in this copy.)
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='app.Question'),
            preserve_default=False,
        ),
    ]
|
agry/NGECore2 | scripts/mobiles/naboo/naboo_shaupaut_elder.py | Python | lgpl-3.0 | 1,691 | 0.027203 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the naboo greater shaupaut elder creature with the spawn service."""
    mobileTemplate = MobileTemplate()
    # Repaired corrupted name "naboo_greater_shau | paut_elder"; the intact
    # spelling appears in the addMobileTemplate() call below.
    mobileTemplate.setCreatureName('naboo_greater_shaupaut_elder')
    mobileTemplate.setLevel(14)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)
    mobileTemplate.setMeatType("Carnivore Meat")
    mobileTemplate.setMeatAmount(50)
    mobileTemplate.setHideType("Bristley Hide")
    mobileTemplate.setHideAmount(30)
    # NOTE(review): this string looks corrupted in this copy of the file
    # ("Mammal | Hide") and the intended resource name cannot be recovered
    # from context — confirm against the upstream script.
    mobileTemplate.setBoneType("Mammal | Hide")
    mobileTemplate.setBoneAmount(30)
    mobileTemplate.setSocialGroup("self")
    mobileTemplate.setAssistRange(12)
    mobileTemplate.setStalker(False)
    mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
    templates = Vector()
    templates.add('object/mobile/shared_greater_shaupaut.iff')
    mobileTemplate.setTemplates(templates)
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)
    attacks = Vector()
    attacks.add('bm_bite_2')
    attacks.add('bm_defensive_2')
    attacks.add('bm_disease_2')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)
    core.spawnService.addMobileTemplate('naboo_greater_shaupaut_elder', mobileTemplate)
    return
webcomics/dosage | tests/mocks/extra/dummy.py | Python | mit | 193 | 0 | # SPDX-Li | cense-Identifier: MIT
# Copyright (C) 2021 Tobias Gruetzmacher
from ..scraper import _ParserScraper
class AnotherDummyTestScraper(_ParserScraper):
    # Minimal scraper defined in an "extra" module; presumably exercises the
    # test suite's scraper-discovery path — the URL is a placeholder.
    url = 'https://dummy.example/'
|
3dfxsoftware/cbss-addons | smile_base/__init__.py | Python | gpl-2.0 | 1,012 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This pro | gram is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Gen | eral Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_translation
import update
|
iulian787/spack | var/spack/repos/builtin/packages/perl-www-robotrules/package.py | Python | lgpl-2.1 | 637 | 0.006279 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlWwwRobotrules(PerlPackage):
    """Database of robots.txt-derived permissions"""

    homepage = "http://deps.cpantesters.org/?module=WWW%3A%3ARobotRules;perl=latest"
    url = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/WWW-RobotRules-6.02.tar.gz"

    # Repaired corrupted tokens ("sha25 | 6=" and a split inside the digest);
    # the reconstructed checksum is a well-formed 64-hex-char SHA-256.
    version('6.02', sha256='46b502e7a288d559429891eeb5d979461dd3ecc6a5c491ead85d165b6e03a51e')

    depends_on('perl-uri', type=('build', 'run'))
|
hylje/lbtcex | lbtcex/main/forms.py | Python | bsd-3-clause | 307 | 0.003257 | from django import forms
METHOD_CHOICES = (
("GET", "GET"),
("POST", "POST")
)
class ApiCallForm(forms.Form):
    """Form for composing a manual API request (method, path, optional body)."""
    # Repaired corrupted tokens "METHOD_CHO | ICES" and "/api | /myself/".
    method = forms.ChoiceField(choices=METHOD_CHOICES, initial="GET")
    path = forms.CharField(initial="/api/myself/")
    data = forms.CharField(widget=forms.Textarea, required=False)
|
chenjm1217/ryu_app | L2Switch.py | Python | apache-2.0 | 3,340 | 0.008683 | #!/usr/bin/env python
import struct
import logging
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import mac_to_port
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class L2Switch(app_manager.RyuApp):
    """A learning layer-2 switch for OpenFlow 1.0.

    Learns source MAC -> port mappings from PacketIn events, installs a
    flow for known destinations, and floods unknown ones.
    """
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]  # define the version of OpenFlow

    def __init__(self, *args, **kwargs):
        super(L2Switch, self).__init__(*args, **kwargs)
        # dpid -> {mac: port} learning table.
        self.mac_to_port = {}

    def add_flow(self, datapath, in_port, dst, actions):
        """Install a flow matching (in_port, dst MAC) with the given actions."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(
            in_port=in_port, dl_dst=haddr_to_bin(dst))
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=10, hard_timeout=30,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC, then forward (installing a flow) or flood."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src
        dpid = datapath.id  # get the dpid
        self.mac_to_port.setdefault(dpid, {})
        self.logger.info("packet in %s %s %s %s", dpid, src, dst, msg.in_port)
        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = msg.in_port
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD
        ofp_parser = datapath.ofproto_parser
        actions = [ofp_parser.OFPActionOutput(out_port)]
        if out_port != ofproto.OFPP_FLOOD:
            # Destination known: install a flow so future packets bypass the
            # controller. (Repaired corrupted token "datapath | ," here.)
            self.add_flow(datapath, msg.in_port, dst, actions)
        # We always send the packet_out to handle the first packet.
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        packet_out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions, data=data)
        datapath.send_msg(packet_out)

    # To show the message of ports' status.
    # (Repaired corrupted token "EventOFPPortStatu | s" in the decorator.)
    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def _port_status_handler(self, ev):
        """Log port add/delete/modify notifications."""
        msg = ev.msg
        reason = msg.reason
        port_no = msg.desc.port_no
        ofproto = msg.datapath.ofproto
        if reason == ofproto.OFPPR_ADD:
            self.logger.info("port added %s", port_no)
        elif reason == ofproto.OFPPR_DELETE:
            self.logger.info("port deleted %s", port_no)
        elif reason == ofproto.OFPPR_MODIFY:
            self.logger.info("port modified %s", port_no)
        else:
            self.logger.info("Illeagal port state %s %s", port_no, reason)
|
rachel-fenichel/blockly | scripts/i18n/create_messages.py | Python | apache-2.0 | 6,375 | 0.009882 | #!/usr/bin/python3
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
    """Return True iff *s* iterates over characters that are all ASCII."""
    try:
        for ch in s:
            if ord(ch) >= 128:
                return False
        return True
    except TypeError:
        # Non-iterable input, or elements that are not single characters.
        return False
def load_constants(filename):
    """Read in constants file, which must be output in every language.

    Returns a JS fragment assigning each constant to ``Blockly.Msg``;
    double quotes in values are escaped so they can sit inside the
    double-quoted JS string literal.
    """
    constant_defs = read_json_file(filename)
    constants_text = '\n'
    for key in constant_defs:
        value = constant_defs[key]
        value = value.replace('"', '\\"')
        constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
            key, value)
    return constants_text
def main():
  """Generate .js files defining Blockly core and language messages."""
  # Process command-line arguments.
  parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
  parser.add_argument('--source_lang', default='en',
                      help='ISO 639-1 source language code')
  parser.add_argument('--source_lang_file',
                      default=os.path.join('json', 'en.json'),
                      help='Path to .json file for source language')
  parser.add_argument('--source_synonym_file',
                      default=os.path.join('json', 'synonyms.json'),
                      help='Path to .json file with synonym definitions')
  parser.add_argument('--source_constants_file',
                      default=os.path.join('json', 'constants.json'),
                      help='Path to .json file with constant definitions')
  parser.add_argument('--output_dir', default='js/',
                      help='relative directory for output files')
  parser.add_argument('--key_file', default='keys.json',
                      help='relative path to input keys file')
  parser.add_argument('--quiet', action='store_true', default=False,
                      help='do not write anything to standard output')
  parser.add_argument('files', nargs='+', help='input files')
  args = parser.parse_args()
  if not args.output_dir.endswith(os.path.sep):
    args.output_dir += os.path.sep
  # Read in source language .json file, which provides any values missing
  # in target languages' .json files.
  source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
  # Make sure the source file doesn't contain a newline or carriage return.
  for key, value in source_defs.items():
    if _NEWLINE_PATTERN.search(value):
      print('ERROR: definition of {0} in {1} contained a newline character.'.
            format(key, args.source_lang_file))
      sys.exit(1)
  sorted_keys = sorted(source_defs.keys())
  # Read in synonyms file, which must be output in every language.
  synonym_defs = read_json_file(os.path.join(
      os.curdir, args.source_synonym_file))
  # synonym_defs is also being sorted to ensure the same order is kept
  synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
      .format(key, synonym_defs[key]) for key in sorted(synonym_defs)])
  # Read in constants file, which must be output in every language.
  constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
  # Create each output file.
  for arg_file in args.files:
    (_, filename) = os.path.split(arg_file)
    target_lang = filename[:filename.index('.')]
    if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
      # (Repaired corrupted token "read_json_fil | e" in this copy.)
      target_defs = read_json_file(os.path.join(os.curdir, arg_file))
      # Verify that keys are 'ascii'
      bad_keys = [key for key in target_defs if not string_is_ascii(key)]
      if bad_keys:
        print(u'These keys in {0} contain non ascii characters: {1}'.format(
            filename, ', '.join(bad_keys)))
      # If there's a '\n' or '\r', remove it and print a warning.
      for key, value in target_defs.items():
        if _NEWLINE_PATTERN.search(value):
          print(u'WARNING: definition of {0} in {1} contained '
                'a newline character.'.
                format(key, arg_file))
          target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
      # Output file.
      outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
      with codecs.open(outname, 'w', 'utf-8') as outfile:
        # NOTE(review): this .format() call has no placeholder in the header
        # string, so it is currently a no-op — confirm against upstream.
        outfile.write(
            """// This file was automatically generated.  Do not modify.
'use strict';
""".format(target_lang.replace('-', '.')))
        # For each key in the source language file, output the target value
        # if present; otherwise, output the source language value with a
        # warning comment.
        for key in sorted_keys:
          if key in target_defs:
            value = target_defs[key]
            comment = ''
            del target_defs[key]
          else:
            value = source_defs[key]
            comment = ' // untranslated'
          value = value.replace('"', '\\"')
          outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
              .format(key, value, comment))
        # Announce any keys defined only for target language.
        if target_defs:
          extra_keys = [key for key in target_defs if key not in synonym_defs]
          synonym_keys = [key for key in target_defs if key in synonym_defs]
          if not args.quiet:
            if extra_keys:
              print(u'These extra keys appeared in {0}: {1}'.format(
                  filename, ', '.join(extra_keys)))
            if synonym_keys:
              print(u'These synonym keys appeared in {0}: {1}'.format(
                  filename, ', '.join(synonym_keys)))
        outfile.write(synonym_text)
        outfile.write(constants_text)
      if not args.quiet:
        print('Created {0}'.format(outname))
if __name__ == '__main__':
main()
|
qtproject/pyside-pyside | tests/signals/signal2signal_connect_test.py | Python | lgpl-2.1 | 4,620 | 0.007359 | # -*- coding: utf-8 -*-
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
''' Test case for signal to signal connections.'''
import unittest
from PySide2.QtCore import *
def cute_slot():
    """No-op slot used as a connection target in the tests below.

    (Repaired corrupted token "def cut | e_slot" in this copy.)
    """
    pass
class TestSignal2SignalConnect(unittest.TestCase):
    '''Test case for signal to signal connections'''

    def setUp(self):
        # Set up the basic resources needed.
        self.sender = QObject()
        # (Repaired corrupted token "self.for | warder" in this copy.)
        self.forwarder = QObject()
        self.args = None
        self.called = False

    def tearDown(self):
        # Delete used resources. Individual tests may have deleted
        # sender/forwarder already, so only AttributeError is expected
        # (previously a bare except, which hid unrelated failures).
        try:
            del self.sender
        except AttributeError:
            pass
        try:
            del self.forwarder
        except AttributeError:
            pass
        del self.args

    def callback_noargs(self):
        # Default callback without arguments.
        self.called = True

    def callback_args(self, *args):
        # Default callback with arguments.
        if args == self.args:
            self.called = True
        else:
            raise TypeError("Invalid arguments")

    def callback_qobject(self, *args):
        # Default callback for QObject as argument.
        if args[0].objectName() == self.args[0]:
            self.called = True
        else:
            raise TypeError("Invalid arguments")

    def testSignalWithoutArguments(self):
        QObject.connect(self.sender, SIGNAL("destroyed()"),
                        self.forwarder, SIGNAL("forward()"))
        QObject.connect(self.forwarder, SIGNAL("forward()"),
                        self.callback_noargs)
        del self.sender
        self.assertTrue(self.called)

    def testSignalWithOnePrimitiveTypeArgument(self):
        QObject.connect(self.sender, SIGNAL("mysignal(int)"),
                        self.forwarder, SIGNAL("mysignal(int)"))
        QObject.connect(self.forwarder, SIGNAL("mysignal(int)"),
                        self.callback_args)
        self.args = (19,)
        self.sender.emit(SIGNAL('mysignal(int)'), *self.args)
        self.assertTrue(self.called)

    def testSignalWithMultiplePrimitiveTypeArguments(self):
        QObject.connect(self.sender, SIGNAL("mysignal(int,int)"),
                        self.forwarder, SIGNAL("mysignal(int,int)"))
        QObject.connect(self.forwarder, SIGNAL("mysignal(int,int)"),
                        self.callback_args)
        self.args = (23, 29)
        self.sender.emit(SIGNAL('mysignal(int,int)'), *self.args)
        self.assertTrue(self.called)

    def testSignalWithOneStringArgument(self):
        QObject.connect(self.sender, SIGNAL("mysignal(QString)"),
                        self.forwarder, SIGNAL("mysignal(QString)"))
        QObject.connect(self.forwarder, SIGNAL("mysignal(QString)"),
                        self.callback_args)
        self.args = ('myargument',)
        self.sender.emit(SIGNAL('mysignal(QString)'), *self.args)
        self.assertTrue(self.called)

    def testSignalWithOneQObjectArgument(self):
        QObject.connect(self.sender, SIGNAL('destroyed(QObject*)'),
                        self.forwarder, SIGNAL('forward(QObject*)'))
        QObject.connect(self.forwarder, SIGNAL('forward(QObject*)'),
                        self.callback_qobject)
        obj_name = 'sender'
        self.sender.setObjectName(obj_name)
        self.args = (obj_name, )
        del self.sender
        self.assertTrue(self.called)
if __name__ == '__main__':
unittest.main()
|
holly00shit/blogMM | app/models.py | Python | bsd-3-clause | 2,981 | 0.017108 | from app import db, app
from hashlib import md5
import flask.ext.whooshalchemy as whooshalchemy
ROLE_USER = 0
ROLE_ADMIN = 1
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
    """Application user with a self-referential follower relationship."""
    id = db.Column(db.Integer, primary_key = True)
    nickname = db.Column(db.String(64), unique = True)
    email = db.Column(db.String(120), index = True, unique = True)
    role = db.Column(db.SmallInteger, default = ROLE_USER)
    posts = db.relationship('Post', backref = 'author', lazy = 'dynamic')
    about_me = db.Column(db.String(140))
    last_seen = db.Column(db.DateTime)
    # Self-referential many-to-many: users this user follows.
    followed = db.relationship('User',
        secondary = followers,
        primaryjoin = (followers.c.follower_id == id),
        secondaryjoin = (followers.c.followed_id == id),
        backref = db.backref('followers', lazy = 'dynamic'),
        lazy = 'dynamic')

    def __repr__(self):
        return '<User %r>' % (self.nickname)

    # --- Flask-Login interface ---------------------------------------
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # NOTE(review): uses the Python 2 `unicode` builtin — this file
        # appears to target Python 2.
        return unicode(self.id)

    def avatar(self, size):
        # Gravatar URL derived from the MD5 of the email address.
        # NOTE(review): hashes the str directly (Python 2 semantics).
        return 'http://www.gravatar.com/avatar/' + \
            md5(self.email).hexdigest() + '?d=mm&s=' + str(size)

    @staticmethod
    def make_unique_nickname(nickname):
        """Return *nickname*, numerically suffixed if already taken."""
        if User.query.filter_by(nickname = nickname).first() is None:
            return nickname
        version = 2
        while True:
            new_nickname = nickname + str(version)
            if User.query.filter_by(nickname = new_nickname).first() is None:
                break
            version += 1
        return new_nickname

    def follow(self, user):
        """Follow *user*; returns self for chaining, None if already following."""
        if not self.is_following(user):
            self.followed.append(user)
            return self

    def unfollow(self, user):
        """Unfollow *user*; returns self for chaining, None if not following."""
        if self.is_following(user):
            self.followed.remove(user)
            return self

    # (Repaired corrupted signature "def is_following(self | , user)".)
    def is_following(self, user):
        return self.followed.filter(followers.c.followed_id == user.id).count() > 0

    def followed_posts(self):
        """Posts authored by followed users, newest first."""
        return Post.query.join(followers,
            (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id == self.id).order_by(Post.timestamp.desc())
class Post(db.Model):
    """A microblog post authored by a User."""
    __tablename__ = 'post'
    # Whoosh full-text index covers the post body column.
    # (Repaired corrupted literal "' | body'" — `body` is the column below.)
    __searchable__ = ['body']
    id = db.Column(db.Integer, primary_key = True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return '<Post %r>' % (self.body)

    @classmethod
    def all_posts(cls):
        """All posts joined with their authors, newest first."""
        return cls.query.join(User,
            (cls.user_id == User.id)).order_by(Post.timestamp.desc())
whooshalchemy.whoosh_index(app, Post) |
reclosedev/lathermail | lathermail/compat.py | Python | mit | 421 | 0.002375 | import sys
# True when running under Python 3; selects compatible names below.
# (Repaired corrupted tokens "sys. | version_info" and "fr | om urllib".)
IS_PY3 = sys.version_info[0] == 3

if IS_PY3:
    from http.client import NO_CONTENT
    from email import encoders as Encoders
    from urllib.parse import quote, urlencode
    unicode = str
    bytes = bytes
else:
    from email import Encoders
    from httplib import NO_CONTENT
    from urllib import quote, urlencode
    unicode = unicode
    _orig_bytes = bytes
    # Python 2 `bytes` is `str` and takes no encoding argument; accept and
    # drop extra args so call sites can use the Python 3 signature.
    bytes = lambda s, *a: _orig_bytes(s)
|
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/HelperScripts/_NotAvailable.py | Python | unlicense | 695 | 0.010072 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Pyth | on 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: _NotAvailable.py
import dsz
import sys
def main():
try:
cmdId = int(sys | .argv[1])
cmdName = dsz.cmd.data.Get('CommandMetaData::Name', dsz.TYPE_STRING, cmdId=cmdId, checkForStop=False)[0]
except:
cmdName = ''
if len(sys.argv) > 2:
reason = ' (%s)' % sys.argv[2]
else:
reason = ''
dsz.ui.Echo("Command '%s' is not available on this platform%s" % (cmdName, reason), dsz.ERROR)
return False
if __name__ == '__main__':
if main() != True:
sys.exit(-1) |
Mapita/mapita_ci | mapita/mapita_ci/settings_geojson_rest.py | Python | mit | 8,582 | 0.006875 | import os
# Django settings for geonition project.
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.Loader',
'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'geonition_utils.middleware.PreventCacheMiddleware', #should be only for REST data api
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'geonition_utils.middleware.IEEdgeMiddleware', #should be only for ui html/css apps
)
ROOT_URLCONF = 'mapita_ci.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mapita_ci.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#REQUIRED AND MODIFIED
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
#CHANGE TEST RUNNER TO OUR OWN TO DISABLE MODELTRANSLATION TESTS
TEST_RUNNER = 'mapita_ci.tests.GeonitionTestSuiteRunner'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'django.contrib.gis',
#geonition apps
# 'base_page',
# 'dashboard',
# 'maps',
# 'auth_page',
# 'plan_proposals',
# 'geonition_client',
# 'gntauth',
# 'gntimages',
'geojson_rest',
'geonition_utils',
# 'geoforms',
# third party apps
# 'modeltranslation',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
#"base_page.context_processors.organization"
)
#TEMPLATE_DIRS = (os.path.dirname(os.path.realpath(__file__)) + '/../statics/templates')
JAVASCRIPT_CLIENT_TEMPLATES = [
'geonition_auth.jquery.js',
'data_processing.jquery.js',
'opensocial_people.jquery.js',
'geonition_geojson.jquery.js',
'questionnaire.api.js'
]
from django.core.urlresolvers import reverse_lazy
LOGIN_REDIRECT_URL = reverse_lazy('dashboard')
LOGIN_URL = reverse_lazy('login')
LOGOUT_URL = reverse_lazy('logout')
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'a[9lifg_(udnsh5w$=4@+kjyt93ys%c9wa8ck(=22_1d*w2gws'
ADMINS = (
('Mikko Johansson', 'mikko.johansson@mapita.fi'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'testdb', # Or path to database file if using sqlite3.
'USER': 'test_user', # Not used with sqlite3.
'PASSWORD': 'test_pw', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
}
}
MEDIA_ROOT = '/home/msjohans/geonition_test/media'
#SPATIAL_REFERENCE_SYSTEM_ID = 3067
SPATIAL_REFERENCE_SYSTEM_ID = 3857
LANGUAGES = (('en', 'English'),
| ('fi', 'Suomi'),)
# Language code for this installation. All choice | s can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Helsinki'
SITE_ID = 1
DEBUG = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DEBUG = DEBUG
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/msjohans/geonition_test/static'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
POSTGIS_VERSION = (1, 5, 3)
POSTGIS_TEMPLATE = 'template_postgis'
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS = {'default_lon': 0,
'default_lat': 0,
'default_zoom': 4}
#MODEL TRANSLATION
#MODELTRANSLATION_TRANSLATION_REGISTRY = 'mapita_ci.translation'
#MODELTRANSLATION_TRANSLATION_FILES = ('mapita_ci.translation',)
# for django-jenkins
INSTALLED_APPS += ('django_jenkins',)
PROJECT_APPS = ('geojson_rest',)
#INSTALLED_APPS += ('django_extensions',)
#PROJECT_APPS = [appname for appname in INSTALLED_APPS if not (appname.startswith('django') or appname.startswith('modeltranslation'))]
JENKINS_TEST_RUNNER = 'mapita_ci.tests.GeonitionJenkinsTestSuiteRunner'
JENKINS_TASKS = (
'django_jenkins.tasks.with_coverage',
'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.django_tests' |
AndresYague/Snuppat | output/figuresAndTables/finalGraph.py | Python | mit | 7,072 | 0.015837 | import sys, math, os
import matplotlib.pyplot as plt
def main():
# Check that there's at least one argument
if len(sys.argv) < 2:
print("Usage python {} <file1> [<file2> ...]".format(sys.argv[0]))
return 1
# Automatically detect if decayed
if "decayed" in sys.argv[1]:
plotDecayed = True
else:
plotDecayed = False
# Read input file
fil = "finalGraph.in"
if os.path.isfile(fil):
with open(fil, "r") as fread:
lstyles = fread.readline().strip().split()
labs = []
for line in fread:
labs.append(line.strip())
lowZ = 27 # Lowest z value to represent
# Read "species.dat" and store all the values in lists
species = "../../data/species.dat"
atomicNum = []; atomicMass = []; namesZ = {}
with open(species, "r") as fread:
for line in fread:
lnlst = line.split()
# Correct special names
if lnlst[1] == "d" or lnlst[2] == "0":
lnlst[1] = "h"
# Now relate positions with atomic numbers, atomic masses, and names
zNum = int(lnlst[0]) - int(lnlst[2])
atomicNum.append(zNum)
atomicMass.append(int(lnlst[0]))
namesZ[lnlst[1]] = zNum
# Read all initial solar values
solar = "../../data/solarVals.dat"
solarValues = {}
with open(solar, "r") as fread:
for line in fread:
lnlst = line.split()
isotName = lnlst[0] + lnlst[2]
# Add mass fraction value per atomic number
key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
solarValues[key] = solarValues.get(key, 0) + val
# Go file by file
numDens = []
for archivo in sys.argv[1:]:
# Open file for reading
dens = []
fread = open(archivo, "r")
# Each line has mass, temperature, rho, radiat
# and elements in number fraction
newline = None
for line in fread:
if "#" in line:
continue
lnlst = line.split()
if len(lnlst) == 0:
if plotDecayed:
break
else:
continue
if not plotDecayed:
# Surface (newline[0] is the mass)
prevline = newline
newline = [float(x) for x in lnlst]
if newline[0] > 0.85:
break
if plotDecayed:
dens.append(float(lnlst[1]))
# Close file
fread.close()
# Calculate values of interest
if plotDecayed:
numDens.append(dens)
else:
numDens.append([(x + y)*0.5 for (x, y) in
zip(prevline[4:], newline[4:])])
# Calculate now the agb values and print the surface mass fractions per
# each isotope
print("# Surface number fraction values")
agbValues = []
for ii in range(len(numDens)):
dic = {}
dens = numDens[ii]
# Print the model name
print("# {}".format(sys.argv[ii + 1]))
# Add the values for each element
for jj in range(len(atomicNum)):
key = atomicNum[jj]
dic[key] = dic.get(key, 0) + dens[jj]*atomicMass[jj]
# Print the number fraction
print(dens[jj])
agbValues.append(dic)
print("")
# Now identify iron:
ironNumber = namesZ["fe"]
# Now divide every element by iron
for dens in agbValues:
ironDens = dens[ironNumber]
for key in dens:
dens[key] /= ironDens
# Solar as well
ironDens = solarValues[ironNumber]
for key in solarValues:
solarValues[key] /= ironDens
# Now create the final values
finalValues = []
zList = [x for x in solarValues.keys()]
zList.sort()
for dens in agbValues:
thisDens = []
for key in zList:
if key < lowZ:
continue
val = math.log10(dens[key]/solarValues[key])
thisDens.append(val)
finalValues.append(thisDens)
# Create xaxis:
xx = [x for x in zList if x >= lowZ]
# Print final values
print("# [X/Fe] values")
for ii in range(len(sys.argv[1:])):
print("# {}".format(sys.argv[ii + 1]))
print("")
for jj in range(len(xx)):
print(xx[jj], finalValues[ii][jj])
print("")
# From zList create contIndx. This list contains a number of
# tuples with the first and last index of any contiguous sequence
indx = 1; first = 0
prevKey = None; contIndx = []
for key in xx:
if prevKey is None:
prevKey = key
continue
# Check if keys are contiguous
if key - prevKey > 1:
contIndx.append((first, indx))
first = indx
prevKey = key
indx += 1
# Add last tuple
contIndx.append((first, indx + 1))
# Begin plot
figure = plt.figure()
plt.xlabel("Atomic number Z", size = 14)
plt.ylabel("[X/Fe]", size = 14)
# Plot values
if labs is None:
labs = sys.argv[1:]
ii = 0
for dens in finalValues:
# Plot first range
first, last = contIndx[0]
if lstyles is None:
lin, = plt.plot(xx[first:last], dens[first:last],
label = labs[ii], lw = 2)
else:
lin, = plt.plot(xx[first:last], dens[first:last], lstyles[ii],
label = labs[ii], lw = 2)
# Get color and line style
col, lst = lin.get_color(), lin.get_linestyle()
colStyle = col + lst
for elem in contIndx[1:]:
first, last = elem
plt.plot(xx[first:last], dens[first:last], colStyle, lw = 2)
ii += 1
# Set floating text
namAtm = {"Co":27, | "Ge":32, "Se":34, "Kr":36, "Sr":38, "Zr":40,
"Mo":42, "Pd":46, "Cd":48, "Sn":50, "Te":52, "Ba":56,
"Ce":58, "Nd":60, "Sm":62, "Gd":64, "Dy":66, "Er":68,
"Yb":70, "Hf":72, "W":74, "Os":76, "Hg":80, "Pb":82,
"Rb":37, "Cs":55}
rNamAtm = ["Rb", "Cs"]
for name in namAtm:
| yVal = 0
for ii in range(len(xx)):
if xx[ii] == namAtm[name]:
yVal = finalValues[-1][ii]
break
plt.text(namAtm[name] - 0.5, yVal*1.01, name, size = 14)
if name in rNamAtm:
plt.plot(namAtm[name], yVal, "ro")
else:
plt.plot(namAtm[name], yVal, "ko")
plt.legend(loc=0, ncol = 2)
plt.text(30, 1.1, "3M$_\odot$", fontsize = 16)
plt.show()
if __name__ == "__main__":
main()
|
ptroja/spark2014 | testsuite/gnatprove/tests/PC07-016__flow_write_object_to_ali/test.py | Python | gpl-3.0 | 56 | 0 | fr | om test_support import *
do_flow(opt=["--mode=fl | ow"])
|
alexander-emelyanov/microblog | manage.py | Python | mit | 305 | 0.003279 | #!env/bin/python
from flask.ext.script imp | ort Manager
from flask.ext.migrate import Migrate, MigrateCommand
from app import app, db
app.config.from_object('config')
migrate = Migrate(app, db)
manager = Manager(a | pp)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run() |
eeneku/baller | src/engine/system.py | Python | gpl-3.0 | 235 | 0.012766 | # -*- cod | ing: utf-8 -*-
class System(object):
""" This is the father class for all different systems. """
def __init__(self, *args, **kwargs):
pass
def update(self, dt):
raise NotImplementedEr | ror
|
btengels/supergametools | __init__.py | Python | gpl-3.0 | 101 | 0 | """
mats | tat docstrings
"""
# from supergame import supergame
# from supergamet | ools import supergame
|
arfar/art-py | apis.py | Python | gpl-2.0 | 2,779 | 0 | import urllib
import urllib2
import json
import difflib
import pylast
class iTunesAlbumArt(object):
def __init__(self):
self.base_url = 'https://itunes.apple.com/search'
self.limit = 50
def _form_url(self, artist):
args_dict = {'limit': self.limit,
'term': artist}
args_string = urllib.urlencode(args_dict)
search_url = self.base_url + '?' + args_string
return search_url
def _get_largest_pic_url(self, pic_100_url):
resolutions_to_try = [
'1500x1500',
'1200x1200',
'900x900',
'600x600',
'300x300',
'100x100'
]
head, _, tail = pic_100_url.rpartition('100x100')
for resolution in resolutions_to_try:
try:
potential_pic_url = head + resolution + tail
urllib2.urlopen(potential_pic_url)
except ValueError:
# URL not well formatted
continue
except urllib2.URLError:
# Doesn't seem to exist
continue
break
return potential_pic_url
def _find_album(self, tracks_by_artist, album):
for track in tracks_by_artist:
if track.get('collectionName', None):
difference = difflib.SequenceMatcher(None,
track['collectionName'],
album).ratio()
if difference > 0.5:
return track
return None
def find_art(self, artist, album):
search_url = self._form_url(artist)
response = urllib2.urlopen(search_url)
response_json = json.loads(response.read())
if response_json['resultCount'] = | = 0:
return None
tracks = response_json['results']
track = self._find_album(tracks, album)
if not track:
return None
| large_picture_url = self._get_largest_pic_url(track['artworkUrl100'])
return large_picture_url
class LastFMAlbumArt(object):
"""
Trivially stupid stub
I just wanted to make it look consistant with the iTunes one
"""
def __init__(self, key=None, secret=None):
if not key or not secret:
print 'Last.fm API Key and Secret required'
return None
self.api = pylast.LastFMNetwork(
api_key=key,
api_secret=secret
)
def find_art(self, artist, album_name):
try:
album = self.api.get_album(artist, album_name)
pic_url = album.get_cover_image(pylast.COVER_EXTRA_LARGE)
except pylast.WSError:
pic_url = None
return pic_url
|
FeiZhan/Algo-Collection | answers/leetcode/Generate Parentheses/Generate Parentheses.py | Python | mit | 597 | 0.001675 | class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
parent_list = []
self.helper(0, n, "", parent_list)
return parent_list
def helper(self, left, total, parent, parent_list):
if left >= total and len(parent) >= total + total:
| parent_list.append(parent)
return
if left < total:
self.helper(left + 1, total, parent + '(', parent_list)
if len(parent) < left + left:
| self.helper(left, total, parent + ')', parent_list)
|
vitaliykomarov/NEUCOGAR | nest/code_generator/run/generated/data.py | Python | gpl-2.0 | 11,367 | 0.012404 | import nest
import numpy as np
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True, 'local_num_threads': 4, 'resolution': 0.1})
number_of_neuron = 18567530
DEFAULT = 10
raphenucleus = (
)
lateralcortex_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if lateralcortex_5HT_NN < DEFAULT : lateralcortex_5HT_NN = DEFAULT
lateralcortex = (
{'Name': 'lateralcortex[lateralcortex_5HT]', 'NN': lateralcortex_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', lateralcortex_5HT_NN)},
)
lateralcortex_5HT = 0
Basalganglia_5HT_NN = int(2593900 / 18567530 * number_of_neuron)
if Basalganglia_5HT_NN < DEFAULT : Basalganglia_5HT_NN = DEFAULT
Basalganglia = (
{'Name': 'Basalganglia[Basalganglia_5HT]', 'NN': Basalganglia_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', Basalganglia_5HT_NN)},
)
Basalganglia_5HT = 0
entorhinalcortex_5HT_NN = int(635000 / 18567530 * number_of_neuron)
if entorhinalcortex_5HT_NN < DEFAULT : entorhinalcortex_5HT_NN = DEFAULT
entorhinalcortex = (
{'Name': 'entorhinalcortex[entorhinalcortex_5HT]', 'NN': entorhinalcortex_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', entorhinalcortex_5HT_NN)},
)
entorhinalcortex_5HT = 0
medialcortex_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if medialcortex_5HT_NN < DEFAULT : medialcortex_5HT_NN = DEFAULT
medialcortex = (
{'Name': 'medialcortex[medialcortex_5HT]', 'NN': medialcortex_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', medialcortex_5HT_NN)},
)
medialcortex_5HT = 0
locuscoeruleus_5HT_NN = int(1500 / 18567530 * number_of_neuron)
if locuscoeruleus_5HT_NN < DEFAULT : locuscoeruleus_5HT_NN = DEFAULT
locuscoeruleus_DA_NN = int(1500 / 18567530 * number_of_neuron)
if locuscoeruleus_DA_NN < DEFAULT : locuscoeruleus_DA_NN = DEFAULT
locuscoeruleus_NA_NN = int(1500 / 18567530 * number_of_neuron)
if locuscoeruleus_NA_NN < DEFAULT : locuscoeruleus_NA_NN = DEFAULT
locuscoeruleus = (
{'Name': 'locuscoeruleus[locuscoeruleus_5HT]', 'NN': locuscoeruleus_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', locuscoeruleus_5HT_NN)},
{'Name': 'locuscoeruleus[locuscoeruleus_DA]', 'NN': locuscoeruleus_DA_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', locuscoeruleus_DA_NN)},
{'Name': 'locuscoeruleus[locuscoeruleus_NA]', 'NN': locuscoeruleus_NA_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', locuscoeruleus_NA_NN)}
)
locuscoeruleus_5HT = 0
locuscoeruleus_DA = 1
locuscoeruleus_NA = 2
ventraltegmentalarea_5HT_NN = int(61000 / 18567530 * number_of_neuron)
if ventraltegmentalarea_5HT_NN < DEFAULT : ventraltegmentalarea_5HT_NN = DEFAULT
ventraltegmentalarea_DA_NN = int(61000 / 18567530 * number_of_neuron)
if ventraltegmentalarea_DA_NN < DEFAULT : ventraltegmentalarea_DA_NN = DEFAULT
ventraltegmentalarea = (
{'Name': 'ventraltegmentalarea[ventraltegmentalarea_5HT]', 'NN': ventraltegmentalarea_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', ventraltegmentalarea_5HT_NN)},
{'Name': 'ventraltegmentalarea[ventraltegmentalarea_DA]', 'NN': ventraltegmentalarea_DA_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', ventraltegmentalarea_DA_NN)}
)
ventraltegmentalarea_5HT = 0
ventraltegmentalarea_DA = 1
nucleusaccumbens_5HT_NN = int(30000 / 18567530 * number_of_neuron)
if nucleusaccumbens_5HT_NN < DEFAULT : nucleusaccumbens_5HT_NN = DEFAULT
nucleusaccumbens_DA_NN = int(30000 / 18567530 * number_of_neuron)
if nucleusaccumbens_DA_NN < DEFAULT : nucleusaccumbens_DA_NN = DEFAULT
nucleusaccumbens = (
{'Name': 'nucleusaccumbens[nucleusaccumbens_5HT]', 'NN': nucleusaccumbens_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', nucleusaccumbens_5HT_NN)},
{'Name': 'nucleusaccumbens[nucleusaccumbens_DA]', 'NN': nucleusaccumbens_DA_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', nucleusaccumbens_DA_NN)}
)
nucleusaccumbens_5HT = 0
nucleusaccumbens_DA = 1
Cerebralcortex_5HT_NN = int(100 / 18567530 * number_of_neuron)
if Cerebralcortex_5HT_NN < DEFAULT : Cerebralcortex_5HT_NN = DEFAULT
Cerebralcortex = (
{'Name': 'Cerebralcortex[Cerebralcortex_5HT]', 'NN': Cerebralcortex_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', Cerebralcortex_5HT_NN)},
)
Cerebralcortex_5HT = 0
Thalamus_5HT_NN = int(5000000 / 18567530 * number_of_neuron)
if Thalamus_5HT_NN < DEFAULT : Thalamus_5HT_NN = DEFAULT
Thalamus = (
{'Name': 'Thalamus[Thalamus_5HT]', 'NN': Thalamus_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', Thalamus_5HT_NN)},
)
Thalamus_5HT = 0
insularcortex_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if insularcortex_5HT_NN < DEFAULT : insularcortex_5HT_NN = DEFAULT
insularcortex = (
{'Name': 'insularcortex[insularcortex_5HT]', 'NN': insularcortex_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', insularcortex_5HT_NN)},
)
insularcortex_5HT = 0
Rostralgroup_A1_NN = int(6900 / 18567530 * number_of_neuron)
if Rostralgroup_A1_NN < DEFAULT : Rostralgroup_A1_NN = DEFAULT
Rostralgroup_A2_NN = int(6900 / 18567530 * number_of_neuron)
if Rostralgroup_A2_NN < DEFAULT : Rostralgroup_A2_NN = DEFAULT
Rostralgroup = (
{'Name': 'Rostralgroup[Rostralgroup_A1]', 'NN': Rostralgroup_A1_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', Rostralgroup_A1_NN)},
{'Name': 'Rostralgroup[Rostralgroup_A2]', 'NN': Rostralgroup_A2_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', Rostralgroup_A2_NN)}
)
Rostralgroup_A1 = 0
Rostralgroup_A2 = 1
Caudalgroup = (
)
septum_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if septum_5HT_NN < DEFAULT : septum_5HT_NN = DEFAULT
septum = (
{'Name': 'septum[septum_5HT]', 'NN': septum_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', septum_5HT_NN)},
)
septum_5HT = 0
hypothalamus_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if hypothalamus_5HT_NN < DEFAULT : hypothalamus_5HT_NN = DEFAULT
hypothalamus = (
{'Name': 'hypothalamus[hypothalamus_5HT]', 'NN': hypothalamus_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', hypothalamus_5HT_NN)},
)
hypothalamus_5HT = 0
RMg = (
)
| hippocampus_5HT_NN = int(4260000 / 18567530 * number_of_neuron)
if hippocampus_5HT_NN < DEFAULT : hippocampus_5HT_NN = DEFAULT
hippocampus = (
{'Name': 'hippocampus[hippocampus_5HT]', 'NN': hippocampus_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', hippocampus_5HT_NN)},
)
hippocampus_5HT = 0
RPa = (
)
lateraltegmentalarea_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if lateraltegmentalarea_5HT_NN < DEFAULT : lateraltegmentalarea_5HT_NN = DEFAULT
lateralte | gmentalarea = (
{'Name': 'lateraltegmentalarea[lateraltegmentalarea_5HT]', 'NN': lateraltegmentalarea_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', lateraltegmentalarea_5HT_NN)},
)
lateraltegmentalarea_5HT = 0
neocortex_5HT_NN = int(100 / 18567530 * number_of_neuron)
if neocortex_5HT_NN < DEFAULT : neocortex_5HT_NN = DEFAULT
neocortex = (
{'Name': 'neocortex[neocortex_5HT]', 'NN': neocortex_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', neocortex_5HT_NN)},
)
neocortex_5HT = 0
bednucleusofthestriaterminalis_5HT_NN = int(1000 / 18567530 * number_of_neuron)
if bednucleusofthestriaterminalis_5HT_NN < DEFAULT : bednucleusofthestriaterminalis_5HT_NN = DEFAULT
bednucleusofthestriaterminalis = (
{'Name': 'bednucleusofthestriaterminalis[bednucleusofthestriaterminalis_5HT]', 'NN': bednucleusofthestriaterminalis_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', bednucleusofthestriaterminalis_5HT_NN)},
)
bednucleusofthestriaterminalis_5HT = 0
DR_5HT_NN = int(5800 / 18567530 * number_of_neuron)
if DR_5HT_NN < DEFAULT : DR_5HT_NN = DEFAULT
DR = (
{'Name': 'DR[DR_5HT]', 'NN': DR_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', DR_5HT_NN)},
)
DR_5HT = 0
MnR_5HT_NN = int(1100 / 18567530 * number_of_neuron)
if MnR_5HT_NN < DEFAULT : MnR_5HT_NN = DEFAULT
MnR = (
{'Name': 'MnR[MnR_5HT]', 'NN': MnR_5HT_NN, 'Model': 'iaf_psc_alpha', 'IDs': nest.Create('iaf_psc_alpha', MnR_5HT_NN)},
)
MnR_5HT = 0
reticularformation_5HT_NN = i |
gburd/dbsql | src/py/doc/code/connect_db_1.py | Python | gpl-3.0 | 69 | 0 | from pysqlit | e2 import dbapi2 as sqlite
con = sqlite.connect("mydb | ")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.