text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Alembic CLI with simplified configuration.
Alembic is a terrific tool that makes some unfortunate choices about configuration,
expecting a verbose directory structure with several layers of configuration.
This module monkey patches Alembic's CLI tool to work better within a microcosm.
To use this entry point instead of the Alembic CLI:
0. Don't use `alembic init`
1. Write models that extend `microcosm_postgres.models.Model`
2. Add a `migrations` directory within your source tree.
This directory does not need to be an importable Python module, but it should
be included as part of your distribution so that migrations ship with the service.
3. Initialize your object graph (including your models):
from microcosm.api import create_object_graph
from microcosm_postgres.example import Company, Employee # noqa
graph = create_object_graph(name="example", testing=True)
The migrations directory is loaded by default assuming that the `name` attribute
is a module name (though this behavior can be customized; see `microcosm.metadata:Metadata`)
or by wiring up a string as the "migrations_dir" component of the graph.
4. Write an entry point that invokes the `main` function with the object graph:
main(graph)
"""
from contextlib import contextmanager
from os.path import isdir, join
from shutil import rmtree
from sys import argv
from tempfile import mkdtemp
from textwrap import dedent
from alembic import context
from alembic.config import CommandLine, Config
from alembic.script import ScriptDirectory
from microcosm.errors import LockedGraphError, NotBoundError
from microcosm_postgres.models import Model
def make_alembic_config(temporary_dir, migrations_dir):
    """
    Build a usable `alembic.config.Config` without an `alembic.ini` file.

    Alembic normally reads `alembic.ini` to find everything else; that file is an
    unnecessary complication around what amounts to single-valued configuration.
    Instead, populate a `Config` object directly with the two values we care about.

    :param temporary_dir: generated script directory (holds `script.py.mako`)
    :param migrations_dir: where migration revisions live in the source tree
    :returns: a usable instance of `alembic.config.Config`
    """
    config = Config()
    for option, value in (("temporary_dir", temporary_dir),
                          ("migrations_dir", migrations_dir)):
        config.set_main_option(option, value)
    return config
def make_script_directory(cls, config):
    """
    Build a `ScriptDirectory` from our simplified config, bypassing Alembic's
    on-disk layout (`env.py`, migrations dir, `script.py.mako` template).

    The temporary directory acts as the script directory; the real migrations
    live in the source tree and are injected as a version location.

    :param cls: the `ScriptDirectory` class (bound via `classmethod`)
    :param config: a `Config` produced by `make_alembic_config`
    """
    options = {
        name: config.get_main_option(name)
        for name in ("temporary_dir", "migrations_dir")
    }
    return cls(
        dir=options["temporary_dir"],
        version_locations=[options["migrations_dir"]],
    )
def get_alembic_environment_options(graph):
    """
    Fetch `alembic.environment_options` from the graph's config, if wired.

    Returns an empty dict when the graph has no such configuration (or the
    graph/config cannot be resolved).
    """
    try:
        options = graph.config.alembic.environment_options
    except (AttributeError, LockedGraphError, NotBoundError):
        options = dict()
    return options
def run_online_migration(self):
    """
    Run an online migration using microcosm configuration.
    This function takes the place of the `env.py` file in the Alembic migration.

    Patched onto `ScriptDirectory` as `run_env` (see `patch_script_directory`),
    so `self` is a `ScriptDirectory` instance carrying an injected `graph` attribute.
    """
    # the graph's postgres component acts as the SQLAlchemy connectable
    connectable = self.graph.postgres
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            # assumes that all models extend our base
            target_metadata=Model.metadata,
            **get_alembic_environment_options(self.graph),
        )
        with context.begin_transaction():
            context.run_migrations()
def make_script_py_mako():
    """
    Generate the template for new migrations.
    This function takes the place of the `script.py.mako` file in the alembic directory.

    NOTE(review): blank lines / indentation of the template were reconstructed;
    confirm the generated migration files match the project's expected layout.
    """
    return dedent('''\
        """
        ${message}

        Revision ID: ${up_revision}
        Revises: ${down_revision | comma,n}
        Create Date: ${create_date}

        """
        from alembic import op
        import sqlalchemy as sa
        ${imports if imports else ""}

        # revision identifiers, used by Alembic.
        revision = ${repr(up_revision)}
        down_revision = ${repr(down_revision)}
        branch_labels = ${repr(branch_labels)}
        depends_on = ${repr(depends_on)}


        def upgrade():
            ${upgrades if upgrades else "pass"}


        def downgrade():
            ${downgrades if downgrades else "pass"}
        ''')
@contextmanager
def patch_script_directory(graph):
    """
    Monkey patch the `ScriptDirectory` class, working around configuration assumptions.

    Changes include:
     -  Using a generated, temporary directory (with a generated, temporary
        `script.py.mako`) instead of the assumed script directory.
     -  Using our `make_script_directory` function instead of the default
        `ScriptDirectory.from_config`.
     -  Using our `run_online_migration` function instead of the default
        `ScriptDirectory.run_env`.
     -  Injecting the current object graph.

    Yields the temporary directory; originals are restored (and the temporary
    directory removed) on exit.
    """
    temporary_dir = mkdtemp()
    original_from_config = getattr(ScriptDirectory, "from_config")
    original_run_env = getattr(ScriptDirectory, "run_env")

    # write the revision template into the temporary script directory
    with open(join(temporary_dir, "script.py.mako"), "w") as template_file:
        template_file.write(make_script_py_mako())
        template_file.flush()

    # install the patched script directory factory and migration runner
    setattr(ScriptDirectory, "from_config", classmethod(make_script_directory))
    setattr(ScriptDirectory, "run_env", run_online_migration)
    setattr(ScriptDirectory, "graph", graph)
    try:
        yield temporary_dir
    finally:
        # undo the patching in reverse order and clean up the temp dir
        delattr(ScriptDirectory, "graph")
        setattr(ScriptDirectory, "run_env", original_run_env)
        setattr(ScriptDirectory, "from_config", original_from_config)
        rmtree(temporary_dir)
def get_migrations_dir(graph):
    """
    Resolve the migrations directory path.

    Prefer an explicitly wired "migrations_dir" component of the object graph;
    otherwise fall back to the metadata's path resolution facilities.

    :raises Exception: if the resolved directory does not exist
    """
    try:
        path = graph.migrations_dir
    except (LockedGraphError, NotBoundError):
        path = graph.metadata.get_path("migrations")
    if isdir(path):
        return path
    raise Exception("Migrations dir must exist: {}".format(path))
def main(graph, *args):
    """
    Entry point for invoking Alembic's `CommandLine`.

    Alembic's CLI defines its own argument parsing and command invocation; we want
    to use these directly but define configuration our own way. This function takes
    the behavior of `CommandLine.main()` and reinterprets it with our patching.

    :param graph: an initialized object graph
    :param args: optional CLI arguments; defaults to `sys.argv[1:]`
    """
    migrations_dir = get_migrations_dir(graph)
    cli = CommandLine()
    options = cli.parser.parse_args(args or argv[1:])
    if not hasattr(options, "cmd"):
        cli.parser.error("too few arguments")
    command = options.cmd[0]
    if command.__name__ == "init":
        cli.parser.error("Alembic 'init' command should not be used in the microcosm!")
    with patch_script_directory(graph) as temporary_dir:
        config = make_alembic_config(temporary_dir, migrations_dir)
        cli.run_cmd(config, options)
| {
"content_hash": "86c763522233620a23e9b9393a7b6a44",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 104,
"avg_line_length": 33.15584415584416,
"alnum_prop": 0.7050528789659224,
"repo_name": "globality-corp/microcosm-postgres",
"id": "61ab11a7247c300932dd1a0c0f931454eecd87cb",
"size": "7659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microcosm_postgres/migrate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3310"
},
{
"name": "Python",
"bytes": "149073"
},
{
"name": "Shell",
"bytes": "1587"
}
],
"symlink_target": ""
} |
import os
from os import path
from pathlib import Path
from ...exechelper import func_exec_run
from ...fileop import PosixFileSystem
from ....util import Utility
# Absolute path to the bundled bwa binary: <this module's dir>/bin/bwa
bwa = path.join(path.abspath(path.dirname(__file__)), path.join('bin', 'bwa'))
def build_bwa_index(ref):
    """Build the BWA index for the given reference file via `bwa index`."""
    return func_exec_run(bwa, 'index', ref)
def _pop_param(args, paramindex, kwargs, name, required):
    """
    Fetch parameter `name` from kwargs, else from positional args at `paramindex`.

    :returns: (value, new_paramindex); value is None for a missing optional param
    :raises ValueError: when a required parameter is missing
    """
    if name in kwargs:
        return kwargs[name], paramindex
    if len(args) > paramindex:
        return args[paramindex], paramindex + 1
    if required:
        raise ValueError("Argument error")
    return None, paramindex


def run_bwa(*args, **kwargs):
    """
    Run `bwa mem` over a reference and one or two read files.

    Parameters may be passed by keyword (`ref`, `data1`, `data2`, `output`) or
    positionally in that order; `data2` and `output` are optional. When `output`
    is omitted, a `.sam` file next to `data1` is used. The reference index is
    built on demand.

    :returns: (via the trailing return) the output path, stripped of the root dir
    :raises ValueError: on missing required arguments or when bwa produced no output
    """
    paramindex = 0
    ref, paramindex = _pop_param(args, paramindex, kwargs, 'ref', required=True)
    ref = Utility.get_normalized_path(ref)

    # build the index (<ref stem>.bwt alongside the reference) if absent
    indexpath = Path(ref).stem + ".bwt"
    indexpath = os.path.join(os.path.dirname(ref), os.path.basename(indexpath))
    if not os.path.exists(indexpath):
        build_bwa_index(ref)

    data1, paramindex = _pop_param(args, paramindex, kwargs, 'data1', required=True)
    data1 = Utility.get_normalized_path(data1)

    # BUG FIX: data2/output were left unassigned (NameError) when absent;
    # _pop_param now yields None for missing optional parameters.
    data2, paramindex = _pop_param(args, paramindex, kwargs, 'data2', required=False)
    if data2:
        data2 = Utility.get_normalized_path(data2)

    output, paramindex = _pop_param(args, paramindex, kwargs, 'output', required=False)
    if output:
        output = Utility.get_normalized_path(output)
    else:
        # default: <data1 stem>.sam next to data1
        output = Path(data1).stem + ".sam"
        output = os.path.join(os.path.dirname(data1), os.path.basename(output))
        output = Utility.get_normalized_path(output)

    if not os.path.exists(path.dirname(output)):
        os.makedirs(path.dirname(output))
    if os.path.exists(output):
        os.remove(output)

    cmdargs = ['mem', ref, data1]
    if data2:
        cmdargs.append(data2)
    # NOTE(review): "-o <file>" is passed as a single token; verify func_exec_run
    # splits it (or that bwa tolerates it) — confirm against exechelper.
    cmdargs.append("-o {0}".format(output))
    # NOTE(review): the original `paramindex + 1` skips the first remaining
    # positional argument; preserved as-is pending confirmation of intent.
    for arg in args[paramindex + 1:]:
        cmdargs.append(arg)

    _, err = func_exec_run(bwa, *cmdargs)

    fs = PosixFileSystem(Utility.get_rootdir(2))
    if not os.path.exists(output):
        raise ValueError("bwa could not generate the file " + fs.strip_root(output) + " due to error " + err)
return fs.strip_root(output) | {
"content_hash": "cca11eb7fc20bc56629507d0414da366",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 109,
"avg_line_length": 28.102272727272727,
"alnum_prop": 0.5830974524868581,
"repo_name": "mainulhossain/phenoproc",
"id": "d96fb1378a4b4ef7ee4438edf5f2a425f49737f6",
"size": "2473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/biowl/libraries/bwa/adapter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9109"
},
{
"name": "HTML",
"bytes": "101359"
},
{
"name": "Java",
"bytes": "6668"
},
{
"name": "JavaScript",
"bytes": "6630"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "560029"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
import os
import subprocess
import paramiko
import argparse
import json
# Manage the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--update', action='store_true', help='Update the application')
parser.add_argument('-b', '--build', action='store_true', help='If true, build application before update. Defaults to true.')
parser.add_argument('-f', '--file', help='Settings file')
args = parser.parse_args()

# store_true flags already produce booleans, so no if/else dance is needed
update = bool(args.update)
build = bool(args.build)

# Read the settings from file
with open(args.file) as settings_file:
    settings = json.load(settings_file)

# Set up some variables for improved legibility
local = settings['local']
server = settings['server']
db = settings['db']
##
# Steps
#
# Build the app
# Upload app at appropriate locations
# Unpack
# Install dependencies
# Generate and upload the startup file
# Run the application
##

# Build the app locally with meteor (only when requested)
temp_folder = os.path.expanduser('~/%s/%s_build' % (local['path'], local['app']))
if build:
    build_cmds = [
        'cd %s/%s' % (local['path'], local['app']),
        'meteor build %s --architecture os.linux.x86_64 --server %s' % (temp_folder, server['url']),
    ]
    print('Building application...')
    build_output = subprocess.check_output(';'.join(build_cmds), shell=True)
    print(build_output.decode(encoding='utf-8'))
# Connect to server and upload the app built
print('Connecting to the server...')
conn = paramiko.SSHClient()
conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
conn.connect(server['remote'], username=server['user'], password=server['password'])
print('Connection established!')

print('Starting SFTP session...')
sftp = conn.open_sftp()
print('SFTP session open!')
sftp.chdir('webapps/%s' % server['app'])

print('Start uploading app archive...')
sftp.put('%s/%s.tar.gz' % (temp_folder, local['app']), '%s.tar.gz' % local['app'])
print('Upload done!')

# Unpack the uploaded archive into a fresh bundle directory
print('Extracting archive files...')
unpack_cmds = (
    'cd ~/webapps/%s' % server['app'],
    'rm -rf bundle',
    'tar -zxf %s.tar.gz' % local['app'],
    'rm %s.tar.gz' % local['app'],
)
si, so, se = conn.exec_command(';'.join(unpack_cmds))
print(''.join(so.readlines()))
print('Files extracted!')

# Install the server-side npm dependencies of the bundle
print('Installing dependencies...')
install_cmds = (
    'cd ~/webapps/%s/bundle/programs/server' % server['app'],
    'PATH=~/webapps/%s/bin/:$PATH' % server['app'],
    'npm install --silent',
)
si, so, se = conn.exec_command(';'.join(install_cmds))
print(''.join(so.readlines()))
print('Dependencies installed!')
# Generate and upload the startup file (only on first deploy, not on update)
if not update:
    print('Generate startup file...')
    base = '/home/%s/webapps/%s' % (server['user'], server['app'])
    # POSIX sh script: start node unless it is already running under our user
    lines = [
        '#!/bin/sh',
        'mkdir -p %s/run' % base,
        'export MONGO_URL=%s' % db['mongodb'],
        'export ROOT_URL=%s' % server['url'],
        'export PORT=%s' % server['port'],
        'pid=$(/sbin/pidof %s/bin/node)' % base,
        'if echo "$pid" | grep -q " "; then',
        '    pid=""',
        'fi',
        'if [ -n "$pid" ]; then',
        '    user=$(ps -p $pid -o user:20 | tail -n 1)',
        # BUG FIX: the process-owner check was hard-coded to "gionas";
        # use the configured deployment user instead
        '    if [ $user = "%s" ]; then' % server['user'],
        # BUG FIX: sh has no exit(0) call syntax; it must be `exit 0`
        '        exit 0',
        '    fi',
        'fi',
        'nohup %s/bin/node %s/bundle/main.js > /dev/null 2>&1 &' % (base, base),
        '/sbin/pidof %s/bin/node > %s/run/node.pid' % (base, base)
    ]
    # BUG FIX: close (and thereby flush) the file before uploading it
    with open('%s/start' % temp_folder, 'w') as start_file:
        start_file.write('\n'.join(lines))
    print('Remove the current start file...')
    cmds = [
        'cd ~/webapps/%s/bin' % server['app'],
        'rm start'
    ]
    si, so, se = conn.exec_command(';'.join(cmds))
    # BUG FIX: exec_command always returns (truthy) channel file objects, so
    # `if not se:` could never succeed and the script always exited; inspect
    # the actual stderr content instead
    errors = se.readlines()
    if not errors:
        print('Start file removed!')
    else:
        print(''.join(errors))
        exit(1)
    print('Uploading new start file...')
    # NOTE(review): chdir here is relative to the earlier chdir('webapps/<app>');
    # confirm the intended remote working directory
    sftp.chdir('webapps/%s/bin' % server['app'])
    # BUG FIX: sftp.put requires an explicit remote path argument
    sftp.put('%s/start' % temp_folder, 'start')
    print('Start file uploaded!')
# Start the application (if everything worked out fine)
print('(re)Starting the app...')
restart_cmds = (
    '~/webapps/%s/bin/stop' % server['app'],
    '~/webapps/%s/bin/start' % server['app'],
)
si, so, se = conn.exec_command(';'.join(restart_cmds))
print('Meteor application started')

conn.close()
print('All done! Good bye!')
| {
"content_hash": "f1774a3b4a3b9379a7b53f79457a051c",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 125,
"avg_line_length": 28.13157894736842,
"alnum_prop": 0.6057062675397568,
"repo_name": "igio/webfaction-meteor",
"id": "152460c5cf031ec3ebf63d79a660c171e08a2503",
"size": "4300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4300"
}
],
"symlink_target": ""
} |
import os, sys
from optparse import OptionParser
from configparser import ConfigParser
class Config():
    """
    Thin wrapper around `ConfigParser` that loads a conventionally located
    `Config.ini` and exposes "node.field" style lookups.
    """
    # parsed ConfigParser instance (set by parser())
    __config = ""
    # configuration file name
    __config_file = "Config.ini"

    def __init__(self, app_name=""):
        """
        Locate and parse the ini file: the framework-level `../../Conf/Config.ini`
        by default, or the app-specific `../../../App/<app_name>/Conf/Config.ini`.
        """
        # directory containing this module
        cur_path = os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        config_path = cur_path + "/../../Conf/" + self.__config_file
        if app_name != "":
            config_path = cur_path + "/../../../App/" + app_name + "/Conf/" + self.__config_file
        self.parser(config_path)

    def parser(self, file_name=""):
        """Parse the given ini file; exit the process if it does not exist."""
        if not os.path.isfile(file_name):
            print("you input ini file not is file")
            sys.exit()
        self.__config = ConfigParser()
        # BUG FIX: readfp() was removed in Python 3.12; read_file() inside a
        # context manager is the supported API and avoids leaking the handle
        with open(file_name) as config_file:
            self.__config.read_file(config_file)

    def get(self, param_name=""):
        """
        Return the value for "node.field"; a bare "field" reads from the
        section named "default".
        """
        parts = param_name.split(".")
        if len(parts) == 2:
            node, field = parts
        else:
            node = "default"
            field = parts[0]
        return self.__config.get(node, field)

    def getSection(self, name="default"):
        """Return all (key, value) pairs of the given section."""
        return self.__config.items(name)
# Smoke test when run as a script: load the default Config.ini and print one
# value (requires ../../Conf/Config.ini to exist relative to this module).
if __name__ == "__main__":
    c = Config()
    print(c.get("db.deploy_type"))
| {
"content_hash": "629e6c8d7da88810391757bc1ab76207",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 90,
"avg_line_length": 27.41176470588235,
"alnum_prop": 0.5221745350500715,
"repo_name": "lxy235/lserver",
"id": "3b2ad4b9a91617214222728bf0863897f7a784fc",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Lib/Configs/Config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43453"
}
],
"symlink_target": ""
} |
from zipfile import ZipFile
from scar.utils import FileUtils, SysUtils
def _extract_udocker_zip(supervisor_zip_path) -> str:
    """
    Extract the bundled 'udocker.zip' from the supervisor zip into the tmp dir.

    BUG FIX: the return annotation said `-> None` but the function returns the
    extracted file's path (or "" when no member ends with 'udocker.zip').

    :param supervisor_zip_path: path to the supervisor distribution zip
    :returns: path of the extracted udocker.zip, or "" if not found
    """
    file_path = ""
    with ZipFile(supervisor_zip_path) as supervisor_zip:
        for member in supervisor_zip.namelist():
            if member.endswith("udocker.zip"):
                file_path = FileUtils.join_paths(FileUtils.get_tmp_dir(), member)
                supervisor_zip.extract(member, FileUtils.get_tmp_dir())
                break
    return file_path
class Udocker():
    """
    Manage a bundled udocker installation inside the lambda payload folder:
    install it from the supervisor zip, load a container image with it, and
    point the lambda environment at the packaged registry.
    """

    _CONTAINER_NAME = "udocker_container"

    # BUG FIX: resources_info was annotated `str` but is subscripted as a
    # nested dict throughout (resources_info['lambda'][...]).
    def __init__(self, resources_info: dict, tmp_payload_folder_path: str, supervisor_zip_path: str):
        """Install udocker from the supervisor zip into the payload folder."""
        self.resources_info = resources_info
        self._tmp_payload_folder_path = tmp_payload_folder_path
        # udocker installation dir inside the payload folder
        self._udocker_dir = FileUtils.join_paths(self._tmp_payload_folder_path, "udocker")
        # remembers a pre-existing UDOCKER_DIR value so it can be restored
        self._udocker_dir_orig = ""
        self._udocker_code = FileUtils.join_paths(self._udocker_dir, "udocker.py")
        self._udocker_exec = ['python3', self._udocker_code]
        self._install_udocker(supervisor_zip_path)

    def _install_udocker(self, supervisor_zip_path: str) -> None:
        """Extract udocker.zip from the supervisor bundle into the payload folder."""
        udocker_zip_path = _extract_udocker_zip(supervisor_zip_path)
        with ZipFile(udocker_zip_path) as udocker_zip:
            udocker_zip.extractall(self._tmp_payload_folder_path)

    def _save_tmp_udocker_env(self):
        """Point UDOCKER_DIR at the payload dir, saving any previous value."""
        # Avoid override global variables
        if SysUtils.is_variable_in_environment("UDOCKER_DIR"):
            self._udocker_dir_orig = SysUtils.get_environment_variable("UDOCKER_DIR")
        # Set temporal global vars
        SysUtils.set_environment_variable("UDOCKER_DIR", self._udocker_dir)

    def _restore_udocker_env(self):
        """Restore (or remove) the UDOCKER_DIR variable set by _save_tmp_udocker_env."""
        if self._udocker_dir_orig:
            SysUtils.set_environment_variable("UDOCKER_DIR", self._udocker_dir_orig)
        else:
            SysUtils.delete_environment_variable("UDOCKER_DIR")

    def _set_udocker_local_registry(self):
        """Point the lambda env vars at the udocker registry shipped in the package."""
        self.resources_info['lambda']['environment']['Variables']['UDOCKER_REPOS'] = '/var/task/udocker/repos/'
        self.resources_info['lambda']['environment']['Variables']['UDOCKER_LAYERS'] = '/var/task/udocker/layers/'

    def prepare_udocker_image(self):
        """Load the configured container image file and record the resulting image name."""
        self._save_tmp_udocker_env()
        cmd_out = SysUtils.execute_command_with_msg(
            self._udocker_exec + ["load", "-i",
                                  self.resources_info.get('lambda').get('container').get('image_file')],
            cli_msg="Loading image file")
        # Get the image name from the command output
        self.resources_info['lambda']['container']['image'] = cmd_out.split('\n')[1]
        self._set_udocker_local_registry()
        self._restore_udocker_env()
| {
"content_hash": "5bdb3154b48dc2321bf004ce3b23251a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 144,
"avg_line_length": 46,
"alnum_prop": 0.6224637681159421,
"repo_name": "grycap/scar",
"id": "17d486dcbe0202798461c812254dcda647894bd5",
"size": "3338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scar/providers/aws/udocker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "324748"
}
],
"symlink_target": ""
} |
# Change the wallet passphrase over the local JSON-RPC interface.
# NOTE(review): uses raw_input, so this is a Python 2 script.
from jsonrpc import ServiceProxy
# connect to the wallet RPC service on localhost
access = ServiceProxy("http://127.0.0.1:18636")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
| {
"content_hash": "eb017b6b158cf3bc6b98b62a6f3cabff",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 44.2,
"alnum_prop": 0.7601809954751131,
"repo_name": "DannyHex/Hexagon",
"id": "4c23380302db9c787adb794c6b7946ecbf10bf13",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletchangepass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32394"
},
{
"name": "C++",
"bytes": "2605857"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "13377"
},
{
"name": "NSIS",
"bytes": "5918"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69716"
},
{
"name": "QMake",
"bytes": "14749"
},
{
"name": "Roff",
"bytes": "18289"
},
{
"name": "Shell",
"bytes": "16339"
}
],
"symlink_target": ""
} |
"""
Author: Diego da Hora
email: diego.da-hora@lip6.fr
Date: 23/06/2013
Updated by: Julio Adriazola
email: jcadriaz@uc.cl
Date: 15/02/2016
"""
import sqlalchemy, re, datetime, time
from sqlalchemy import Column, Integer, String, Float, BigInteger, TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import INET
from DAO import DAO
from log import *
''' EXAMPLE FLOW OUTPUT
TCP connection 6211:
host ris: 69.17.253.32:52223
host rit: 0.0.0.0:9547
complete conn: yes
first packet: 1292023717.880717
last packet: 1292024495.641582
elapsed time: 777.760864
total packets: 9342
filename: separate/35204ECB_7387-4F8B-B259-4EC1975723AC___Fri_Dec_10_20:40:13_2010aa
'''
# Column order used when exporting flows (see getDefinitionArray()).
CSV_ARRAY = ['pcap_id', 'flow_code', 'source_ip', 'source_port', 'destination_ip', 'destination_port', 'protocol', 'first_packet', 'last_packet', 'status', 'total_time', 'total_packets', 'idle_time_ab', 'idle_time_ba', 'bytes_ab', 'bytes_ba']
# Database table name backing the Flow model.
TABLE = 'pcap_flow'
class Flow(DAO.Base):
    """
    SQLAlchemy model for a single network flow (one tcptrace "TCP connection"
    record), stored in the `pcap_flow` table.
    """
    __tablename__ = TABLE
    # surrogate primary key (maps to the "id" column)
    flowid = Column('id',Integer, primary_key=True)
    # capture (pcap) this flow belongs to
    pcap_id = Column(Integer, nullable=False)
    # direction code built from tcptrace host letters, e.g. "a2b"
    flow_code = Column(String, nullable=False)
    source_ip = Column(INET, nullable=False)
    source_port = Column(Integer, nullable=False)
    destination_ip = Column(INET, nullable=False)
    destination_port = Column(Integer, nullable=False)
    # single-letter protocol code: 'T', 'U' or 'I' (see compareProtocol)
    protocol = Column(String(1), nullable=False)
    first_packet = Column(TIMESTAMP)
    last_packet = Column(TIMESTAMP)
    # tcptrace "complete conn" status text
    status = Column(String)
    # elapsed time in seconds
    total_time = Column(Float)
    total_packets = Column(Integer)
    # max idle times (seconds) in each direction
    idle_time_ab = Column(Float)
    idle_time_ba = Column(Float)
    # unique bytes sent in each direction
    bytes_ab = Column(BigInteger)
    bytes_ba = Column(BigInteger)
    # NOT NEEDED ANYMORE
    # gt_application_name = Column(String) #GT?????
    # ia_application_name = Column(String) #IA?????
    # dns_destionation_name = Column(String)
    # #appid = Column(Integer)
    # #ianaappid = Column(Integer)
    # behavior = Column(String)
    # base_latency = Column(Float)
    # sub_domain = Column(String)
    # domain = Column(String)
    # suffix = Column(String)
    # c_type = Column(ARRAY(String)) #C?????

    def __init__(self, pcap_id, flow_code=None, source_ip=None, source_port=None, destination_ip=None, destination_port=None, protocol='T'):
        """Create a flow for the given pcap; measurement fields start as None."""
        self.pcap_id = pcap_id
        self.flow_code = flow_code
        self.source_ip = source_ip
        self.source_port = source_port
        self.destination_ip = destination_ip
        self.destination_port = destination_port
        self.protocol = protocol
        self.first_packet = None
        self.last_packet = None
        self.status = None
        self.total_time = None
        self.total_packets = None
        self.idle_time_ab = None
        self.idle_time_ba = None
        self.bytes_ab = None
        self.bytes_ba = None

    def __repr__(self):
        return "Flow('%s','%s','%s'.\t%s:%s -> %s:%s - %s\tfrom '%s' to '%s')" % (self.flowid, self.pcap_id, self.flow_code, self.source_ip, self.source_port, self.destination_ip, self.destination_port, self.protocol, self.first_packet, self.last_packet)

    def save(self):
        """Persist this flow through the DAO layer."""
        DAO().create(self)

    def getFlowHash(self, reverse=False):
        """
        Return a hashable (port, ip, port) tuple identifying the flow:
        (source_port, destination_ip, destination_port), or
        (destination_port, source_ip, source_port) when reverse=True.
        """
        if reverse: return (str(self.destination_port), str(self.source_ip), str(self.source_port) )
        else: return (str(self.source_port), str(self.destination_ip), str(self.destination_port) )

    def getBytesSize(self):
        """Return total unique bytes over both directions (None counts as 0)."""
        bytes = 0
        if self.bytes_ab: bytes+= self.bytes_ab
        if self.bytes_ba: bytes+= self.bytes_ba
        return bytes

    def compareProtocol(self, protocol):
        """True if the given protocol name/letter matches this flow's protocol code."""
        protocol = protocol.lower().strip()
        if self.protocol == None:
            return False
        elif self.protocol == 'T':
            return (protocol == 't') or (protocol == 'tcp')
        elif self.protocol == 'U':
            return (protocol == 'u') or (protocol == 'udp')
        elif self.protocol == 'I':
            return (protocol == 'i') or (protocol == 'icmp')
        else:
            # fall back to a case-insensitive direct comparison
            return protocol == self.protocol.lower()

    def getProtocol(self):
        """Return the long protocol name ('tcp'/'udp'/'icmp'), or '' if unknown."""
        protocol = self.protocol.lower().strip()
        if protocol == 't' or protocol == 'tcp':
            return 'tcp';
        elif protocol == 'u' or protocol == 'udp':
            return 'udp'
        elif protocol == 'i' or protocol == 'icmp':
            return 'icmp'
        else:
            return ''

    @staticmethod
    def getDefinitionArray():
        """Return the CSV column definition for this model."""
        return CSV_ARRAY

    @staticmethod
    def getTableName():
        """Return the backing database table name."""
        return TABLE

    @staticmethod
    def readFromTCPTrace(tcptraceOutput,pcap_id):
        """
        Parse tcptrace text output (see EXAMPLE FLOW OUTPUT above) into a list
        of Flow objects for the given pcap_id.

        :param tcptraceOutput: raw tcptrace stdout as a string
        :param pcap_id: id of the capture the flows belong to
        :returns: list of Flow instances
        """
        startTime = time.time()
        objList = []
        obj = None
        readSrc = False          # whether the first "host" line was consumed
        hostLetter = ""          # tcptrace letter of the source host
        for line in tcptraceOutput.rstrip().split('\n'):
            line = line.lower().strip()
            #TCP connection 6211:
            m = re.search("^\s*tcp connection\s+(\d+):",line)
            if m:
                obj = Flow(pcap_id) #New object detected
                obj.bytes_ab = -1
                obj.bytes_ba = -1
                objList.append(obj)
                readSrc = False
            #host ris: 69.17.253.32:52223
            # TODO: Do we want downloads too? Only uploads are registered for now.
            m = re.search("^\s*host\s(\w+):\s+(\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}):(\d+)",line)
            if m:
                # first host line is the source, second is the destination
                if not readSrc:
                    hostLetter = m.group(1)
                    obj.source_ip = m.group(2)
                    obj.source_port = m.group(3)
                    readSrc = True
                else:
                    obj.flow_code = hostLetter + "2" + m.group(1)
                    obj.destination_ip = m.group(2)
                    obj.destination_port = m.group(3)
            else:
                # IPv6 variant of the host line
                m = re.search("^\s*host\s(\w+):\s+([a-f0-9:]*[a-f0-9]*):(\d+)",line)
                if m:
                    log.info("FOUND IPV6: %s%s - %s - %s" % (BLUE,m.group(1),m.group(2),m.group(3)))
                    if not readSrc:
                        hostLetter = m.group(1)
                        obj.source_ip = m.group(2)
                        obj.source_port = m.group(3)
                        readSrc = True
                    else:
                        obj.flow_code = hostLetter + "2" + m.group(1)
                        obj.destination_ip = m.group(2)
                        obj.destination_port = m.group(3)
            #complete conn: yes
            m = re.search("^\s*complete conn:\s+(.*)",line)
            if m:
                obj.status = re.sub(":","",re.sub("\t"," ",m.group(1))).strip() #Replacing \t and :
            #first packet: 1292023717.880717
            m = re.search("^\s*first packet:\s+(\d+.\d+)", line)
            if m:
                obj.first_packet = datetime.datetime.fromtimestamp(float(m.group(1)))
            #last packet: 1292024495.641582
            m = re.search("^\s*last packet:\s+(\d+.\d+)", line)
            if m:
                obj.last_packet = datetime.datetime.fromtimestamp(float(m.group(1)))
            #elapsed time: 0.536392
            m = re.search("^\s*elapsed time:\s+(\d+.\d+)", line)
            if m: obj.total_time = float(m.group(1))
            #idletime max: 36528410.3 ms idletime max: 36528410.2 ms
            m = re.search("^\s*idletime max:\s+(\d+.\d+)\s+ms\s+idletime max:\s+(\d+.\d+)\s+ms", line)
            if m:
                # tcptrace reports milliseconds; store seconds
                obj.idle_time_ab = float(m.group(1))/1000.0
                obj.idle_time_ba = float(m.group(2))/1000.0
            # if obj.srcip ==
            m = re.search("^\s*unique bytes sent:\s+(\d+)\s+unique bytes sent:\s+(\d+)", line)
            if m:
                obj.bytes_ab = int(m.group(1))
                obj.bytes_ba = int(m.group(2))
            #total packets: 9342
            m = re.search("^\s*total packets:\s+(\d+)",line)
            # NOTE(review): stored as a string; SQLAlchemy presumably coerces
            # it to Integer on insert — confirm
            if m: obj.total_packets = m.group(1)
        #Validation
        log.info("FLOWS EXTRACTION OK (%.3fs)" % (time.time() - startTime))
        return objList
# Refresh the DAO's SQLAlchemy metadata so the model above is registered.
DAO.updateMetaData()
#Base.metadata.reflect
#print Base.metadata.tables['trace'].insert()
| {
"content_hash": "132f710df9dc6139573907149a59f64f",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 248,
"avg_line_length": 31.907894736842106,
"alnum_prop": 0.6240549828178694,
"repo_name": "inria-muse/hostview-processing",
"id": "089208dce717c9407f811ef59e544c173243cd6e",
"size": "7294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-src/flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1190131"
},
{
"name": "C++",
"bytes": "11795"
},
{
"name": "DIGITAL Command Language",
"bytes": "13504"
},
{
"name": "JavaScript",
"bytes": "67462"
},
{
"name": "Lex",
"bytes": "4802"
},
{
"name": "Makefile",
"bytes": "24577"
},
{
"name": "Python",
"bytes": "33593"
},
{
"name": "Roff",
"bytes": "11805"
},
{
"name": "Shell",
"bytes": "52376"
},
{
"name": "Yacc",
"bytes": "4759"
}
],
"symlink_target": ""
} |
import scrapy
import bs4
from google.cloud import storage
import os
import io
import re
from urllib import parse
# Google Cloud Storage bucket and folder where scraped gazette PDFs are stored.
BUCKET_NAME = 'cda-gazette-scraper'
REGS_FOLDER = 'regs'
class RegsSpider(scrapy.Spider):
name = "regs"
allowed_domains = ["www.gazette.gc.ca", "publications.gc.ca"]
def start_requests(self):
client = storage.Client()
self.bucket = client.get_bucket(BUCKET_NAME)
return [
scrapy.Request(url='http://www.gazette.gc.ca/archives/part2-archives-partie2-eng.html',
callback=self.recent_regs_callback),
scrapy.Request(url='http://www.gazette.gc.ca/archives/archives-eng.html',
callback=self.archived_regs_callback)
]
def recent_regs_callback(self, response):
doc = bs4.BeautifulSoup(response.body, 'html.parser')
content = doc.find('div', id='gazette_content')
content_list = content.find('ul')
reg_years = content_list.find_all('a')
for reg_year in reg_years:
year = int(re.match('.*\((\d\d\d\d)\)', reg_year.text).group(1))
url = response.urljoin(reg_year['href'])
yield scrapy.Request(
url=url,
callback=self.regs_for_recent_year_callback,
meta={'year': year}
)
def archived_regs_callback(self, response):
document = bs4.BeautifulSoup(response.body, 'html.parser')
table = document.find('table', id='archive-table')
table_body = table.find('tbody')
reg_years_col = table_body.find_all('ul', **{'class': 'list-bullet-none'})[1]
reg_years = reg_years_col.find_all('a')
for reg_year in reg_years:
year_text = reg_year.find(text=True, recursive=False).extract()
year = int(year_text.split('—')[1].strip())
yield scrapy.Request(
url=response.urljoin(reg_year['href']),
callback=self.regs_for_archived_year_callback,
meta={'year': year}
)
def regs_for_recent_year_callback(self, response):
doc = bs4.BeautifulSoup(response.body, 'html.parser')
table = doc.find('div', id='gazette_content')
for child in table.div.find_all(['div', 'h2'], recursive=False):
if child.name == 'h2' and child.text.startswith('Consolidate'):
break
elif child.name == 'div':
pdf_link = child.find('span', **{'class': 'index-pdf'}).a
year = response.meta['year']
title = pdf_link['title']
filename = self.extract_basename(pdf_link['href'])
self.logger.info("Scraping gazette - Title: {}, Year: {}".format(title, year))
if not self.check_if_file_in_storage(year, filename):
yield scrapy.FormRequest(
url=response.urljoin(pdf_link['href']),
formdata={'from_interstitial': '1'},
callback=self.download_file,
meta={'year': response.meta['year'], 'title': title, 'filename': filename}
)
else:
self.logger.info('Already in storage - Title: {}, Year: {}'.format(title, year))
def regs_for_archived_year_callback(self, response):
doc = bs4.BeautifulSoup(response.body, 'html.parser')
table = doc.find('div', id='gazette_content')
for child in table.div.find_all(['div', 'h2'], recursive=False):
if child.name == 'h2' and child.text.startswith('Consolidated'):
break
elif child.name == 'div':
title = child.strong.text
links = [link for link in child.find_all('a') if link.get('href', '').endswith('.pdf')]
for link in links:
year = response.meta['year']
filename = self.extract_basename(link['href'])
self.logger.info("Scraping gazette - Title: {}, Year: {}".format(title, year))
if not self.check_if_file_in_storage(year, filename):
headers = {
'Referer': "http://publications.gc.ca/site/archivee-archived.html?url={}".format(link['href'])
}
yield scrapy.Request(
url=link['href'],
headers=headers,
callback=self.download_file,
meta={'year': year, 'title': title, 'filename': filename}
)
else:
self.logger.info('Already in storage - Title: {}, Year: {}'.format(title, year))
@staticmethod
def construct_path(year, filename):
return '{}/{}/{}'.format(REGS_FOLDER, year, filename)
@staticmethod
def extract_basename(link):
path = parse.urlparse(link).path
return os.path.basename(path)
def check_if_file_in_storage(self, year, filename):
return self.bucket.get_blob(self.construct_path(year, filename)) is not None
def download_file(self, response):
    """Upload a fetched gazette PDF into the storage bucket.

    Responses whose Content-Type is not application/pdf (e.g. an
    interstitial HTML page) are dropped without logging.
    """
    if response.headers.to_unicode_dict()['Content-Type'] == 'application/pdf':
        self.logger.info('Downloading Gazette - Year: {}, Filename: {}'.format(response.meta['year'], response.meta['filename']))
        blob = storage.Blob(self.construct_path(response.meta['year'], response.meta['filename']), self.bucket)
        body = response.body
        # upload_from_file needs a file-like object; size is passed explicitly.
        body_bytes = io.BytesIO(response.body)
        blob.upload_from_file(body_bytes, size=len(body))
        self.logger.info("Downloading document for: Year {}, Title: {}".format(response.meta['year'], response.meta['title']))
| {
"content_hash": "2280fb6853dac7ceddc6353c1aa1a177",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 133,
"avg_line_length": 45.83464566929134,
"alnum_prop": 0.5517952241882838,
"repo_name": "JasonMWhite/cda-gazette-scraper",
"id": "a22361218a1a465b501d04e44ac1c1c599b3f227",
"size": "5847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gazette/gazette/spiders/regs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "15289"
}
],
"symlink_target": ""
} |
"""freelance URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from . import views
# URL routing table for the freelance project.
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Landing page.
    url(r'^$', views.HomeView.as_view(), name='home'),
    # Delegate /invoices/ URLs to the invoices app's URLconf.
    url(r'^invoices/', include('invoices.urls')),
]
| {
"content_hash": "b75eff9f82bb1be416fa35f4bb6782ae",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.6877828054298643,
"repo_name": "halfnibble/django-intro",
"id": "cfd2b0892bcc850b50d6782f80695d454a5d9edc",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freelance/freelance/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11011"
},
{
"name": "Python",
"bytes": "16869"
}
],
"symlink_target": ""
} |
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
    """Skip every test in this module unless pax-utils is on the target image."""
    if oeRuntimeTest.hasPackage("pax-utils"):
        return
    skipModule("pax-utils package not installed")
class ScanelfTest(oeRuntimeTest):
    """Run pax-utils' scanelf across the target filesystem and require that
    each security-relevant check produces no findings."""

    def setUp(self):
        # Base invocation; each test appends its check-specific flag.
        self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'

    @skipUnlessPassed('test_ssh')
    def test_scanelf_textrel(self):
        # print TEXTREL information; any output line is a failing binary.
        self.scancmd += " --textrel"
        status, output = self.target.run(self.scancmd)
        self.assertEqual(output.strip(), "", self.scancmd + "\n" + output)

    @skipUnlessPassed('test_ssh')
    def test_scanelf_rpath(self):
        # print RPATH information; any output line is a failing binary.
        self.scancmd += " --rpath"
        status, output = self.target.run(self.scancmd)
        self.assertEqual(output.strip(), "", self.scancmd + "\n" + output)
| {
"content_hash": "c64ef65dc7b44f993cf00741203b60b2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 35.23076923076923,
"alnum_prop": 0.6561135371179039,
"repo_name": "marcosbontempo/inatelos",
"id": "b9abf24640ff22ac7e1b69a3d5d0e9fac53b58bd",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poky-daisy/meta/lib/oeqa/runtime/scanelf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "158"
},
{
"name": "BitBake",
"bytes": "1910696"
},
{
"name": "BlitzBasic",
"bytes": "4400"
},
{
"name": "C",
"bytes": "1751572"
},
{
"name": "C++",
"bytes": "354295"
},
{
"name": "CMake",
"bytes": "6537"
},
{
"name": "CSS",
"bytes": "27029"
},
{
"name": "Groff",
"bytes": "502444"
},
{
"name": "HTML",
"bytes": "141762"
},
{
"name": "JavaScript",
"bytes": "22555"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32254"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "Perl",
"bytes": "66300"
},
{
"name": "Perl6",
"bytes": "73"
},
{
"name": "Python",
"bytes": "3529760"
},
{
"name": "Shell",
"bytes": "598521"
},
{
"name": "Tcl",
"bytes": "60106"
},
{
"name": "VimL",
"bytes": "8506"
},
{
"name": "XSLT",
"bytes": "8814"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
# App must be initialized before models or ADDONS_AVAILABLE are available
from website.app import init_app
init_app()
from osf.models import OSFUser, AbstractNode
from framework.database import paginated
from scripts.analytics.base import SnapshotAnalytics
from website.settings import ADDONS_AVAILABLE
logger = logging.getLogger(__name__)
# Emit INFO-level progress logs when run as a script.
logging.basicConfig(level=logging.INFO)
# Modified from scripts/analytics/benchmarks.py
def get_enabled_authorized_linked(user_settings_list, has_external_account, short_name):
    """ Gather the number of users who have at least one node in each of the stages for an addon
    :param user_settings_list: list of user_settings for a particular addon
    :param has_external_account: whether the addon is derived from an external
        account; determines how node settings are loaded
    :param short_name: short name of addon to get correct node_settings
    :return: dict with number of users that have at least one project at each stage
    """
    # Function-level import — NOTE(review): presumably to avoid a circular
    # import with the addons app; confirm before hoisting to module level.
    from addons.forward.models import NodeSettings as ForwardNodeSettings
    num_enabled = 0  # of users w/ 1+ addon account connected
    num_authorized = 0  # of users w/ 1+ addon account connected to 1+ node
    num_linked = 0  # of users w/ 1+ addon account connected to 1+ node and configured
    # osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
    if short_name == 'osfstorage' or short_name == 'wiki':
        # Every registered, confirmed, non-merged, non-disabled user counts.
        num_enabled = num_authorized = num_linked = OSFUser.objects.filter(
            is_registered=True,
            password__isnull=False,
            merged_by__isnull=True,
            date_disabled__isnull=True,
            date_confirmed__isnull=False
        ).count()
    elif short_name == 'forward':
        # Forward has no user settings either; node settings are the unit,
        # and "linked" means a target URL has been set.
        num_enabled = num_authorized = ForwardNodeSettings.objects.count()
        num_linked = ForwardNodeSettings.objects.filter(url__isnull=False).count()
    else:
        for user_settings in paginated(user_settings_list):
            node_settings_list = []
            if has_external_account:
                if user_settings.has_auth:
                    num_enabled += 1
                    node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.oauth_grants.keys()]
            else:
                num_enabled += 1
                node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.nodes_authorized]
            # "authorized": at least one node has auth; "linked": at least one
            # node is both complete and configured.
            if any([ns.has_auth for ns in node_settings_list if ns]):
                num_authorized += 1
            if any([(ns.complete and ns.configured) for ns in node_settings_list if ns]):
                num_linked += 1
    return {
        'enabled': num_enabled,
        'authorized': num_authorized,
        'linked': num_linked
    }
class AddonSnapshot(SnapshotAnalytics):
    """Snapshot of per-addon usage: user adoption stages and node connection states."""

    @property
    def collection_name(self):
        # Analytics collection this snapshot is written to.
        return 'addon_snapshot'

    def get_events(self, date=None):
        """Return one usage-count record per available addon.

        :param date: passed through to SnapshotAnalytics.get_events.
        :return: list of dicts with 'provider', 'users' and 'nodes' counts.
        """
        super(AddonSnapshot, self).get_events(date)
        counts = []
        # FIX: was `{k: v for k, v in [(a.short_name, a) for a in ...]}` iterated
        # with dict.iteritems(), which does not exist on Python 3. A direct
        # comprehension plus .items() behaves identically on both versions.
        addons_available = {addon.short_name: addon for addon in ADDONS_AVAILABLE}
        for short_name, addon in addons_available.items():
            # Look the node-settings model up once instead of four times.
            node_settings_model = addon.models.get('nodesettings')
            # Addons backed by external accounts expose `external_account`
            # on their node settings model.
            has_external_account = hasattr(node_settings_model, 'external_account')
            connected_count = 0
            deleted_count = 0
            disconnected_count = 0
            if node_settings_model:
                for node_settings in paginated(node_settings_model):
                    # Count nodes with live (non-legacy-collection) owners.
                    if node_settings.owner and not node_settings.owner.all_tags.filter(
                            name='old_node_collection', system=True).exists():
                        connected_count += 1
                deleted_count = node_settings_model.objects.filter(deleted=True).count()
                if has_external_account:
                    disconnected_count = node_settings_model.objects.filter(
                        external_account__isnull=True, deleted=False).count()
                else:
                    # No external account: "disconnected" means configured but incomplete.
                    for nsm in node_settings_model.objects.filter(deleted=False):
                        if nsm.configured and not nsm.complete:
                            disconnected_count += 1
            total = connected_count + deleted_count + disconnected_count
            usage_counts = get_enabled_authorized_linked(
                addon.models.get('usersettings'), has_external_account, addon.short_name)
            counts.append({
                'provider': {
                    'name': short_name
                },
                'users': usage_counts,
                'nodes': {
                    'total': total,
                    'connected': connected_count,
                    'deleted': deleted_count,
                    'disconnected': disconnected_count
                }
            })
            logger.info(
                '{} counted. Users with a linked node: {}, Total connected nodes: {}.'.format(
                    addon.short_name,
                    usage_counts['linked'],
                    total
                )
            )
        return counts
def get_class():
    """Entry-point hook: return the analytics class this script runs."""
    return AddonSnapshot
if __name__ == '__main__':
    # One-off run: collect the addon usage snapshot and ship it.
    addon_snapshot = AddonSnapshot()
    events = addon_snapshot.get_events()
    addon_snapshot.send_events(events)
| {
"content_hash": "f2618fee72922757c8944c784e6043b4",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 181,
"avg_line_length": 41.65413533834587,
"alnum_prop": 0.6108303249097473,
"repo_name": "icereval/osf.io",
"id": "1df5b81fbeb325b31c3f7555c587477a13d7a795",
"size": "5540",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "scripts/analytics/addon_snapshot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "108526"
},
{
"name": "HTML",
"bytes": "261937"
},
{
"name": "JavaScript",
"bytes": "1856123"
},
{
"name": "Mako",
"bytes": "691640"
},
{
"name": "Python",
"bytes": "8331919"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from hashlib import md5
from Crypto import Random
from Crypto.Cipher import AES
def _chr(i):
if isinstance(i, bytes):
return i # FIXME why is it mixed in python 2 ?
return chr(i).encode('utf-8')
def _derive_key_and_iv(password, salt, key_length, iv_length):
d = d_i = b''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length+iv_length]
def symmetric_encrypt(in_file, out_file, password, key_length=32):
    """ OpenSSL compatible aes encryption.
    Equivalent to `openssl aes-256-cbc -salt -in in_file -out out_file.enc`
    Args:
        in_file: Input file like object.
        out_file: Output file like object.
        password: Secure encryption password (bytes).
        key_length: Key length in bytes (32 gives AES-256).
    Raises:
        storjcore.sanitize.ValidationError: if input is invalid
    Source:
        http://stackoverflow.com/a/16761459
        Added documentation, tests, input validation and ported to Python 3.
    Example:
        > from storjcore import encryptedio
        > with open("in_file", 'rb') as fi, open("out_file.enc", 'wb') as fo:
        >     encryptedio.symmetric_encrypt(fi, fo, b"secure_password")
    """
    # FIXME sanitize input
    # NOTE(review): asserts are stripped under `python -O`; real validation
    # should raise instead.
    assert(isinstance(password, bytes))
    assert(isinstance(key_length, int))
    # encrypt
    bs = AES.block_size
    # OpenSSL's salted format: 8-byte "Salted__" magic followed by the salt.
    salt = Random.new().read(bs - len(b'Salted__'))
    key, iv = _derive_key_and_iv(password, salt, key_length, bs)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    out_file.write(b'Salted__' + salt)
    finished = False
    while not finished:
        chunk = in_file.read(1024 * bs)
        if len(chunk) == 0 or len(chunk) % bs != 0:
            # PKCS#7-style padding: pad with N bytes of value N; a full
            # block of padding is appended when the input is block-aligned.
            padding_length = (bs - len(chunk) % bs) or bs
            chunk += padding_length * _chr(padding_length)
            finished = True
        encrypted_chunk = cipher.encrypt(chunk)
        out_file.write(encrypted_chunk)
def symmetric_decrypt(in_file, out_file, password, key_length=32):
    """ OpenSSL compatible aes decryption.
    Equivalent to `openssl aes-256-cbc -d -in in_file.enc -out out_file`
    Args:
        in_file: Input file like object.
        out_file: Output file like object.
        password: Secure encryption password (bytes).
        key_length: Key length in bytes (32 gives AES-256).
    Raises:
        storjcore.sanitize.ValidationError: if input is invalid
    Source:
        http://stackoverflow.com/a/16761459
        Added documentation, tests, input validation and ported to Python 3.
    Example:
        > from storjcore import encryptedio
        > with open("in_file.enc", 'rb') as fi, open("out_file", 'wb') as fo:
        >     encryptedio.symmetric_decrypt(fi, fo, b"secure_password")
    """
    # FIXME sanitize input
    # NOTE(review): asserts are stripped under `python -O`; real validation
    # should raise instead.
    assert(isinstance(password, bytes))
    assert(isinstance(key_length, int))
    # decrypt
    bs = AES.block_size
    # First block holds the "Salted__" magic plus the salt; skip the magic.
    salt = in_file.read(bs)[len(b'Salted__'):]
    key, iv = _derive_key_and_iv(password, salt, key_length, bs)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    next_chunk = b''
    finished = False
    while not finished:
        # Read one chunk ahead so the final chunk's padding can be stripped.
        chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
        if len(next_chunk) == 0:
            # Last byte of the final block encodes the padding length.
            padding_length = ord(_chr(chunk[-1]))
            chunk = chunk[:-padding_length]
            finished = True
        out_file.write(chunk)
| {
"content_hash": "342bb6d566680c2230f330f60377f136",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 32.0377358490566,
"alnum_prop": 0.6195524146054181,
"repo_name": "littleskunk/storjcore",
"id": "adc8c245d9bced32a47a4aaf9fa831f0946cea76",
"size": "3396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storjcore/encryptedio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1635"
},
{
"name": "Python",
"bytes": "12926"
}
],
"symlink_target": ""
} |
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from payex import __version__
def publish():
    """Build an sdist and upload it to PyPI."""
    # NOTE(review): `setup.py sdist upload` is the legacy upload path
    # (modern releases use twine); left as-is to preserve the release flow.
    os.system("python setup.py sdist upload")
# `python setup.py publish` shortcut: upload to PyPI and exit immediately.
if sys.argv[-1] == "publish":
    publish()
    sys.exit()
# Requirements
install_requires = [
    'suds==0.4',
]
# The ordereddict backport is only needed before Python 2.7.
if sys.version_info < (2, 7):
    install_requires.append('ordereddict')
# FIX: read the long description via a context manager so the file handle is
# closed deterministically — the previous bare `open('README.md').read()`
# leaked the handle and triggers ResourceWarning on Python 3.
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='pypayex',
    version=__version__,
    description='Python module for interacting with the PayEx SOAP API',
    long_description=long_description,
    author='payex',
    author_email='opensource@payex.com',
    url='https://github.com/PayEx/pypayex',
    packages=['payex'],
    license='BSD',
    install_requires=install_requires,
    test_suite='tests',
    classifiers=(
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    )
)
| {
"content_hash": "582d4edcad0a6ac52c89df7fc9001209",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 24.645833333333332,
"alnum_prop": 0.6306001690617076,
"repo_name": "funkbit/pypayex",
"id": "38a9f7df5efa6f982b4827a684332dbb4ebf3fce",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "29616"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import errno
import logging
import os
import six
import subprocess
import warnings
from future import standard_library
standard_library.install_aliases()
from builtins import str
from collections import OrderedDict
from configparser import ConfigParser
from .exceptions import AirflowConfigException
# show Airflow's deprecation warnings
# Python hides (Pending)DeprecationWarning by default; re-enable them for
# airflow modules so users see upcoming breakage.
warnings.filterwarnings(
    action='default', category=DeprecationWarning, module='airflow')
warnings.filterwarnings(
    action='default', category=PendingDeprecationWarning, module='airflow')
# cryptography is optional; generate_fernet_key() detects its absence via
# NameError on Fernet and degrades gracefully.
try:
    from cryptography.fernet import Fernet
except ImportError:
    pass
def generate_fernet_key():
    """Return a new Fernet key string, or a plain-text sentinel when the
    optional `cryptography` package (and thus Fernet) is not importable."""
    try:
        return Fernet.generate_key().decode()
    except NameError:
        # Fernet was never imported (see the optional import at module top).
        return "cryptography_not_found_storing_passwords_in_plain_text"
def expand_env_var(env_var):
    """
    Expands (potentially nested) env vars by repeatedly applying
    `expandvars` and `expanduser` until interpolation stops having
    any effect.
    """
    # Empty string / None pass through untouched.
    if not env_var:
        return env_var
    current = env_var
    while True:
        expanded = os.path.expanduser(os.path.expandvars(str(current)))
        if expanded == current:
            # Fixed point reached: nothing left to interpolate.
            return expanded
        current = expanded
def run_command(command):
    """
    Runs command and returns stdout.

    Raises AirflowConfigException when the command exits non-zero.
    Note: the command is split on whitespace, so shell quoting is not
    supported.
    """
    proc = subprocess.Popen(
        command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise AirflowConfigException(
            "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
            .format(command, proc.returncode, stdout, stderr)
        )
    return stdout
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = {AIRFLOW_HOME}/logs
# Airflow can store logs remotely in AWS S3 or Google Cloud Storage. Users
# must supply a remote location URL (starting with either 's3://...' or
# 'gs://...') and an Airflow connection id that provides access to the storage
# location.
remote_base_log_folder =
remote_log_conn_id =
# Use server-side encryption for logs stored in S3
encrypt_s3_logs = False
# DEPRECATED option for remote log storage, use remote_base_log_folder instead!
s3_log_folder =
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool.
sql_alchemy_pool_size = 5
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16
# Are DAGs paused by default at creation
dags_are_paused_at_creation = True
# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
# Whether to disable pickling dags
donot_pickle = False
# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30
# What security module to use (for example kerberos):
security =
# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert =
web_server_ssl_key =
# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120
# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1
# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30
# Secret key used to run your flask app
secret_key = temporary_key
# Number of workers to run the Gunicorn web server
workers = 4
# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync
# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -
# Expose the configuration file in the web server
expose_config = False
# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to user the ldapgroup mode.
owner_mode = user
# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR
# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False
# The amount of time (in secs) webserver will wait for initial handshake
# while fetching logs from other worker machine
log_fetch_timeout_sec = 5
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
# If you want airflow to send emails on retries, failure, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# smtp server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers local log files to the airflow main
# web server, who then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and open
# visible from the main web server to connect into the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0
# This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that worker listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# after how much time should the scheduler terminate in seconds
# -1 indicates to run continuously (see also num_runs)
run_duration = -1
# after how much time a new DAGs should be picked up from the filesystem
min_file_process_interval = 0
dag_dir_list_interval = 300
# How often should stats be printed to the logs
print_stats_interval = 30
child_process_log_directory = /tmp/airflow/scheduler/logs
# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow
# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run. However airflow will never
# use more threads than the amount of cpu cores available.
max_threads = 2
authenticate = False
[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050
# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow
# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1
# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256
# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False
# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits
# until the configured timeout for
# the MesosExecutor framework to re-register after a failover. Mesos
# shuts down running tasks if the
# MesosExecutor framework fails to re-register within this timeframe.
# failover_timeout = 604800
# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False
# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin
[kerberos]
ccache = /tmp/airflow_krb5_ccache
# gets augmented with fqdn
principal = airflow
reinit_frequency = 3600
kinit_path = kinit
keytab = airflow.keytab
[github_enterprise]
api_rev = v3
[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True
"""
TEST_CONFIG = """\
[core]
unit_test_mode = True
airflow_home = {AIRFLOW_HOME}
dags_folder = {TEST_DAGS_FOLDER}
plugins_folder = {TEST_PLUGINS_FOLDER}
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
load_examples = True
donot_pickle = False
dag_concurrency = 16
dags_are_paused_at_creation = False
fernet_key = {FERNET_KEY}
non_pooled_task_slot_count = 128
[operators]
default_owner = airflow
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
dag_orientation = LR
log_fetch_timeout_sec = 5
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_host = 0.0.0.0
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
max_threads = 2
"""
class AirflowConfigParser(ConfigParser):
# These configuration elements can be fetched as the stdout of commands
# following the "{section}__{name}__cmd" pattern, the idea behind this
# is to not store password on boxes in text files.
as_command_stdout = {
('core', 'sql_alchemy_conn'),
('core', 'fernet_key'),
('celery', 'broker_url'),
('celery', 'celery_result_backend')
}
def __init__(self, *args, **kwargs):
ConfigParser.__init__(self, *args, **kwargs)
self.read_string(parameterized_config(DEFAULT_CONFIG))
self.is_validated = False
def read_string(self, string, source='<string>'):
"""
Read configuration from a string.
A backwards-compatible version of the ConfigParser.read_string()
method that was introduced in Python 3.
"""
# Python 3 added read_string() method
if six.PY3:
ConfigParser.read_string(self, string, source=source)
# Python 2 requires StringIO buffer
else:
import StringIO
self.readfp(StringIO.StringIO(string))
def _validate(self):
if (
self.get("core", "executor") != 'SequentialExecutor' and
"sqlite" in self.get('core', 'sql_alchemy_conn')):
raise AirflowConfigException(
"error: cannot use sqlite with the {}".format(
self.get('core', 'executor')))
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode") not in ['user', 'ldapgroup']
):
raise AirflowConfigException(
"error: owner_mode option should be either "
"'user' or 'ldapgroup' when filtering by owner is set")
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode").lower() == 'ldapgroup' and
self.get("core", "auth_backend") != (
'airflow.contrib.auth.backends.ldap_auth')
):
raise AirflowConfigException(
"error: attempt at using ldapgroup "
"filtering without using the Ldap backend")
self.is_validated = True
def _get_env_var_option(self, section, key):
# must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
if env_var in os.environ:
return expand_env_var(os.environ[env_var])
def _get_cmd_option(self, section, key):
fallback_key = key + '_cmd'
# if this is a valid command key...
if (section, key) in AirflowConfigParser.as_command_stdout:
# if the original key is present, return it no matter what
if self.has_option(section, key):
return ConfigParser.get(self, section, key)
# otherwise, execute the fallback key
elif self.has_option(section, fallback_key):
command = self.get(section, fallback_key)
return run_command(command)
def get(self, section, key, **kwargs):
section = str(section).lower()
key = str(key).lower()
# first check environment variables
option = self._get_env_var_option(section, key)
if option:
return option
# ...then the config file
if self.has_option(section, key):
return expand_env_var(
ConfigParser.get(self, section, key, **kwargs))
# ...then commands
option = self._get_cmd_option(section, key)
if option:
return option
else:
logging.warning("section/key [{section}/{key}] not found "
"in config".format(**locals()))
raise AirflowConfigException(
"section/key [{section}/{key}] not found "
"in config".format(**locals()))
def getboolean(self, section, key):
val = str(self.get(section, key)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val.lower() in ('t', 'true', '1'):
return True
elif val.lower() in ('f', 'false', '0'):
return False
else:
raise AirflowConfigException(
'The value for configuration option "{}:{}" is not a '
'boolean (received "{}").'.format(section, key, val))
def getint(self, section, key):
return int(self.get(section, key))
def getfloat(self, section, key):
return float(self.get(section, key))
def read(self, filenames):
ConfigParser.read(self, filenames)
self._validate()
def as_dict(self, display_source=False, display_sensitive=False):
    """
    Returns the current configuration as an OrderedDict of OrderedDicts.

    :param display_source: If False, the option value is returned. If True,
        a tuple of (option_value, source) is returned. Source is either
        'airflow.cfg' or 'default'.
    :type display_source: bool
    :param display_sensitive: If True, the values of options set by env
        vars and bash commands will be displayed. If False, those options
        are shown as '< hidden >'
    :type display_sensitive: bool
    """
    cfg = copy.deepcopy(self._sections)

    # remove __name__ (affects Python 2 only)
    for options in cfg.values():
        options.pop('__name__', None)

    # add source
    if display_source:
        for section in cfg:
            for k, v in cfg[section].items():
                cfg[section][k] = (v, 'airflow config')

    # add env vars and overwrite because they have priority
    for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]:
        try:
            _, section, key = ev.split('__')
            opt = self._get_env_var_option(section, key)
        except ValueError:
            # the variable name did not have exactly three '__' parts
            opt = None
        if opt:
            # UNIT_TEST_MODE is never sensitive, so always show it
            if (
                    not display_sensitive
                    and ev != 'AIRFLOW__CORE__UNIT_TEST_MODE'):
                opt = '< hidden >'
            if display_source:
                opt = (opt, 'env var')
            cfg.setdefault(section.lower(), OrderedDict()).update(
                {key.lower(): opt})

    # add bash commands
    for (section, key) in AirflowConfigParser.as_command_stdout:
        opt = self._get_cmd_option(section, key)
        if opt:
            if not display_sensitive:
                opt = '< hidden >'
            if display_source:
                opt = (opt, 'bash cmd')
            cfg.setdefault(section, OrderedDict()).update({key: opt})

    return cfg
def load_test_config(self):
    """
    Load the unit test configuration.

    Note: this is not reversible.
    """
    # override any custom settings with defaults
    self.read_string(parameterized_config(DEFAULT_CONFIG))
    # then read test config
    self.read_string(parameterized_config(TEST_CONFIG))
    # then read any "custom" test settings
    self.read(TEST_CONFIG_FILE)
def mkdir_p(path):
    """Create *path* and any missing parents, like `mkdir -p`.

    Succeeds silently when the directory already exists; any other
    OS-level failure is re-raised as AirflowConfigException.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            # Include the offending path and OS error so the failure is
            # actionable (the original message carried neither).
            raise AirflowConfigException(
                'Had trouble creating a directory {}: {}'.format(
                    path, exc.strerror))
# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
# "~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
if 'AIRFLOW_HOME' not in os.environ:
    AIRFLOW_HOME = expand_env_var('~/airflow')
else:
    AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])

mkdir_p(AIRFLOW_HOME)

if 'AIRFLOW_CONFIG' not in os.environ:
    # A bare ~/airflow.cfg takes precedence over the one inside AIRFLOW_HOME.
    if os.path.isfile(expand_env_var('~/airflow.cfg')):
        AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
    else:
        AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
    AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])

# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'tests',
    'dags')
if os.path.exists(_TEST_DAGS_FOLDER):
    TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER
else:
    TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')

# Set up plugins folder for unit tests
_TEST_PLUGINS_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'tests',
    'plugins')
if os.path.exists(_TEST_PLUGINS_FOLDER):
    TEST_PLUGINS_FOLDER = _TEST_PLUGINS_FOLDER
else:
    TEST_PLUGINS_FOLDER = os.path.join(AIRFLOW_HOME, 'plugins')
def parameterized_config(template):
    """
    Generates a configuration from the provided template + variables defined in
    current scope

    :param template: a config content templated with {{variables}}
    """
    # FERNET_KEY is referenced by the template; it reaches format() through
    # the locals() merge below even though it looks unused here.
    FERNET_KEY = generate_fernet_key()
    # Later dicts win, so locals (e.g. FERNET_KEY) override module globals.
    all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
    return template.format(**all_vars)
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'

# Generate any missing config files from the bundled templates on first run.
if not os.path.isfile(TEST_CONFIG_FILE):
    logging.info("Creating new airflow config file for unit tests in: " +
                 TEST_CONFIG_FILE)
    with open(TEST_CONFIG_FILE, 'w') as f:
        f.write(parameterized_config(TEST_CONFIG))

if not os.path.isfile(AIRFLOW_CONFIG):
    # These configuration options are used to generate a default configuration
    # when it is missing. The right way to change your configuration is to
    # alter your configuration file, not this code.
    logging.info("Creating new airflow config file in: " + AIRFLOW_CONFIG)
    with open(AIRFLOW_CONFIG, 'w') as f:
        f.write(parameterized_config(DEFAULT_CONFIG))

logging.info("Reading the config from " + AIRFLOW_CONFIG)

# The module-level singleton every helper below proxies to.
conf = AirflowConfigParser()
conf.read(AIRFLOW_CONFIG)


def load_test_config():
    """
    Load the unit test configuration.

    Note: this is not reversible.
    """
    conf.load_test_config()


if conf.getboolean('core', 'unit_test_mode'):
    load_test_config()
# Module-level convenience wrappers that delegate to the global `conf`.
def get(section, key, **kwargs):
    """Proxy for conf.get()."""
    return conf.get(section, key, **kwargs)


def getboolean(section, key):
    """Proxy for conf.getboolean()."""
    return conf.getboolean(section, key)


def getfloat(section, key):
    """Proxy for conf.getfloat()."""
    return conf.getfloat(section, key)


def getint(section, key):
    """Proxy for conf.getint()."""
    return conf.getint(section, key)


def has_option(section, key):
    """Proxy for conf.has_option()."""
    return conf.has_option(section, key)


def remove_option(section, option):
    """Proxy for conf.remove_option()."""
    return conf.remove_option(section, option)


def as_dict(display_source=False, display_sensitive=False):
    return conf.as_dict(
        display_source=display_source, display_sensitive=display_sensitive)
as_dict.__doc__ = conf.as_dict.__doc__


def set(section, option, value):  # noqa
    return conf.set(section, option, value)

########################
# convenience method to access config entries


def get_dags_folder():
    """Return the user-expanded path of the [core] DAGS_FOLDER option."""
    return os.path.expanduser(get('core', 'DAGS_FOLDER'))
| {
"content_hash": "ef92ab169c63710a2ce19ce23f3b076e",
"timestamp": "",
"source": "github",
"line_count": 776,
"max_line_length": 79,
"avg_line_length": 31.362113402061855,
"alnum_prop": 0.6813493857090027,
"repo_name": "jbhsieh/incubator-airflow",
"id": "ad8b3e382386f427da5d2edb5e1923e5ce9784a0",
"size": "24905",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "142758"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1497501"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
} |
'''
@author: MengLai
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
# Shared module-level state for this upgrade test.
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
# uuid.UUID.get_hex() exists only on Python 2; the `hex` attribute is
# available on both Python 2 and 3 and returns the same string.
tmp_file = '/tmp/%s' % uuid.uuid1().hex
vm_inv = None
def test():
    """Install ZStack 1.5 on a CentOS 7 scenario VM, upgrade it to the
    latest release and verify the installation.

    All inputs (image name, ISO path, target version/installer, VM name,
    upgrade script) come from environment variables.
    """
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')

    # Test inputs, all supplied through the environment.
    image_name = os.environ.get('imageName_i_c7_z_1.5')
    iso_path = os.environ.get('iso_path')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')
    upgrade_script_path = os.environ.get('upgradeScript')

    # Bring the scenario VM up and enable password-less SSH.
    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)
    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    test_util.test_logger('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.reset_rabbitmq(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    test_util.test_logger('Upgrade zstack to latest')
    test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_mn_running(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    # Clean up on success.
    os.system('rm -f %s' % tmp_file)
    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup (temp file, scenario VM, tracked test objects)
    after a failed test() run."""
    global vm_inv
    os.system('rm -f %s' % tmp_file)
    if vm_inv:
        test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
| {
"content_hash": "472a15fbcad232072665ceed0b71b401",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 34.59375,
"alnum_prop": 0.6806684733514001,
"repo_name": "zstackio/zstack-woodpecker",
"id": "dc0eadb9422146857f8f9517fd6171ab9ea64c63",
"size": "2214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/installation/upgrade/test_zs_upgd_1.5_latest_on_cos7.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import numpy as np
from scattertext.termranking import AbsoluteFrequencyRanker
from scattertext.termranking import DocLengthDividedFrequencyRanker
from scattertext.termranking import DocLengthNormalizedFrequencyRanker
from scattertext.termranking.OncePerDocFrequencyRanker import OncePerDocFrequencyRanker
from scattertext.test.test_TermDocMat import make_a_test_term_doc_matrix
class TestTermRanker(TestCase):
    """Exercise the frequency-based term rankers on the small fixture
    term-document matrix from test_TermDocMat."""

    def test_absolute_frequency_ranker(self):
        term_doc_mat = make_a_test_term_doc_matrix()
        rank_df = AbsoluteFrequencyRanker(term_doc_mat).get_ranks()
        self.assertEqual(len(rank_df), 58)
        self.assertEqual(rank_df.loc['hello'].tolist(), [1, 0])
        self.assertEqual(rank_df.loc['blah'].tolist(), [0, 3])
        self.assertEqual(rank_df.loc['name'].tolist(), [1, 1])

    def test_doc_length_normalized_frequency_ranker(self):
        term_doc_mat = make_a_test_term_doc_matrix()
        normalized_df = DocLengthNormalizedFrequencyRanker(term_doc_mat).get_ranks()
        absolute_df = AbsoluteFrequencyRanker(term_doc_mat).get_ranks()
        self.assertEqual(len(absolute_df), len(normalized_df))
        # Fixture document lengths; normalized ranks are scaled by the
        # average document length.
        doc_lengths = [12, 35, 29]
        mean_length = sum(doc_lengths) * 1. / len(doc_lengths)
        np.testing.assert_almost_equal(np.array(normalized_df.loc['blah']),
                                       [0, mean_length * 3. / 12])
        np.testing.assert_almost_equal(np.array(normalized_df.loc['name']),
                                       [mean_length * 1. / 35,
                                        mean_length * 1. / 29])

    def test_doc_length_divided_frequency_ranker(self):
        term_doc_mat = make_a_test_term_doc_matrix()
        divided_df = DocLengthDividedFrequencyRanker(term_doc_mat).get_ranks()
        absolute_df = AbsoluteFrequencyRanker(term_doc_mat).get_ranks()
        self.assertEqual(len(absolute_df), len(divided_df))
        # Ranks are raw counts divided by document lengths (12, 35, 29).
        np.testing.assert_almost_equal(np.array(divided_df.loc['blah']),
                                       [0, 3. / 12])
        np.testing.assert_almost_equal(np.array(divided_df.loc['name']),
                                       [1. / 35, 1. / 29])

    def test_once_per_doc_frequency_ranker(self):
        term_doc_mat = make_a_test_term_doc_matrix()
        baseline_df = DocLengthDividedFrequencyRanker(term_doc_mat).get_ranks()
        once_df = OncePerDocFrequencyRanker(term_doc_mat).get_ranks()
        self.assertEqual(len(baseline_df), len(once_df))
        np.testing.assert_almost_equal(np.array(once_df.loc['blah']), [0, 1])
        np.testing.assert_almost_equal(np.array(once_df.loc['name']), [1, 1])
| {
"content_hash": "f26b4f8ca3a9f9c7e7a45b234e4a0e3c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 87,
"avg_line_length": 43.278688524590166,
"alnum_prop": 0.6772727272727272,
"repo_name": "JasonKessler/scattertext",
"id": "dc86ff900632e0abf3da2cb50baa4bebf56a5d47",
"size": "2640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scattertext/test/test_termRanker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "52028"
},
{
"name": "JavaScript",
"bytes": "497904"
},
{
"name": "Python",
"bytes": "1183530"
},
{
"name": "Shell",
"bytes": "306"
}
],
"symlink_target": ""
} |
from flask import Flask
# The flask.ext.* import namespace was deprecated and removed in Flask 1.0;
# extensions must be imported by their package name directly.
from flask_socketio import SocketIO
import redis

app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('config.py')

# Shared Redis connection, configured from config.py.
redis_db = redis.StrictRedis(host=app.config['REDIS_SERVER'],
                             port=app.config['REDIS_PORT'],
                             db=app.config['REDIS_DB'])

socketio = SocketIO(app)

# Imported last: views/websockets need `app`, `redis_db` and `socketio`.
from . import views
from . import websockets
| {
"content_hash": "5735a3ab70cad1272f4139d05d87f2ec",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 61,
"avg_line_length": 27.6,
"alnum_prop": 0.6376811594202898,
"repo_name": "Belekkk/Presentations",
"id": "d4322e85d2c90690c7daa80b1eefdad89f8416db",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presentations/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "124241"
},
{
"name": "HTML",
"bytes": "45403"
},
{
"name": "JavaScript",
"bytes": "154778"
},
{
"name": "Python",
"bytes": "2443"
}
],
"symlink_target": ""
} |
from .spinner_asynicio import main | {
"content_hash": "c25554e753faca49399f3faf801699c5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 34,
"alnum_prop": 0.8529411764705882,
"repo_name": "kissf-lu/jupyter_app",
"id": "24f196cf46527de9f355bd6fc7b4038a24f91309",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipython/py36_simpy_sim/fluent_python/concurrency_asyncio/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1175274"
},
{
"name": "Python",
"bytes": "2436676"
}
],
"symlink_target": ""
} |
import tempfile
import shutil
import os
from voltgrid import ConfigManager
# Path to the sample voltgrid.conf shipped next to this test module.
VG_CFG = os.path.join(os.path.abspath(os.path.split(__file__)[0]), 'voltgrid.conf')


def test_config_manager():
    # Loading the sample config and exporting its envs should not raise.
    c = ConfigManager(VG_CFG)
    c.write_envs()


def test_config_is_empty():
    # An existing-but-empty config file is accepted.
    with tempfile.NamedTemporaryFile() as tmp_f:
        c = ConfigManager(tmp_f.name)


def test_config_not_exist():
    # A missing config file is tolerated rather than raising.
    c = ConfigManager('does-not-exist')


def test_git_config():
    # Git URL is picked up from the GIT_URL environment variable.
    git_url = 'git@github.com:voltgrid/voltgrid-pie.git'
    os.environ['GIT_URL'] = git_url
    c = ConfigManager(VG_CFG)
    c.load_git_conf()
    assert(c.git_url == git_url)


def test_git_config_no_vgconf():
    # Git settings come from the environment even without a config file.
    git_url = 'git@github.com:voltgrid/voltgrid-pie.git'
    git_dst = '/tmp/git'
    os.environ['GIT_URL'] = git_url
    os.environ['GIT_DST'] = git_dst
    c = ConfigManager('does-not-exist')
    c.load_git_conf()
    assert(c.git_url == git_url)
    assert(c.git_dst == git_dst)


def test_strip_crlf():
    # CR/LF around and inside values is stripped/normalized (see assertion).
    os.environ['VG_CONF_PATH'] = VG_CFG
    os.environ['MYVARIABLE'] = '\r\nfoo \nbar\r'  # Context
    c = ConfigManager(VG_CFG)
    c.load_config()
    assert(c.config['MYVARIABLE'] == 'foo bar')
| {
"content_hash": "744bb413801104e10185dc9fea08eca6",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 83,
"avg_line_length": 24.375,
"alnum_prop": 0.6418803418803419,
"repo_name": "voltgrid/voltgrid-pie",
"id": "d5dce222f13896687f9b41b417782ef8b3daa9a8",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17692"
}
],
"symlink_target": ""
} |
import numpy as np
import sklearn.cross_validation as skl_cross_validation
from Orange.data import Table, Domain, ContinuousVariable, DiscreteVariable
__all__ = ["Results", "CrossValidation", "LeaveOneOut", "TestOnTrainingData",
"ShuffleSplit", "TestOnTestData", "sample"]
class Results:
"""
Class for storing predictions in model testing.
.. attribute:: data
Data used for testing (optional; can be `None`). When data is stored,
this is typically not a copy but a reference.
.. attribute:: row_indices
Indices of rows in :obj:`data` that were used in testing, stored as
a numpy vector of length `nrows`. Values of `actual[i]`, `predicted[i]`
and `probabilities[i]` refer to the target value of instance
`data[row_indices[i]]`.
.. attribute:: nrows
The number of test instances (including duplicates).
.. attribute:: models
A list of induced models (optional; can be `None`).
.. attribute:: actual
Actual values of target variable; a numpy vector of length `nrows` and
of the same type as `data` (or `np.float32` if the type of data cannot
be determined).
.. attribute:: predicted
Predicted values of target variable; a numpy array of shape
(number-of-methods, `nrows`) and of the same type as `data` (or
`np.float32` if the type of data cannot be determined).
.. attribute:: probabilities
Predicted probabilities (for discrete target variables); a numpy array
of shape (number-of-methods, `nrows`, number-of-classes) of type
`np.float32`.
.. attribute:: folds
A list of indices (or slice objects) corresponding to rows of each
fold; `None` if not applicable.
"""
# noinspection PyBroadException
# noinspection PyNoneFunctionAssignment
def __init__(self, data=None, nmethods=0, nrows=None, nclasses=None,
store_data=False, store_models=False, domain=None,
actual=None, row_indices=None,
predicted=None, probabilities=None,
preprocessor=None, callback=None):
"""
Construct an instance with default values: `None` for :obj:`data` and
:obj:`models`.
If the number of rows and/or the number of classes is not given, it is
inferred from :obj:`data`, if provided. The data type for
:obj:`actual` and :obj:`predicted` is determined from the data; if the
latter cannot be find, `np.float32` is used.
Attribute :obj:`actual` and :obj:`row_indices` are constructed as empty
(uninitialized) arrays of the appropriate size, if the number of rows
is known. Attribute :obj:`predicted` is constructed if the number of
rows and of methods is given; :obj:`probabilities` also requires
knowing the number of classes.
:param data: Data or domain
:type data: Orange.data.Table or Orange.data.Domain
:param nmethods: The number of methods that will be tested
:type nmethods: int
:param nrows: The number of test instances (including duplicates)
:type nrows: int
:param nclasses: The number of class values
:type nclasses: int
:param store_data: A flag that tells whether to store the data;
this argument can be given only as keyword argument
:type store_data: bool
:param store_models: A flag that tells whether to store the models;
this argument can be given only as keyword argument
:type store_models: bool
:param preprocessor: Preprocessor for training data
:type preprocessor: Orange.preprocess.Preprocess
:param callback: Function for reporting back the progress as a value
between 0 and 1
:type callback: callable
"""
self.store_data = store_data
self.store_models = store_models
self.data = data if store_data else None
self.models = None
self.folds = None
dtype = np.float32
self.preprocessor = preprocessor
self.callback = callback
def set_or_raise(value, exp_values, msg):
for exp_value in exp_values:
if exp_value is False:
continue
if value is None:
value = exp_value
elif value != exp_value:
raise ValueError(msg)
return value
domain = self.domain = set_or_raise(
domain, [data is not None and data.domain],
"mismatching domain")
nrows = set_or_raise(
nrows, [data is not None and len(data),
actual is not None and len(actual),
row_indices is not None and len(row_indices),
predicted is not None and predicted.shape[1],
probabilities is not None and probabilities.shape[1]],
"mismatching number of rows")
nclasses = set_or_raise(
nclasses, [domain and (len(domain.class_var.values)
if domain.has_discrete_class
else None),
probabilities is not None and probabilities.shape[2]],
"mismatching number of class values")
if nclasses is not None and probabilities is not None:
raise ValueError("regression results cannot have 'probabilities'")
nmethods = set_or_raise(
nmethods, [predicted is not None and predicted.shape[0],
probabilities is not None and probabilities.shape[0]],
"mismatching number of methods")
try:
dtype = data.Y.dtype
except AttributeError: # no data or no Y or not numpy
pass
if nmethods is not None:
self.failed = [False] * nmethods
if actual is not None:
self.actual = actual
elif nrows is not None:
self.actual = np.empty(nrows, dtype=dtype)
if row_indices is not None:
self.row_indices = row_indices
elif nrows is not None:
self.row_indices = np.empty(nrows, dtype=np.int32)
if predicted is not None:
self.predicted = predicted
elif nmethods is not None and nrows is not None:
self.predicted = np.empty((nmethods, nrows), dtype=dtype)
if probabilities is not None:
self.probabilities = probabilities
elif nmethods is not None and nrows is not None and \
nclasses is not None:
self.probabilities = \
np.empty((nmethods, nrows, nclasses), dtype=np.float32)
def train_if_succ(self, learner_index, learner, data):
if self.failed[learner_index]:
return False
try:
return learner(data)
except Exception as ex:
self.failed[learner_index] = ex
return False
def call_callback(self, progress):
if self.callback:
self.callback(progress)
def get_fold(self, fold):
results = Results()
results.data = self.data
if self.folds is None:
raise ValueError("This 'Results' instance does not have folds.")
if self.models is not None:
results.models = self.models[fold]
results.row_indices = self.row_indices[self.folds[fold]]
results.actual = self.actual[self.folds[fold]]
results.predicted = self.predicted[:, self.folds[fold]]
results.domain = self.domain
if self.probabilities is not None:
results.probabilities = self.probabilities[:, self.folds[fold]]
return results
def get_augmented_data(self, model_names, include_attrs=True, include_predictions=True, include_probabilities=True):
"""
Return the data, augmented with predictions, probabilities (if the task is classification) and folds info.
Predictions, probabilities and folds are inserted as meta attributes.
Args:
model_names (list): A list of strings containing learners' names.
include_attrs (bool): Flag that tells whether to include original attributes.
include_predictions (bool): Flag that tells whether to include predictions.
include_probabilities (bool): Flag that tells whether to include probabilities.
Returns:
Orange.data.Table: Data augmented with predictions, (probabilities) and (fold).
"""
assert self.predicted.shape[0] == len(model_names)
data = self.data[self.row_indices]
class_var = data.domain.class_var
classification = class_var and class_var.is_discrete
new_meta_attr = []
new_meta_vals = np.empty((len(data), 0))
if classification:
# predictions
if include_predictions:
new_meta_attr.extend(DiscreteVariable(name=name, values=class_var.values)
for name in model_names)
new_meta_vals = np.hstack((new_meta_vals, self.predicted.T))
# probabilities
if include_probabilities:
for name in model_names:
new_meta_attr.extend(ContinuousVariable(name="%s (%s)" % (name, value))
for value in class_var.values)
for i in self.probabilities:
new_meta_vals = np.hstack((new_meta_vals, i))
elif include_predictions:
# regression
new_meta_attr.extend(ContinuousVariable(name=name)
for name in model_names)
new_meta_vals = np.hstack((new_meta_vals, self.predicted.T))
# add fold info
if self.folds is not None:
new_meta_attr.append(DiscreteVariable(name="Fold", values=[i+1 for i, s in enumerate(self.folds)]))
fold = np.empty((len(data), 1))
for i, s in enumerate(self.folds):
fold[s, 0] = i
new_meta_vals = np.hstack((new_meta_vals, fold))
# append new columns to meta attributes
new_meta_attr = list(data.domain.metas) + new_meta_attr
new_meta_vals = np.hstack((data.metas, new_meta_vals))
X = data.X if include_attrs else np.empty((len(data), 0))
attrs = data.domain.attributes if include_attrs else []
domain = Domain(attrs, data.domain.class_vars, metas=new_meta_attr)
predictions = Table.from_numpy(domain, X, data.Y, metas=new_meta_vals)
predictions.name = data.name
return predictions
class CrossValidation(Results):
    """
    K-fold cross validation.

    If the constructor is given the data and a list of learning algorithms, it
    runs cross validation and returns an instance of `Results` containing the
    predicted values and probabilities.

    .. attribute:: k

        The number of folds.

    .. attribute:: random_state

        Seed used when shuffling instances before splitting into folds.
    """
    def __init__(self, data, learners, k=10, random_state=0, store_data=False,
                 store_models=False, preprocessor=None, callback=None, warnings=None):
        super().__init__(data, len(learners), store_data=store_data,
                         store_models=store_models, preprocessor=preprocessor,
                         callback=callback)
        self.k = k
        self.random_state = random_state
        Y = data.Y.copy().flatten()
        indices = None
        if data.domain.has_discrete_class:
            # Prefer stratified folds; fall back to plain KFold when some
            # class is too rare to appear in every fold.
            indices = skl_cross_validation.StratifiedKFold(
                Y, self.k, shuffle=True, random_state=self.random_state
            )
            if any(len(train) == 0 or len(test) == 0 for train, test in indices):
                if warnings is not None:
                    warnings.append("Using non-stratified sampling.")
                indices = None
        if indices is None:
            indices = skl_cross_validation.KFold(
                len(Y), self.k, shuffle=True, random_state=self.random_state
            )
        self.folds = []
        if self.store_models:
            self.models = []
        # `ptr` tracks where the current fold's rows start in the flat
        # result arrays.
        ptr = 0
        nmethods = len(learners)
        n_callbacks = nmethods * self.k
        for fold_idx, (train, test) in enumerate(indices):
            train_data, test_data = data[train], data[test]
            if len(train_data) == 0 or len(test_data) == 0:
                raise RuntimeError("One of the train or test folds is empty.")
            if self.preprocessor is not None:
                train_data = self.preprocessor(train_data)
            fold_slice = slice(ptr, ptr + len(test))
            self.folds.append(fold_slice)
            self.row_indices[fold_slice] = test
            self.actual[fold_slice] = test_data.Y.flatten()
            if self.store_models:
                fold_models = [None] * nmethods
                self.models.append(fold_models)
            for i, learner in enumerate(learners):
                model = self.train_if_succ(i, learner, train_data)
                self.call_callback((fold_idx * nmethods + i) / n_callbacks)
                if not model:
                    continue
                if self.store_models:
                    fold_models[i] = model
                if data.domain.has_discrete_class:
                    values, probs = model(test_data, model.ValueProbs)
                    self.predicted[i][fold_slice] = values
                    self.probabilities[i][fold_slice, :] = probs
                elif data.domain.has_continuous_class:
                    values = model(test_data, model.Value)
                    self.predicted[i][fold_slice] = values
            ptr += len(test)
        self.call_callback(1)
class LeaveOneOut(Results):
    """Leave-one-out testing: each instance in turn is held out as a
    one-row test set while the model trains on the rest."""
    def __init__(self, data, learners, store_data=False, store_models=False,
                 preprocessor=None, callback=None):
        super().__init__(data, len(learners), store_data=store_data,
                         store_models=store_models, preprocessor=preprocessor,
                         callback=callback)
        domain = data.domain
        X = data.X.copy()
        Y = data._Y.copy()
        metas = data.metas.copy()

        # Row 0 is reused as the one-row test slot: before each iteration the
        # test row is swapped into position 0, so these views stay valid.
        teX, trX = X[:1], X[1:]
        teY, trY = Y[:1], Y[1:]
        te_metas, tr_metas = metas[:1], metas[1:]
        if data.has_weights():
            W = data.W.copy()
            teW, trW = W[:1], W[1:]
        else:
            W = teW = trW = None
        self.row_indices = np.arange(len(data))
        if self.store_models:
            self.models = []
        self.actual = Y.flatten()
        nmethods = len(learners)
        n_callbacks = nmethods * len(data)
        for test_idx in self.row_indices:
            X[[0, test_idx]] = X[[test_idx, 0]]
            Y[[0, test_idx]] = Y[[test_idx, 0]]
            metas[[0, test_idx]] = metas[[test_idx, 0]]
            # BUG FIX: `if W:` raises "truth value of an array ... is
            # ambiguous" for numpy arrays with more than one element; we only
            # need to know whether weights exist at all.
            if W is not None:
                W[[0, test_idx]] = W[[test_idx, 0]]
            test_data = Table.from_numpy(domain, teX, teY, te_metas, teW)
            train_data = Table.from_numpy(domain, trX, trY, tr_metas, trW)
            if self.preprocessor is not None:
                train_data = self.preprocessor(train_data)
            if self.store_models:
                fold_models = [None] * nmethods
                self.models.append(fold_models)
            for i, learner in enumerate(learners):
                model = self.train_if_succ(i, learner, train_data)
                self.call_callback((test_idx * nmethods + i) / n_callbacks)
                if not model:
                    continue
                if self.store_models:
                    fold_models[i] = model
                if data.domain.has_discrete_class:
                    values, probs = model(test_data, model.ValueProbs)
                    self.predicted[i][test_idx] = values
                    self.probabilities[i][test_idx, :] = probs
                elif data.domain.has_continuous_class:
                    values = model(test_data, model.Value)
                    self.predicted[i][test_idx] = values
        self.call_callback(1)
class TestOnTrainingData(Results):
    """Trains and test on the same data"""
    def __init__(self, data, learners, store_data=False, store_models=False,
                 preprocessor=None, callback=None):
        super().__init__(data, len(learners), store_data=store_data,
                         store_models=store_models, preprocessor=preprocessor,
                         callback=callback)
        self.row_indices = np.arange(len(data))
        nmethods = len(learners)
        if self.store_models:
            models = [None] * nmethods
            self.models = [models]
        self.actual = data.Y.flatten()
        # Preprocessing is applied only to the training copy; predictions
        # below are still made against the original `data`.
        if self.preprocessor is not None:
            train_data = self.preprocessor(data)
        else:
            train_data = data
        for i, learner in enumerate(learners):
            model = self.train_if_succ(i, learner, train_data)
            self.call_callback(i / nmethods)
            if not model:
                continue
            if self.store_models:
                models[i] = model
            if data.domain.has_discrete_class:
                values, probs = model(data, model.ValueProbs)
                self.predicted[i] = values
                self.probabilities[i] = probs
            elif data.domain.has_continuous_class:
                values = model(data, model.Value)
                self.predicted[i] = values
        self.call_callback(1)
class ShuffleSplit(Results):
    """Repeated random train/test splits: the data is shuffled and split
    into a train and a test part `n_resamples` times."""
    def __init__(self, data, learners, n_resamples=10, train_size=None,
                 test_size=0.1, random_state=0, store_data=False,
                 store_models=False, preprocessor=None, callback=None):
        super().__init__(data, len(learners), store_data=store_data,
                         store_models=store_models, preprocessor=preprocessor,
                         callback=callback)
        self.store_models = store_models
        self.n_resamples = n_resamples
        self.train_size = train_size
        self.test_size = test_size
        self.random_state = random_state
        indices = skl_cross_validation.ShuffleSplit(
            len(data), n_iter=self.n_resamples, train_size=self.train_size,
            test_size=test_size, random_state=self.random_state
        )
        self.folds = []
        if self.store_models:
            self.models = []
        # Per-resample pieces are collected in lists and stacked at the end,
        # since the total number of result rows is only known afterwards.
        row_indices = []
        actual = []
        predicted = [[] for _ in learners]
        probabilities = [[] for _ in learners]
        fold_start = 0
        nmethods = len(learners)
        n_callbacks = self.n_resamples * nmethods
        for samp_idx, (train, test) in enumerate(indices):
            train_data, test_data = data[train], data[test]
            if preprocessor is not None:
                train_data = self.preprocessor(train_data)
            self.folds.append(slice(fold_start, fold_start + len(test)))
            row_indices.append(test)
            actual.append(test_data.Y.flatten())
            if self.store_models:
                fold_models = [None] * nmethods
                self.models.append(fold_models)
            for i, learner in enumerate(learners):
                model = self.train_if_succ(i, learner, train_data)
                self.call_callback((samp_idx * nmethods + i) / n_callbacks)
                if model:
                    if self.store_models:
                        fold_models[i] = model
                    if data.domain.has_discrete_class:
                        values, probs = model(test_data, model.ValueProbs)
                        predicted[i].append(values)
                        probabilities[i].append(probs)
                    elif data.domain.has_continuous_class:
                        values = model(test_data, model.Value)
                        predicted[i].append(values)
                else:
                    # Failed learners contribute zeros so that the stacked
                    # arrays keep consistent shapes across methods.
                    predicted[i].append(np.zeros((len(test_data),)))
                    if data.domain.has_discrete_class:
                        probabilities[i].append(
                            np.zeros((len(test_data),
                                      len(data.domain.class_var.values))))
            fold_start += len(test)
        row_indices = np.hstack(row_indices)
        actual = np.hstack(actual)
        predicted = np.array([np.hstack(pred) for pred in predicted])
        if data.domain.has_discrete_class:
            probabilities = np.array([np.vstack(prob) for prob in probabilities])
        nrows = len(actual)
        nmodels = len(predicted)
        self.nrows = len(actual)
        self.row_indices = row_indices
        self.actual = actual
        self.predicted = predicted.reshape(nmodels, nrows)
        if data.domain.has_discrete_class:
            self.probabilities = probabilities
        self.call_callback(1)
class TestOnTestData(Results):
    """
    Test on a separate test data set.
    """
    def __init__(self, train_data, test_data, learners, store_data=False,
                 store_models=False, preprocessor=None, callback=None):
        super().__init__(test_data, len(learners), store_data=store_data,
                         store_models=store_models, preprocessor=preprocessor,
                         callback=callback)
        nmethods = len(learners)
        if self.store_models:
            models = [None] * nmethods
            self.models = [models]
        self.row_indices = np.arange(len(test_data))
        self.actual = test_data.Y.flatten()
        # Preprocessing is applied to the training data only.
        if self.preprocessor is not None:
            train_data = self.preprocessor(train_data)
        for i, learner in enumerate(learners):
            model = self.train_if_succ(i, learner, train_data)
            self.call_callback(i / nmethods)
            if not model:
                continue
            if train_data.domain.has_discrete_class:
                values, probs = model(test_data, model.ValueProbs)
                self.predicted[i] = values
                self.probabilities[i][:, :] = probs
            elif train_data.domain.has_continuous_class:
                values = model(test_data, model.Value)
                self.predicted[i] = values
            if self.store_models:
                models[i] = model
        self.nrows = len(test_data)
        # All test rows form a single "fold".
        self.folds = [slice(0, len(test_data))]
        self.call_callback(1)
def sample(table, n=0.7, stratified=False, replace=False,
           random_state=None):
    """
    Samples data instances from a data table. Returns the sample and
    a data set from input data table that are not in the sample. Also
    uses several sampling functions from
    `scikit-learn <http://scikit-learn.org>`_.

    Parameters
    ----------
    table : data table
        A data table from which to sample.

    n : float, int (default = 0.7)
        If float, should be between 0.0 and 1.0 and represents
        the proportion of data instances in the resulting sample. If
        int, n is the number of data instances in the resulting sample.

    stratified : bool, optional (default = False)
        If true, sampling will try to consider class values and
        match distribution of class values
        in train and test subsets.

    replace : bool, optional (default = False)
        sample with replacement

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """
    # BUG FIX: isinstance (not `type(n) == float`) also accepts float
    # subclasses such as np.float64, which `type() ==` silently treated
    # as an absolute count.
    if isinstance(n, float):
        n = int(n * len(table))

    if replace:
        if random_state is None:
            rgen = np.random
        else:
            rgen = np.random.mtrand.RandomState(random_state)
        # BUG FIX: `random_integers` was deprecated and then removed from
        # NumPy; randint(0, len(table)) draws from the identical inclusive
        # range [0, len(table) - 1] (random_integers(lo, hi) was implemented
        # as randint(lo, hi + 1), so seeded streams match).
        sample = rgen.randint(0, len(table), n)
        o = np.ones(len(table))
        o[sample] = 0
        others = np.nonzero(o)[0]
        return table[sample], table[others]

    n = len(table) - n
    if stratified and table.domain.has_discrete_class:
        test_size = max(len(table.domain.class_var.values), n)
        ind = skl_cross_validation.StratifiedShuffleSplit(
            table.Y.ravel(), n_iter=1,
            test_size=test_size, train_size=len(table) - test_size,
            random_state=random_state)
    else:
        ind = skl_cross_validation.ShuffleSplit(
            len(table), n_iter=1,
            test_size=n, random_state=random_state)
    ind = next(iter(ind))
    return table[ind[0]], table[ind[1]]
| {
"content_hash": "2c6975753969bfd2810a23a6c3411648",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 120,
"avg_line_length": 40.122749590834694,
"alnum_prop": 0.573648786457271,
"repo_name": "kwikadi/orange3",
"id": "9961d810740c7b8026bd978e71f7428a4421ee33",
"size": "24515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Orange/evaluation/testing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "1985"
},
{
"name": "JavaScript",
"bytes": "3025"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "19900"
},
{
"name": "Python",
"bytes": "4052846"
},
{
"name": "Shell",
"bytes": "39117"
}
],
"symlink_target": ""
} |
"""
Handle objects from this module relevant to a Contact or a User
"""
from treeio.core.models import Object
from treeio.projects.templatetags.projects import projects_task_list
# Objects from this module relevant to a Contact, keyed by the Contact
# attribute through which they are reached.
CONTACT_OBJECTS = {
    'manager': {'label': 'Managed Projects',
                'objects': [],
                'templatetag': None},
    'client': {'label': 'Ordered Projects',
               'objects': [],
               'templatetag': None},
    'task_set': {'label': 'Managed Tasks',
                 'objects': [],
                 'templatetag': projects_task_list},
}

# Objects from this module relevant to a User.
USER_OBJECTS = {
    'task_set': {'label': 'Assigned Tasks',
                 'objects': [],
                 'templatetag': projects_task_list},
}
def get_contact_objects(current_user, contact):
    """
    Returns a dictionary with keys specified as contact attributes
    and values as dictionaries with labels and set of relevant objects.
    """
    # BUG FIX: dict(CONTACT_OBJECTS) was a *shallow* copy, so assigning
    # objects[key]['objects'] below mutated the shared module-level
    # CONTACT_OBJECTS template. Copy the inner dicts too.
    objects = {key: dict(value) for key, value in CONTACT_OBJECTS.items()}
    for key in objects:
        if hasattr(contact, key):
            manager = getattr(contact, key)
            try:
                # Best effort: hide objects flagged hidden; related managers
                # without a `status` field raise, in which case show all.
                manager = manager.filter(status__hidden=False)
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt.
                pass
            objects[key]['objects'] = Object.filter_permitted(
                current_user, manager)
    return objects
def get_user_objects(current_user, user):
    """
    Returns a dictionary with keys specified as contact attributes
    and values as dictionaries with labels and set of relevant objects.
    """
    # BUG FIX: dict(USER_OBJECTS) was a *shallow* copy, so assigning
    # objects[key]['objects'] below mutated the shared module-level
    # USER_OBJECTS template. Copy the inner dicts too.
    objects = {key: dict(value) for key, value in USER_OBJECTS.items()}
    for key in objects:
        if hasattr(user, key):
            manager = getattr(user, key)
            try:
                # Best effort: hide objects flagged hidden; related managers
                # without a `status` field raise, in which case show all.
                manager = manager.filter(status__hidden=False)
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt.
                pass
            objects[key]['objects'] = Object.filter_permitted(
                current_user, manager)
    return objects
| {
"content_hash": "a86a12b06c384e0d888ea59c8daeef3b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 71,
"avg_line_length": 32.03174603174603,
"alnum_prop": 0.5555004955401388,
"repo_name": "nuwainfo/treeio",
"id": "316404e3ae966766e4269835c51554fe357e988c",
"size": "2131",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "projects/identities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "400811"
},
{
"name": "JavaScript",
"bytes": "2137384"
},
{
"name": "Makefile",
"bytes": "4598"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2827961"
},
{
"name": "Shell",
"bytes": "6488"
}
],
"symlink_target": ""
} |
from os.path import isdir, dirname, abspath
from os import getcwd
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding, stdout
from collections import OrderedDict, defaultdict
import datetime, re
import pytz
import warnings
import numpy as np
import pandas as pd
import networkx as nx
from IPython.core.display import HTML, display
from sklearn.metrics import mean_squared_error
from nilmtk.datastore import HDFDataStore, CSVDataStore
def show_versions():
    """Prints versions of various dependencies.

    Collects the current date, platform, Python version and the
    `__version__` of each package in PACKAGES, prints pandas'
    own version report, then prints the collected table.
    """
    output = OrderedDict()
    output["Date"] = str(datetime.datetime.now())

    import sys
    import platform
    import importlib
    output["Platform"] = str(platform.platform())
    system_information = sys.version_info
    output["System version"] = "{}.{}".format(system_information.major,
                                              system_information.minor)

    PACKAGES = [
        "nilmtk", "nilm_metadata", "numpy", "matplotlib", "pandas", "sklearn",
        "hmmlearn"]

    for package_name in PACKAGES:
        key = package_name + " version"
        try:
            # BUG FIX: replaces the exec()/eval() string-building with
            # importlib + getattr, which is safe and debuggable.
            module = importlib.import_module(package_name)
        except ImportError:
            output[key] = "Not found"
        else:
            output[key] = getattr(module, "__version__", "unknown")

    try:
        print(pd.show_versions())
    except Exception:
        # Was a bare `except:`; best effort only — pandas' own report
        # is a bonus, not a requirement.
        pass
    else:
        print("")

    for k, v in output.items():
        print("{}: {}".format(k, v))
def timedelta64_to_secs(timedelta):
    """Convert `timedelta` to seconds.

    Parameters
    ----------
    timedelta : np.timedelta64

    Returns
    -------
    float : seconds
    """
    # An empty input yields an empty float array rather than dividing.
    if not len(timedelta):
        return np.array([])
    return timedelta / np.timedelta64(1, 's')
def tree_root(graph):
    """Return the unique node of `graph` with in-degree zero (the root).

    Parameters
    ----------
    graph : networkx.Graph

    Raises
    ------
    RuntimeError : if there is no root or more than one root.
    """
    # from http://stackoverflow.com/a/4123177/732596
    assert isinstance(graph, nx.Graph)
    roots = [node for node, degree in graph.in_degree() if degree == 0]
    if not roots:
        raise RuntimeError('Tree has no root!')
    if len(roots) > 1:
        raise RuntimeError('Tree has more than one root!')
    return roots[0]
def nodes_adjacent_to_root(graph):
    """Return the successors of `graph`'s root node."""
    return graph.successors(tree_root(graph))
def index_of_column_name(df, name):
    """Return the positional index of column `name` in `df`.

    Raises
    ------
    KeyError : if `name` is not a column of `df`.
    """
    for position, column in enumerate(df.columns):
        if column == name:
            return position
    raise KeyError(name)
def find_nearest(known_array, test_array):
    """Find closest value in `known_array` for each element in `test_array`.

    Parameters
    ----------
    known_array : numpy array
        consisting of scalar values only; shape: (m, 1)
    test_array : numpy array
        consisting of scalar values only; shape: (n, 1)

    Returns
    -------
    indices : numpy array; shape: (n, 1)
        For each value in `test_array` finds the index of the closest value
        in `known_array`.
    residuals : numpy array; shape: (n, 1)
        For each value in `test_array` finds the difference from the closest
        value in `known_array`.
    """
    # from http://stackoverflow.com/a/20785149/732596
    order = np.argsort(known_array)
    known_sorted = known_array[order]
    # For each test value, candidates are the sorted neighbours on
    # either side of its insertion point (clipped to valid indices).
    insert_pos = np.searchsorted(known_sorted, test_array)
    upper = np.clip(insert_pos, 0, len(known_sorted) - 1)
    lower = np.clip(insert_pos - 1, 0, len(known_sorted) - 1)
    dist_upper = known_sorted[upper] - test_array
    dist_lower = test_array - known_sorted[lower]
    # Ties go to the upper neighbour, matching `<=` below.
    nearest_sorted = np.where(dist_upper <= dist_lower, upper, lower)
    indices = order[nearest_sorted]
    residuals = test_array - known_array[indices]
    return indices, residuals
def container_to_string(container, sep='_'):
    """Join the elements of `container` with `sep`.

    Strings pass through unchanged; non-iterables fall back to str().
    """
    if isinstance(container, str):
        return container
    try:
        return sep.join(str(element) for element in container)
    except TypeError:
        # Not iterable: stringify the whole object.
        return str(container)
def simplest_type_for(values):
    """Collapse `values` to its simplest representation.

    Empty -> None; a single element -> that element; otherwise a tuple.
    """
    count = len(values)
    if count == 0:
        return None
    if count == 1:
        return next(iter(values))
    return tuple(values)
def flatten_2d_list(list2d):
    """Flatten one level of nesting.

    Strings and unsized objects are kept as single elements; anything
    with a length is spliced in.
    """
    flat = []
    for element in list2d:
        if isinstance(element, str):
            flat.append(element)
            continue
        try:
            len(element)
        except TypeError:
            # Scalar (no length): keep as-is.
            flat.append(element)
        else:
            flat.extend(element)
    return flat
def get_index(data):
    """
    Parameters
    ----------
    data : pandas.DataFrame or Series or DatetimeIndex

    Returns
    -------
    index : the index for the DataFrame or Series
    """
    if isinstance(data, pd.DatetimeIndex):
        return data
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return data.index
    raise TypeError('wrong type for `data`.')
def convert_to_timestamp(t):
    """
    Parameters
    ----------
    t : str or pd.Timestamp or datetime or None

    Returns
    -------
    pd.Timestamp or None
    """
    if t is None:
        return None
    return pd.Timestamp(t)
def get_module_directory():
    """Return the directory containing this module.

    Falls back through progressively more robust strategies until one
    yields an existing directory; asserts on total failure.
    """
    # Taken from http://stackoverflow.com/a/6098238/732596
    path_to_this_file = dirname(getfile(currentframe()))
    if not isdir(path_to_this_file):
        encoding = getfilesystemencoding()
        try:
            # BUG FIX: `unicode` does not exist on Python 3, where str is
            # already unicode; previously this raised NameError.
            path_to_this_file = dirname(unicode(__file__, encoding))
        except NameError:
            path_to_this_file = dirname(__file__)
    if not isdir(path_to_this_file):
        # BUG FIX: the computed path was previously discarded instead of
        # being assigned to path_to_this_file.
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
    if not isdir(path_to_this_file):
        path_to_this_file = getcwd()
    assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
    return path_to_this_file
def dict_to_html(dictionary):
    """Render `dictionary` as a nested HTML unordered list.

    Values containing 'http' get their URLs turned into anchors; list
    values become nested <ul> items; dict values recurse.
    """
    def format_string(value):
        try:
            if isinstance(value, str) and 'http' in value:
                return re.sub(r'(http[^\s\)]+)', r'<a href="\1">\1</a>', value)
            return '{}'.format(value)
        except UnicodeEncodeError:
            return ''

    parts = ['<ul>']
    for key, value in dictionary.items():
        parts.append('<li><strong>{}</strong>: '.format(key))
        if isinstance(value, list):
            parts.append('<ul>')
            for item in value:
                parts.append('<li>{}</li>'.format(format_string(item)))
            parts.append('</ul>')
        elif isinstance(value, dict):
            parts.append(dict_to_html(value))
        else:
            parts.append(format_string(value))
        parts.append('</li>')
    parts.append('</ul>')
    return ''.join(parts)
def print_dict(dictionary):
    """Render `dictionary` as a nested HTML list in IPython/Jupyter."""
    markup = dict_to_html(dictionary)
    display(HTML(markup))
def offset_alias_to_seconds(alias):
    """Seconds for each period length."""
    rng = pd.date_range('00:00', periods=2, freq=alias)
    return (rng[1] - rng[0]).total_seconds()
def check_directory_exists(d):
    """Raise IOError unless `d` is an existing directory."""
    if isdir(d):
        return
    raise IOError("Directory '{}' does not exist.".format(d))
def tz_localize_naive(timestamp, tz):
    """Convert `timestamp` to timezone `tz`.

    Naive timestamps are first assumed to be UTC; None / NaT inputs
    map to NaT, and a None `tz` returns the input untouched.
    """
    if tz is None:
        return timestamp
    if timestamp is None or pd.isnull(timestamp):
        return pd.NaT
    ts = pd.Timestamp(timestamp)
    if timestamp_is_naive(ts):
        ts = ts.tz_localize('UTC')
    return ts.tz_convert(tz)
def get_tz(df):
    """Return the timezone of `df`'s index, or None when the index
    has no `tz` attribute."""
    return getattr(df.index, 'tz', None)
def timestamp_is_naive(timestamp):
    """
    Parameters
    ----------
    timestamp : pd.Timestamp or datetime.datetime

    Returns
    -------
    True if `timestamp` is naive (i.e. if it does not have a
    timezone associated with it). See:
    https://docs.python.org/3/library/datetime.html#available-types
    """
    tzinfo = timestamp.tzinfo
    return tzinfo is None or tzinfo.utcoffset(timestamp) is None
def get_datastore(filename, format=None, mode='r'):
    """Open a data store.

    Parameters
    ----------
    filename : string
    format : 'CSV' or 'HDF', default: infer from filename ending.
    mode : 'r' (read-only), 'a' (append) or 'w' (write), default: 'r'

    Returns
    -------
    datastore : HDFDataStore or CSVDataStore
        (The original docstring claimed a metadata dict; a store is returned.)

    Raises
    ------
    ValueError : if `filename` is None or the format is not recognised.
    """
    # BUG FIX: the original built `ValueError('filename is None')` but never
    # raised it, silently returning None (and crashed earlier with
    # AttributeError when format also had to be inferred from None).
    if filename is None:
        raise ValueError('filename is None')
    if not format:
        if filename.endswith(".h5"):
            format = "HDF"
        elif filename.endswith(".csv"):
            format = "CSV"
    if format == "HDF":
        return HDFDataStore(filename, mode)
    elif format == "CSV":
        return CSVDataStore(filename)
    else:
        raise ValueError('format not recognised')
def normalise_timestamp(timestamp, freq):
    """Returns the nearest Timestamp to `timestamp` which would be
    in the set of timestamps returned by pd.DataFrame.resample(freq=freq)
    """
    timestamp = pd.Timestamp(timestamp)
    # BUG FIX: np.nan instead of np.NaN — the NaN alias was removed in
    # NumPy 2.0 (same value, supported spelling).
    series = pd.Series(np.nan, index=[timestamp])
    # Resampling a single point snaps its index to the freq grid.
    resampled = series.resample(freq).mean()
    return resampled.index[0]
def print_on_line(*strings):
    """Print `strings` without a trailing newline and flush immediately.

    Useful for progress indicators that repeatedly extend one line.
    """
    print(*strings, end="")
    stdout.flush()  # force the partial line out even without a newline
def append_or_extend_list(lst, value):
    """Add `value` to `lst` in place.

    Lists are spliced in with extend, None is ignored, and any other
    value is appended as a single element.
    """
    if value is None:
        return
    adder = lst.extend if isinstance(value, list) else lst.append
    adder(value)
def convert_to_list(list_like):
    """Return `list_like` as a list; None becomes the empty list."""
    if list_like is None:
        return []
    return list(list_like)
def most_common(lst):
    """Returns the most common entry in lst.

    Ties are broken arbitrarily.
    """
    lst = list(lst)
    counts = pd.Series({item: lst.count(item) for item in set(lst)})
    # BUG FIX: Series.sort() was removed from pandas; sort_values() is the
    # supported replacement and keeps the original ascending order, so the
    # last index entry is the most frequent item.
    counts = counts.sort_values()
    return counts.index[-1]
def capitalise_first_letter(string):
    """Return `string` with its first character upper-cased.

    The empty string is returned unchanged (the original raised
    IndexError on "").
    """
    if not string:
        return string
    return string[0].upper() + string[1:]
def capitalise_index(index):
    """Return `index`'s labels as a list, each with its first letter
    upper-cased."""
    return [capitalise_first_letter(label) for label in index]
def capitalise_legend(ax):
    """Re-draw `ax`'s legend with capitalised labels; returns `ax`."""
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, capitalise_index(labels))
    return ax
def compute_rmse(ground_truth, predictions, pretty=True):
    """
    Compute the RMS error between the time-series of appliance
    ground truth values and predicted values.

    Parameters
    ----------
    ground_truth : `pandas.DataFrame` containing the ground truth series
                   for the appliances.
    predictions : `pandas.DataFrame` containing the predicted time-series
                  for each appliance. If a appliance is present in
                  `ground_truth` but absent in `predictions` (or only
                  contains NA values), it is not listed in the output.
    pretty : If `True`, tries to use the appliance labels if possible. If
             a type of appliance is present more than once, resulting in
             duplicate labels, building and instance number are added
             to differentiate them.

    Returns
    -------
    pandas.Series with the RMSe for each appliance
    """
    # This was initially added to simplify examples, see #652.
    rms_error = []
    app_counts = defaultdict(int)
    for app_idx, app in enumerate(ground_truth.columns):
        if pretty:
            try:
                app_label = app.label()
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Column is not an appliance
                # object, so fall back to raw column values as labels.
                pretty = False
                app_label = app
        else:
            app_label = app

        gt_app = ground_truth.iloc[:, app_idx]
        pred_app = predictions.iloc[:, app_idx]
        if pred_app.empty:
            continue

        # Align on the ground-truth index and drop rows where either
        # series is NA, so the RMSE covers only overlapping samples.
        df_app = pd.DataFrame({'gt': gt_app, 'pr': pred_app}, index=gt_app.index).dropna()
        if not df_app.empty:
            app_rms_error = np.sqrt(mean_squared_error(df_app['gt'], df_app['pr']))
        else:
            # BUG FIX: np.nan instead of np.NaN (alias removed in NumPy 2.0).
            app_rms_error = np.nan

        if pretty:
            app_counts[app_label] += 1
        rms_error.append([app, app_label, app_rms_error])

    if pretty:
        # Disambiguate duplicated appliance labels by appending the
        # building and instance numbers.
        for current_label, current_count in app_counts.items():
            if current_count < 2:
                continue
            # A loop should be fine for such small lists
            for app_data in rms_error:
                if app_data[1] != current_label:
                    continue
                app = app_data[0]
                app_data[1] = '{} ({}, {})'.format(
                    current_label,
                    app.building,
                    app.instance
                )

    return pd.Series(dict(
        (item[1], item[2]) for item in rms_error
    ))
def safe_resample(data, **resample_kwargs):
    """Resample `data`, tolerating legacy resample kwargs, duplicate
    index entries and ambiguous DST timestamps.

    `resample_kwargs` must include `rule` and may include the old-style
    `how`, `fill_method` and `limit` keywords, which are translated into
    the modern method-chaining API by `_resample_chain`.
    """
    if data.empty:
        return data
    def _resample_chain(data, all_resample_kwargs):
        """_resample_chain provides a compatibility function for
        deprecated/removed DataFrame.resample kwargs"""
        rule = all_resample_kwargs.pop('rule')
        axis = all_resample_kwargs.pop('axis', None)
        on = all_resample_kwargs.pop('on', None)
        level = all_resample_kwargs.pop('level', None)
        # Only forward the kwargs that were actually supplied.
        resample_kwargs = {}
        if axis is not None: resample_kwargs['axis'] = axis
        if on is not None: resample_kwargs['on'] = on
        if level is not None: resample_kwargs['level'] = level
        # Legacy `fill_method='ffill'` etc. becomes a chained method call.
        fill_method_str = all_resample_kwargs.pop('fill_method', None)
        if fill_method_str:
            limit = all_resample_kwargs.pop('limit', None)
            fill_method = lambda df: getattr(df, fill_method_str)(limit=limit)
        else:
            fill_method = lambda df: df
        # Legacy `how='mean'` etc. becomes the aggregation method call.
        how_str = all_resample_kwargs.pop('how', None)
        if how_str:
            how = lambda df: getattr(df, how_str)()
        else:
            how = lambda df: df
        if all_resample_kwargs:
            warnings.warn("Not all resample_kwargs were consumed: {}".format(repr(all_resample_kwargs)))
        return fill_method(how(data.resample(rule, **resample_kwargs)))
    try:
        # Duplicate index entries break resampling; keep the first value.
        dups_in_index = data.index.duplicated(keep='first')
        #TODO: remove this after validation tests are ready
        if dups_in_index.any():
            warnings.warn("Found duplicate index. Keeping first value")
            data = data[~dups_in_index]
        data = _resample_chain(data, resample_kwargs)
    except pytz.AmbiguousTimeError:
        # Work-around for
        # https://github.com/pydata/pandas/issues/10117
        # Resample in UTC (no DST ambiguity) then convert back.
        tz = data.index.tz.zone
        data = data.tz_convert('UTC')
        data = _resample_chain(data, resample_kwargs)
        data = data.tz_convert(tz)
    return data
| {
"content_hash": "8f7703a7576fe89362f4ea216a3fd302",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 105,
"avg_line_length": 28.095969289827256,
"alnum_prop": 0.5865555403743681,
"repo_name": "nilmtk/nilmtk",
"id": "b70b40b9d4fcf37805dcf67fd115b32258a15bfe",
"size": "14638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nilmtk/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "Jupyter Notebook",
"bytes": "56819"
},
{
"name": "Python",
"bytes": "641879"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
} |
import logging
logging.basicConfig(level=logging.INFO)
import time
import multiprocessing as mp
from random import choice, uniform
import logstats
def important_task(stats):
    """Simulate work forever: bump a random counter ('A', 'B' or 'C').

    Runs in a child process; starts a logstats reporter thread for the
    child's own `stats` object before entering the loop.
    """
    logstats.thread.start(stats)
    while True:
        stats[choice(['A', 'B', 'C'])] += 1
        time.sleep(uniform(0, 0.2))  # random pause of up to 200 ms
if __name__ == '__main__':
    # Parent stats object with its own reporter thread; each worker
    # process receives a linked child stats object via get_child().
    stats = logstats.Logstats()
    logstats.thread.start(stats)
    for i in range(4):
        p = mp.Process(target=important_task, args=(stats.get_child(), ))
        p.start()
| {
"content_hash": "7c32019512ba9e67c93bfd3ae2576d5c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 20.72,
"alnum_prop": 0.640926640926641,
"repo_name": "vrde/logstats",
"id": "3a17166ccedbbe88554623675d053aa0ba783386",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/multiprocessing_watch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4793"
}
],
"symlink_target": ""
} |
__author__ = 'mmoisen'
import sys
import time
import os
from models import Probe
from datetime import datetime
import logging
LOG_FILENAME = 'ds18b20.log'
# Log DEBUG and above to both a timestamped file and (unformatted) stderr.
logger = logging.getLogger('test_ds18b20')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(LOG_FILENAME)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(logging.StreamHandler())
def probes():
    """Build a Probe for every 1-wire slave device.

    With no CLI arguments, scans Probe.BASE_DIR (skipping the bus master
    entry); otherwise each CLI argument is taken as a device file name.
    Probes that fail to initialise are logged and skipped.

    NOTE(review): `ex.message` is Python 2 only, consistent with the
    print statements used elsewhere in this module.
    """
    probes = []
    if len(sys.argv) == 1:
        #probes = [Probe('hlt', file) for file in os.listdir(Probe.BASE_DIR) if file != 'w1_bus_master1']
        files = [file for file in os.listdir(Probe.BASE_DIR) if file != 'w1_bus_master1']
    else:
        files = [file for file in sys.argv[1:]]
    for file in files:
        try:
            probe = Probe('hlt', file)
            probes.append(probe)
        except Exception as ex:
            logger.error("Failed to initialize probe {}: {} - {}".format(file, ex.__class__.__name__, ex.message))
    return probes
# Poll every probe every 5 seconds, logging read failures.
# NOTE(review): the probe list is rebuilt on each iteration, so devices
# plugged in or removed at runtime are picked up automatically.
while True:
    dt = datetime.now()
    print "\n",dt
    for probe in probes():
        try:
            print "{} temperature is {}".format(probe.file_name, probe.temp)
        except Exception as ex:
            logger.error("Error reading {}: {} - {}".format(probe.file_name, ex.__class__.__name__, ex.message))
    time.sleep(5)
"content_hash": "c50482e211b8af24b8347a38d0a69e52",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 114,
"avg_line_length": 29.51063829787234,
"alnum_prop": 0.6286950252343186,
"repo_name": "mkmoisen/brew",
"id": "510f2dda2b2ddee81ee8a8feb13caaa412113e5b",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_ds18b20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "20419"
},
{
"name": "JavaScript",
"bytes": "99201"
},
{
"name": "Python",
"bytes": "162545"
},
{
"name": "Smarty",
"bytes": "52785"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Sets user_agent to max_length=1000 (nullable, blank-allowed) on both
    # the failed-login and login log models; per the migration filename
    # this enlarges the previous field size.
    dependencies = [
        ('useraudit', '0003_auto_20160406_1434'),
    ]
    operations = [
        migrations.AlterField(
            model_name='failedloginlog',
            name='user_agent',
            field=models.CharField(max_length=1000, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='loginlog',
            name='user_agent',
            field=models.CharField(max_length=1000, null=True, blank=True),
        ),
    ]
| {
"content_hash": "4064f0b810d2289c6c5db9bb04f45407",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 26.130434782608695,
"alnum_prop": 0.5906821963394343,
"repo_name": "muccg/django-useraudit",
"id": "1d0ced47a484e3198292ed09450c7071b1f11ede",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "useraudit/migrations/0004_enlarge_user_agent_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "89"
},
{
"name": "Python",
"bytes": "68025"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
} |
from typing import Dict
from datadog_checks.base import AgentCheck
ALLOWED_RESOURCES_FOR_FILTERS = ['database', 'forest', 'host', 'server']

BASE_ENDPOINT = '/manage/v2'

# Both the singular and the plural resource name map to an equivalent
# descriptor, so lookups work with either spelling.
RESOURCE_TYPES = {}  # type: Dict[str, Dict]
for _singular, _plural, _tag_name in [
    ('cluster', 'clusters', 'cluster_name'),
    ('forest', 'forests', 'forest_name'),
    ('database', 'databases', 'database_name'),
    ('group', 'groups', 'group_name'),
    ('host', 'hosts', 'marklogic_host_name'),
    ('server', 'servers', 'server_name'),
]:
    for _alias in (_singular, _plural):
        RESOURCE_TYPES[_alias] = {
            'plural': _plural,
            'singular': _singular,
            'tag_name': _tag_name,
        }

# Storage metrics are duplications
RESOURCE_METRICS_AVAILABLE = {
    'forest': {'status': True, 'storage': True, 'requests': False},
    'database': {'status': True, 'storage': False, 'requests': False},
    'host': {'status': True, 'storage': False, 'requests': True},
    'server': {'status': False, 'storage': False, 'requests': True},
}

# Metric units that are submitted as gauges.
GAUGE_UNITS = [
    '%',
    'hits/sec',
    'locks/sec',
    'MB',
    'MB/sec',
    'misses/sec',
    'quantity',
    'quantity/sec',
    'sec',
    'sec/sec',
]

# The integration only checks the databases and forests health
STATE_HEALTH_MAPPER = {
    "info": AgentCheck.OK,
    "at-risk": AgentCheck.WARNING,
    "offline": AgentCheck.WARNING,
    "maintenance": AgentCheck.WARNING,
    "critical": AgentCheck.CRITICAL,
}

SERVICE_CHECK_RESOURCES = ['database', 'forest']
| {
"content_hash": "af0c81e552ee61c0302b33ca48aef520",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 94,
"avg_line_length": 38.851851851851855,
"alnum_prop": 0.6120114394661582,
"repo_name": "DataDog/integrations-core",
"id": "0d46a7a0590b8d97c0adaa9ae8ec526c42d1304a",
"size": "2213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marklogic/datadog_checks/marklogic/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
import sets
import datetime
import argparse # requires Python 2.7
def indentLevel(line):
    """Return the number of leading space characters in `line`."""
    return len(line) - len(line.lstrip(' '))
class ParseTrace:
    """Line-by-line state machine that extracts plan fragments and their
    costs from an optimizer debug trace and prints a condensed report.

    NOTE(review): the repeated guard
    `tempLine.startswith('Task: ') and tempLine.find('Create plan')`
    looks buggy: str.find() returns -1 (truthy) when the substring is
    absent and 0 (falsy) when it is at the start, so the test likely
    should be `tempLine.find('Create plan') != -1`. Left unchanged here.
    """
    #
    def __init__(self, OptimizerTraceFileName):
        # Path of the trace file to parse.
        self.OptimizerTraceFileName = OptimizerTraceFileName
    def parseTrace(self):
        #
        # The things we are looking for are:
        #
        # "Query to optimize:"
        # This shows the start of a trace for a particular query
        #
        # "Task: 8 ParentTask: 7 Pass: 2 GroupId: 1 7 1 Create plan (0) ..."
        # Plan fragments show up as a result of "Create plan" tasks being executed.
        # Plan fragments may result in incomplete plans; we ignore those.
        #
        # "*** Qualified plan ***"
        # This is the beginning of a qualified plan fragment, created by a "Create
        # plan" task.
        #
        # "*** Chosen plan ***" and "*** Non-optimal plan ***"
        # These indicate the end of a qualified plan fragment, and indicate whether
        # the particular fragment was chosen for its group.
        #
        # "SQL node(<node description>)"
        # This indicates a node within the plan. The tree structure of the nodes
        # is indicated by the relative indentation of each of these. Python-like.
        #
        # "**** Roll-up Cost ****"
        # This indicates that cost info for the subtree follows.
        #
        # "**** Operator Cost ****"
        # This indicates that cost info for the node itself follows.
        #
        # "elapsed time = <floating point number>"
        # This gives the cost, reduced to a single number.
        #
        # For now, we ignore the other information in the trace.
        #
        # States: 0 waiting for query, 1 echoing query text, 2 scanning for
        # tasks, 3-6 first node of a qualified plan, 7-10 subsequent nodes
        # (rollup then operator cost), 11 between nodes, 20 done.
        try:
            f = open(self.OptimizerTraceFileName)
            saveSQLNode = ''
            state = 0
            for line in f:
                #print 'state = ' + str(state)
                line = line.rstrip('\n') # get rid of trailing return character
                if state == 0 and line == "Query to optimize:":
                    print "Trace for the following query:"
                    state = 1
                elif state == 1:
                    if len(line) == 0:
                        state = 2
                    else:
                        print line
                elif state == 2:
                    if line == "Query to optimize:":
                        state = 20 # ignore the rest of the file
                    tempLine = line.lstrip()
                    if tempLine.startswith('Task: ') and tempLine.find('Create plan'):
                        print
                        print tempLine
                        print
                        state = 3
                elif state == 3:
                    tempLine = line.lstrip()
                    if tempLine == "*** Qualified plan ***":
                        state = 4
                elif state == 4:
                    tempLine = line.lstrip()
                    if tempLine.startswith('SQL node('):
                        # Defer printing the root node until its plan cost
                        # (rollup cost of the root) is known.
                        saveSQLNode = line
                        state = 5
                elif state == 5:
                    tempLine = line.lstrip()
                    if tempLine.startswith('**** Roll-up Cost ****'):
                        state = 6
                elif state == 6:
                    tempLine = line.lstrip()
                    if tempLine.startswith('elapsed time = '):
                        planCost = 'Plan cost ' + tempLine
                        print planCost
                        print
                        print saveSQLNode
                        # Indent the cost line to match the node's depth.
                        n = indentLevel(line)
                        rollupCost = line[0:n] + 'Rollup cost ' + tempLine
                        print rollupCost
                        state = 9
                elif state == 7:
                    tempLine = line.lstrip()
                    if tempLine.startswith('**** Roll-up Cost ****'):
                        state = 8
                elif state == 8:
                    tempLine = line.lstrip()
                    if tempLine.startswith('elapsed time = '):
                        n = indentLevel(line)
                        rollupCost = line[0:n] + 'Rollup cost ' + tempLine
                        print rollupCost
                        state = 9
                elif state == 9:
                    tempLine = line.lstrip()
                    if tempLine.startswith('**** Operator Cost ****'):
                        state = 10
                elif state == 10:
                    tempLine = line.lstrip()
                    if tempLine.startswith('elapsed time = '):
                        n = indentLevel(line)
                        operatorCost = line[0:n] + 'Operator cost ' + tempLine
                        print operatorCost
                        state = 11
                elif state == 11:
                    tempLine = line.lstrip()
                    if tempLine.startswith('SQL node('):
                        print line
                        state = 7
                    elif tempLine.startswith('Task: ') and tempLine.find('Create plan'):
                        print
                        print tempLine
                        print
                        state = 3
                    elif tempLine.startswith('*** Chosen plan ***') or tempLine.startswith('*** Non-optimal plan ***'):
                        print
                        print line
                        print
                        state = 2
            f.close()
        except IOError as detail:
            print "Could not open " + self.OptimizerTraceFileName
            print detail
# beginning of main
# process command line arguments
parser = argparse.ArgumentParser(
    description='This script parses out interesting data from an optimizer debug trace.')
parser.add_argument("OptimizerTraceFileName", help='The name of the trace file you wish to parse.')
args = parser.parse_args() # exits and prints help if args are incorrect
exitCode = 0
# Parse the given trace file; parseTrace reports its own errors.
Traceparser = ParseTrace(args.OptimizerTraceFileName)
Traceparser.parseTrace()
exit(exitCode)
| {
"content_hash": "97cebb7d5857ce03b2b39198f672aa36",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 119,
"avg_line_length": 36.26011560693642,
"alnum_prop": 0.46468994101705724,
"repo_name": "apache/incubator-trafodion",
"id": "f2b0268ba89ee06a4c674d4a3dda72472d96ef65",
"size": "7738",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "core/sqf/sql/scripts/analyzeOptimizerTrace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
#import pycuda.autoinit
import pycuda.driver as drv
import numpy as np
import math
from pycuda.compiler import SourceModule
from bitstring import BitStream, BitArray
from jinja2 import Environment, PackageLoader
#helper functions
def bitlength(arg):
    """Return ceil(log2(arg)): bits needed to index `arg` distinct values."""
    bits = math.log(arg, 2)
    return math.ceil(bits)
def bytelength(arg):
    """Return the number of whole bytes needed to hold `arg` bits."""
    return math.ceil(float(arg) / 8)
#Initialise data for GPU constant memory / user-defined constants - Global
NrGenomes = 1 #ADAPTED
NrTileTypes = 4 #ADAPTED
NrEdgeTypes = 8
NrTileOrientations = 4 # because square! don't change this... it might blow everything up.
NrGenerations = 1 #ADAPTED
#Initialise data for GPU constant memory / user-defined constants - FitnessKernel
DimGridX = 10 #odd dims seem to work - good because only then really centered tile
DimGridY = 10 #odd dims seem to work - good because only then really centered tile
NrFitnessFunctionGrids = 1
NrAssemblyRedundancy = 10
InteractionMatrix = np.zeros( ( NrEdgeTypes, NrEdgeTypes ) ).astype(np.float32)
WithStoreAssembledGridsInGlobalMemory = 1.0
#Initialise data for Sorting Kernel in GPU
Sort_ThreadDimX = 256
#Initialise data for GPU constant memory / user-defined constants - GAKernel
RateMutation = 4.8
ProbabilityUniformCrossover = 0.2
ProbabilitySinglePointCrossover = 0.3
WithMixedCrossover = 0.0
WithBankConflict = 1
WithNaiveRouletteWheelSelection = 1
WithAssumeNormalizedFitnessFunctionValues = 1
WithUniformCrossover = 0.0
WithSinglePointCrossover = 1
WithSurefireMutation = 1
#Parameters which probably should not be changed if you don't re-structure the whole simulation
NrMemoryBanks = 32
NrMutexe = 32
# All 24 permutations of (1,2,3,4) — one per tile-orientation assignment.
FourPermutations = np.array([ [1,2,3,4],
                              [1,2,4,3],
                              [1,3,2,4],
                              [1,3,4,2],
                              [1,4,2,3],
                              [1,4,3,2],
                              [2,1,3,4],
                              [2,1,4,3],
                              [2,3,1,4],
                              [2,3,4,1],
                              [2,4,1,3],
                              [2,4,3,1],
                              [3,2,1,4],
                              [3,2,4,1],
                              [3,1,2,4],
                              [3,1,4,2],
                              [3,4,2,1],
                              [3,4,1,2],
                              [4,2,3,1],
                              [4,2,1,3],
                              [4,3,2,1],
                              [4,3,1,2],
                              [4,1,2,3],
                              [4,1,3,2],]).astype(np.uint8)
NrFourPermutations = FourPermutations.shape[0] #don't change this if you don't change NrTileOrientations
#Software-Tunable parameters for CurandInitKernel
CurandInit_NrThreadsPerBlock = 32 #256.0
CurandInit_NrBlocks = 1 #int( math.ceil( float(NrGenomes) / float(CurandInit_NrThreadsPerBlock) ) )
#Software-Tunable parameters for FitnessKernel
SubgridDimX = DimGridX #ADAPTED
SubgridDimY = DimGridY #ADAPTED
ThreadDimX = 1 #Reduced to avoid out-of-resources launch fail
ThreadDimY = 1 #Reduced to avoid out-of-resources launch fail
NrSubgridsPerBank = 4 #8
Fit_NrThreadsPerBlock = ThreadDimX * ThreadDimY
LengthMovelist = 100 # for Tesla card: 100
#Software-Tunable parameters for GAKernel
ThreadDim = 256
#Initialise Parameters calculated from user-defined parameters
# Genome: 4 edges per tile, bitlength(NrEdgeTypes) bits per edge.
ByteLengthGenome = int( bytelength( 4 * bitlength(NrEdgeTypes) * NrTileTypes ) )
AlignedByteLengthGenome = 8#math.ceil( ByteLengthGenome/4 ) * 4
BitLengthGenome = int( 4 * bitlength(NrEdgeTypes) * NrTileTypes )
EdgeTypeBitLength = int( bitlength(NrEdgeTypes) )
print "ByteLengthGenome: ", ByteLengthGenome
print "AlignedByteLengthGenome: ", AlignedByteLengthGenome
print "BitLengthGenome: ", BitLengthGenome
print "EdgeTypeBitLength: ", EdgeTypeBitLength
# CUDA launch configurations: (grid dims) and (block dims) per kernel.
fit_grid = (1, 1) #ADAPTED
#fit_blocks = (DimGridX, DimGridY, 1)#ADAPTED
fit_blocks = (ThreadDimX, ThreadDimY, 1) #ADAPTED
ga_grids = ( int( math.ceil( float(NrGenomes) / float(ThreadDimX) ) ), 1)
ga_blocks = (ThreadDimX, 1, 1)
GA_NrThreadsPerBlock = int( math.ceil( float(NrGenomes) / float(ThreadDimX) ) )
sorting_blocks = (Sort_ThreadDimX, 1, 1)
sorting_grids = ( int( math.ceil( float(NrGenomes) / float(Sort_ThreadDimX) ) ), 1)
#ADAPTED START
#Check validity of above parameters - FitnessKernel
#if fit_blocks[0] * fit_blocks[1] * SubgridDimX * SubgridDimY != DimGridX * DimGridY: #Check Number of cells allocated match with number of threads / grid size available
#    print "FITNESS_KERNEL:Scheduled cells", fit_blocks[0] * fit_blocks[1] * SubgridDimX * SubgridDimY, " not equal to Grid Dims: ", DimGridX * DimGridY,
#    quit()
#if NrSubgridsPerBank * NrMemoryBanks * SubgridDimX * SubgridDimY < DimGridX * DimGridY:
#    print "Not enough SubgridsPerBank scheduled to accommodate all cells! I.e. ",NrSubgridsPerBank * NrMemoryBanks * SubgridDimX * SubgridDimY,"is smaller than ",DimGridX * DimGridY
#    quit()
#ADAPTED STOP
#Main Simulation Params
GlobalParamsDict = {
    "NrGenomes":int(NrGenomes),
    "NrGenerations":int(NrGenerations),
    "NrTileTypes":int(NrTileTypes),
    "NrEdgeTypes":int(NrEdgeTypes),
    "ByteLengthGenome":int(ByteLengthGenome),
    "BitLengthGenome":int(BitLengthGenome),
    "EdgeTypeBitLength":int(EdgeTypeBitLength),
    "NrTileOrientations":int(NrTileOrientations)
}
#Fitness Simulation Params
FitnessParamsDict = {
"DimGridX":int(DimGridX),
"DimGridY":int(DimGridY),
"NrFitnessFunctionGrids":int(NrFitnessFunctionGrids),
"NrThreadsPerBlock":int(fit_blocks[0]*fit_blocks[1]*fit_blocks[2]),
"NrBlocks":int(fit_grid[0]*fit_grid[1]),
"SubgridDimX": int(SubgridDimX),
"SubgridDimY": int(SubgridDimY),
"ThreadDimX": int(ThreadDimX),
"ThreadDimY": int(ThreadDimY),
"NrSubgridsPerBank": int(NrSubgridsPerBank),
"NrFourPermutations": int(NrFourPermutations),
"NrAssemblyRedundancy": int(NrAssemblyRedundancy),
"LengthMovelist":LengthMovelist
}
#Sorting Kernel Params
#GA Simulation Params
GAParamsDict = {
"RateMutation": RateMutation, # This is Mu * L, expectation value of number of flipped bits
"ProbabilityUniformCrossover": ProbabilityUniformCrossover, # Gives the probability that crossover is point-wise (so if zero, all cross-over will be uniform)
"ProbabilitySinglePointCrossover": ProbabilitySinglePointCrossover, # Gives the probability that crossover is point-wise (so if zero, all cross-over will be uniform)
"WithMixedCrossover":WithMixedCrossover,
"WithBankConflict":WithBankConflict,
"WithNaiveRouletteWheelSelection":WithNaiveRouletteWheelSelection,
"WithAssumeNormalizedFitnessFunctionValues":WithAssumeNormalizedFitnessFunctionValues,
"WithUniformCrossover":WithUniformCrossover,
"WithSinglePointCrossover":WithSinglePointCrossover,
"WithSurefireMutation":WithSurefireMutation,
"NrThreadsPerBlock":GA_NrThreadsPerBlock,
"ThreadDim":ThreadDimX,
"WithStoreAssembledGridsInGlobalMemory":WithStoreAssembledGridsInGlobalMemory
}
#Initialise data for GPU texture memory
FitnessFunctionGrids = np.zeros((NrFitnessFunctionGrids, DimGridX, DimGridY)).astype(np.uint8)
#ADAPTED START
#FitnessFunctionGrids[0][int(DimGridX/2)][int(DimGridY/2)] = 255
#ADAPTED STOP
#Initialise data for GPU global memory
#Genomes = np.zeros( ( NrGenomes, ByteLengthGenome) ).astype(np.uint8) #ADAPTED
#001001001001 010011101011 011011011110 000000000100 #ADAPTED
#00100100 10010100 11101011 01101101 11100000 00000100 #ADAPTED
#From Iain's Paper - pyramid polyomino
#Genomes = np.array( [ [0b00100100, 0b10010100, 0b11101011, 0b01101101, 0b11100000, 0b00000100] ] ).astype(np.uint8); #ADAPTED
GenBuf = np.array( [ [0b00100100, 0b10010111, 0b01011010, 0b01101101, 0b11100000, 0b00000100, 0b00000000, 0b00000000] ] ).astype(np.uint8); #ADAPTED
Genomes = GenBuf
for i in range(0,NrGenomes):
np.concatenate((Genomes, GenBuf), axis=0)
print Genomes
#(In decimal: 36, 151, 90, 109, 224, 4)
print "Genomes initially: ", Genomes #ADAPTED TEST
#quit() #ADAPTED TEST
FitnessPartialSums = np.zeros( ( sorting_grids[0] ) ).astype(np.float32)
FitnessValues = np.zeros( (NrGenomes) ).astype(np.float32)
AssembledGrids = np.zeros( ( NrGenomes, DimGridX, DimGridY ) ).astype(np.uint8)
Mutexe = np.zeros( (NrMutexe) ).astype(np.uint32)
ReductionList = np.zeros( (sorting_grids[0]) ).astype(np.float32)
#Handles for the values in global memory
FitnessFunctionGrids_h = []
Genomes_h = None
FitnessValues_h = None
AssembledGrids_h = None
GlobalParams_h = None
FitnessParams_h = None
GAParams_h = None
CurandStates_h = None
Mutexe_h = None
FourPermutations_h = None
InteractionMatrix_h = None
FitnessSumConst_h = None
FitnessListConst_h = None
ReductionList_h = None
#Storing the kernel source
KernelFile = "./kernel.cu"
KernelSourceModule = None
def main():
    """Render the CUDA kernel template, upload all data, and run the GA.

    Renders kernel.cu via Jinja2, compiles it with PyCUDA, uploads the
    parameter blocks / genomes / textures, then runs the fitness kernel
    (sorting/GA kernels are currently commented out) for NrGenerations
    and copies the results back to the host arrays.

    NOTE: Python 2 code (print statements); relies on the module-level
    parameters and arrays defined above.
    """
    #Initialise InteractionMatrix
    def Delta(a,b):
        if a==b:
            return 1
        else:
            return 0
    # Edge type i binds to i+1 when i is even, and to i-1 when i is odd.
    for i in range(InteractionMatrix.shape[0]):
        for j in range(InteractionMatrix.shape[1]):
            InteractionMatrix[i][j] = ( 1 - i % 2 ) * Delta( i, j+1 ) + ( i % 2 ) * Delta( i, j-1 )
    #Initialise GPU (equivalent of autoinit)
    drv.init()
    assert drv.Device.count() >= 1
    dev = drv.Device(0)
    # NOTE(review): the context is never popped/detached before exit -- PyCUDA
    # will warn about a leaked context; confirm whether that is acceptable here.
    ctx = dev.make_context(0)
    #Initialise data for templates - Device Information
    WarpSize = drv.Device(0).get_attribute(drv.device_attribute.WARP_SIZE)
    print WarpSize
    #Convert GlobalParams to List
    # NOTE(review): the constant-memory layout the kernel sees depends on dict
    # iteration order, which is arbitrary in Python 2 -- verify this matches
    # the kernel template's expectations.
    GlobalParams = np.zeros(len(GlobalParamsDict.values())).astype(np.float32)
    count = 0
    for x in GlobalParamsDict.keys():
        GlobalParams[count] = GlobalParamsDict[x]
        count += 1
    #Convert FitnessParams to List
    FitnessParams = np.zeros(len(FitnessParamsDict.values())).astype(np.float32)
    count = 0
    for x in FitnessParamsDict.keys():
        FitnessParams[count] = FitnessParamsDict[x]
        count += 1
    #Convert GAParams to List
    GAParams = np.zeros(len(GAParamsDict.values())).astype(np.float32)
    count = 0
    for x in GAParamsDict.keys():
        GAParams[count] = GAParamsDict[x]
        count += 1
    # Set environment for template package Jinja2
    env = Environment(loader=PackageLoader('main', '../../templates'))
    # Load source code from file
    Source = env.get_template('./kernel.cu') #Template( file(KernelFile).read() )
    #Create dictionary argument for rendering
    RenderArgs= {"params_size":GlobalParams.nbytes,\
        "fitnessparams_size":FitnessParams.nbytes,\
        "gaparams_size":GAParams.nbytes,\
        "genome_bytelength":int(ByteLengthGenome),\
        "genome_alignedbytelength":int(AlignedByteLengthGenome),\
        "genome_bitlength":int(BitLengthGenome),\
        "ga_nr_threadsperblock":GA_NrThreadsPerBlock,\
        "textures":range( 0, NrFitnessFunctionGrids ),\
        "curandinit_nr_threadsperblock":CurandInit_NrThreadsPerBlock,\
        "with_mixed_crossover":WithMixedCrossover,
        "with_bank_conflict":WithBankConflict,
        "with_naive_roulette_wheel_selection":WithNaiveRouletteWheelSelection,
        "with_assume_normalized_fitness_function_values":WithAssumeNormalizedFitnessFunctionValues,
        "with_uniform_crossover":WithUniformCrossover,
        "with_single_point_crossover":WithSinglePointCrossover,
        "with_surefire_mutation":WithSurefireMutation,
        "with_storeassembledgridsinglobalmemory":WithStoreAssembledGridsInGlobalMemory,
        "ga_threaddimx":int(ThreadDim),
        "glob_nr_tiletypes":int(NrTileTypes),
        "glob_nr_edgetypes":int(NrEdgeTypes),
        "glob_nr_tileorientations":int(NrTileOrientations),
        "fit_dimgridx":int(DimGridX),
        "fit_dimgridy":int(DimGridY),
        "fit_nr_fitnessfunctiongrids":int(NrFitnessFunctionGrids),
        "fit_nr_fourpermutations":int(NrFourPermutations),
        "fit_assembly_redundancy":int(NrAssemblyRedundancy),
        "fit_nr_threadsperblock":int(Fit_NrThreadsPerBlock),
        "sort_threaddimx":int(Sort_ThreadDimX),
        "glob_nr_genomes":int(NrGenomes),
        "fit_dimthreadx":int(ThreadDimX),
        "fit_dimthready":int(ThreadDimY),
        "fit_dimsubgridx":int(SubgridDimX),
        "fit_dimsubgridy":int(SubgridDimY),
        "fit_nr_subgridsperbank":int(NrSubgridsPerBank),
        "glob_bitlength_edgetype":int(EdgeTypeBitLength),
        "WarpSize":int(WarpSize),
        "fit_lengthmovelist":int(LengthMovelist),
        "fit_blockdimx":int(fit_grid[0])
        }
    # Render source code
    RenderedSource = Source.render( RenderArgs )
    # Save rendered source code to file
    f = open('./rendered.cu', 'w')
    f.write(RenderedSource)
    f.close()
    #Load source code into module
    KernelSourceModule = SourceModule(RenderedSource, options=None, no_extern_c=True, arch="compute_20", code="sm_20", cache_dir=None)
    #Allocate values on GPU
    Genomes_h = drv.mem_alloc(Genomes.nbytes)
    FitnessPartialSums_h = drv.mem_alloc(FitnessPartialSums.nbytes)
    FitnessValues_h = drv.mem_alloc(FitnessValues.nbytes)
    AssembledGrids_h = drv.mem_alloc(AssembledGrids.nbytes)
    Mutexe_h = drv.mem_alloc(Mutexe.nbytes)
    ReductionList_h = drv.mem_alloc(ReductionList.nbytes)
    #Copy values to global memory
    drv.memcpy_htod(Genomes_h, Genomes)
    drv.memcpy_htod(FitnessPartialSums_h, FitnessPartialSums)
    drv.memcpy_htod(FitnessValues_h, FitnessValues)
    drv.memcpy_htod(AssembledGrids_h, AssembledGrids)
    drv.memcpy_htod(Mutexe_h, Mutexe)
    #Copy values to constant / texture memory
    # NOTE: 'id' shadows the builtin here; kept as-is (doc-only change).
    for id in range(0, NrFitnessFunctionGrids):
        FitnessFunctionGrids_h.append( KernelSourceModule.get_texref("t_ucFitnessFunctionGrids%d"%(id)) )
        drv.matrix_to_texref( FitnessFunctionGrids[id], FitnessFunctionGrids_h[id] , order="C")
    InteractionMatrix_h = KernelSourceModule.get_texref("t_ucInteractionMatrix")
    drv.matrix_to_texref( InteractionMatrix, InteractionMatrix_h , order="C")
    GlobalParams_h = KernelSourceModule.get_global("c_fParams") # Constant memory address
    drv.memcpy_htod(GlobalParams_h[0], GlobalParams)
    FitnessParams_h = KernelSourceModule.get_global("c_fFitnessParams") # Constant memory address
    drv.memcpy_htod(FitnessParams_h[0], FitnessParams)
    GAParams_h = KernelSourceModule.get_global("c_fGAParams") # Constant memory address
    drv.memcpy_htod(GAParams_h[0], GAParams)
    FourPermutations_h = KernelSourceModule.get_global("c_ucFourPermutations") # Constant memory address
    drv.memcpy_htod(FourPermutations_h[0], FourPermutations)
    FitnessSumConst_h = KernelSourceModule.get_global("c_fFitnessSumConst")
    FitnessListConst_h = KernelSourceModule.get_global("c_fFitnessListConst")
    #Set up curandStates
    curandState_bytesize = 40 # This might be incorrect, depending on your compiler (info from Tomasz Rybak's pyCUDA cuRAND wrapper)
    CurandStates_h = drv.mem_alloc(curandState_bytesize * NrGenomes)
    #Compile kernels
    curandinit_fnc = KernelSourceModule.get_function("CurandInitKernel")
    fitness_fnc = KernelSourceModule.get_function("FitnessKernel")
    #sorting_fnc = KernelSourceModule.get_function("SortingKernel")
    #ga_fnc = KernelSourceModule.get_function("GAKernel")
    #Initialise Curand
    curandinit_fnc(CurandStates_h, block=(int(CurandInit_NrThreadsPerBlock), 1, 1), grid=(int(CurandInit_NrBlocks), 1))
    #Build parameter lists for FitnessKernel and GAKernel
    FitnessKernelParams = (Genomes_h, FitnessValues_h, AssembledGrids_h, CurandStates_h, Mutexe_h); #TEST
    #FitnessKernelParams = ()
    SortingKernelParams = (FitnessValues_h, FitnessPartialSums_h)
    GAKernelParams = (Genomes_h, FitnessValues_h, AssembledGrids_h, CurandStates_h);
    #TEST ONLY
    #return #ADAPTED
    #TEST ONLY
    #Initialise CUDA timers
    start = drv.Event()
    stop = drv.Event()
    #START ADAPTED
    print "GENOMES NOW:\n"
    print Genomes
    print ":::STARTING KERNEL EXECUTION:::"
    #STOP ADAPTED
    #execute kernels for specified number of generations
    start.record()
    for gen in range(0, GlobalParamsDict["NrGenerations"]):
        #print "Processing Generation: %d"%(gen)
        fitness_fnc(*(FitnessKernelParams), block=fit_blocks, grid=fit_grid)
        #Launch CPU processing (should be asynchroneous calls)
        #sorting_fnc(*(SortingKernelParams), block=sorting_blocks, grid=sorting_grids) #Launch Sorting Kernel
        #drv.memcpy_dtoh(ReductionList, ReductionList_h) #Copy from Device to Host and finish sorting
        #FitnessSumConst = ReductionList.sum()
        #drv.memcpy_htod(FitnessSumConst_h[0], FitnessSumConst) #Copy from Host to Device constant memory
        #drv.memcpy_dtod(FitnessListConst_h[0], FitnessValues_h, FitnessValues.nbytes) #Copy FitneValues from Device to Device Const
        #ga_fnc(*(GAKernelParams), block=ga_blocks, grid=ga_grids)
        drv.memcpy_dtoh(Genomes, Genomes_h) #Copy data from GPU
        drv.memcpy_dtoh(FitnessValues, FitnessValues_h)
        drv.memcpy_dtoh(AssembledGrids, AssembledGrids_h)
    stop.record()
    stop.synchronize()
    print "Total kernel time taken: %fs"%(start.time_till(stop)*1e-3)
    print "Mean time per generation: %fs"%(start.time_till(stop)*1e-3 / NrGenerations)
    pass
    print "GENOMES NOW:\n"
    #for i in range(0, ByteLengthGenome):
    #    print "|",
    #    print str(Genomes[0][i]),
    #print Genomes
    #print InteractionMatrix
    print "PRINT ASSEMBLED GRIDS"
    #for i in range(0, len(AssembledGrids)):# len(AssembledGrids)):
    #    print "Assembly %d: --------------"%(i)
    #    print AssembledGrids[i]
    #print AssembledGrids[0]
    #print "PRINT FITNESS VALUES"
    #print FitnessValues
    #print "PRINT FITNESS GRID 0"
    #print FitnessFunctionGrids[0]

if __name__ == '__main__':
    main()
| {
"content_hash": "22e09e693e6aa44f238a9d572eaca93e",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 182,
"avg_line_length": 42.942129629629626,
"alnum_prop": 0.6679963344294109,
"repo_name": "schroeder-dewitt/polyomino-self-assembly",
"id": "68060842449904935019e23b55b334bf55a8af19",
"size": "18944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/SinglePolyomino/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "876"
},
{
"name": "Cuda",
"bytes": "6859317"
},
{
"name": "Python",
"bytes": "1683207"
}
],
"symlink_target": ""
} |
"""The volumes snapshots api."""
from oslo.utils import strutils
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import volume
LOG = logging.getLogger(__name__)
def _translate_snapshot_detail_view(context, snapshot):
    """Maps keys for snapshots details view."""
    # NOTE(gagupta): No additional data / lookups at the moment
    return _translate_snapshot_summary_view(context, snapshot)
def _translate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot['id']
d['created_at'] = snapshot['created_at']
d['name'] = snapshot['display_name']
d['description'] = snapshot['display_description']
d['volume_id'] = snapshot['volume_id']
d['status'] = snapshot['status']
d['size'] = snapshot['volume_size']
if snapshot.get('snapshot_metadata'):
metadata = snapshot.get('snapshot_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
# avoid circular ref when vol is a Volume instance
elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'),
dict):
d['metadata'] = snapshot['metadata']
else:
d['metadata'] = {}
return d
def make_snapshot(elem):
    """Attach the standard snapshot attributes and metadata to *elem*."""
    for attribute in ('id', 'status', 'size', 'created_at',
                      'name', 'description', 'volume_id'):
        elem.set(attribute)
    elem.append(common.MetadataTemplate())
class SnapshotTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single snapshot."""

    def construct(self):
        snapshot_elem = xmlutil.TemplateElement('snapshot',
                                                selector='snapshot')
        make_snapshot(snapshot_elem)
        return xmlutil.MasterTemplate(snapshot_elem, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of snapshots."""

    def construct(self):
        list_root = xmlutil.TemplateElement('snapshots')
        item = xmlutil.SubTemplateElement(list_root, 'snapshot',
                                          selector='snapshots')
        make_snapshot(item)
        return xmlutil.MasterTemplate(list_root, 1)
class SnapshotsController(wsgi.Controller):
    """The Snapshots API controller for the OpenStack API."""

    def __init__(self, ext_mgr=None):
        # ext_mgr: optional extension manager; only stored here, kept for
        # signature parity with other v2 controllers.
        self.volume_api = volume.API()
        self.ext_mgr = ext_mgr
        super(SnapshotsController, self).__init__()

    @wsgi.serializers(xml=SnapshotTemplate)
    def show(self, req, id):
        """Return data about the given snapshot."""
        context = req.environ['cinder.context']
        try:
            snapshot = self.volume_api.get_snapshot(context, id)
            req.cache_db_snapshot(snapshot)
        except exception.NotFound:
            msg = _("Snapshot could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}

    def delete(self, req, id):
        """Delete a snapshot."""
        context = req.environ['cinder.context']
        LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
        try:
            snapshot = self.volume_api.get_snapshot(context, id)
            self.volume_api.delete_snapshot(context, snapshot)
        except exception.NotFound:
            msg = _("Snapshot could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        # 202 Accepted: deletion completes asynchronously.
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SnapshotsTemplate)
    def index(self, req):
        """Returns a summary list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_summary_view)

    @wsgi.serializers(xml=SnapshotsTemplate)
    def detail(self, req):
        """Returns a detailed list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of snapshots, transformed through entity_maker."""
        context = req.environ['cinder.context']
        #pop out limit and offset , they are not search_opts
        search_opts = req.GET.copy()
        search_opts.pop('limit', None)
        search_opts.pop('offset', None)
        #filter out invalid option
        allowed_search_options = ('status', 'volume_id', 'name')
        utils.remove_invalid_filter_options(context, search_opts,
                                            allowed_search_options)
        # NOTE(thingee): v2 API allows name instead of display_name
        if 'name' in search_opts:
            search_opts['display_name'] = search_opts['name']
            del search_opts['name']
        snapshots = self.volume_api.get_all_snapshots(context,
                                                      search_opts=search_opts)
        # Apply the pagination limits from the request, then warm the cache.
        limited_list = common.limited(snapshots, req)
        req.cache_db_snapshots(limited_list)
        res = [entity_maker(context, snapshot) for snapshot in limited_list]
        return {'snapshots': res}

    @wsgi.response(202)
    @wsgi.serializers(xml=SnapshotTemplate)
    def create(self, req, body):
        """Creates a new snapshot."""
        kwargs = {}
        context = req.environ['cinder.context']
        if not self.is_valid_body(body, 'snapshot'):
            msg = (_("Missing required element '%s' in request body") %
                   'snapshot')
            raise exc.HTTPBadRequest(explanation=msg)
        snapshot = body['snapshot']
        kwargs['metadata'] = snapshot.get('metadata', None)
        try:
            volume_id = snapshot['volume_id']
        except KeyError:
            msg = _("'volume_id' must be specified")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            volume = self.volume_api.get(context, volume_id)
        except exception.NotFound:
            msg = _("Volume could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        # 'force' lets callers snapshot an attached (in-use) volume.
        force = snapshot.get('force', False)
        msg = _("Create snapshot from volume %s")
        LOG.info(msg, volume_id, context=context)
        # NOTE(thingee): v2 API allows name instead of display_name
        if 'name' in snapshot:
            snapshot['display_name'] = snapshot.get('name')
            del snapshot['name']
        if not utils.is_valid_boolstr(force):
            msg = _("Invalid value '%s' for force. ") % force
            raise exception.InvalidParameterValue(err=msg)
        if strutils.bool_from_string(force):
            new_snapshot = self.volume_api.create_snapshot_force(
                context,
                volume,
                snapshot.get('display_name'),
                snapshot.get('description'),
                **kwargs)
        else:
            new_snapshot = self.volume_api.create_snapshot(
                context,
                volume,
                snapshot.get('display_name'),
                snapshot.get('description'),
                **kwargs)
        req.cache_db_snapshot(new_snapshot)
        retval = _translate_snapshot_detail_view(context, new_snapshot)
        return {'snapshot': retval}

    @wsgi.serializers(xml=SnapshotTemplate)
    def update(self, req, id, body):
        """Update a snapshot."""
        context = req.environ['cinder.context']
        if not body:
            msg = _("Missing request body")
            raise exc.HTTPBadRequest(explanation=msg)
        if 'snapshot' not in body:
            msg = (_("Missing required element '%s' in request body") %
                   'snapshot')
            raise exc.HTTPBadRequest(explanation=msg)
        snapshot = body['snapshot']
        update_dict = {}
        valid_update_keys = (
            'name',
            'description',
            'display_name',
            'display_description',
        )
        # NOTE(thingee): v2 API allows name instead of display_name
        if 'name' in snapshot:
            snapshot['display_name'] = snapshot['name']
            del snapshot['name']
        # NOTE(thingee): v2 API allows description instead of
        # display_description
        if 'description' in snapshot:
            snapshot['display_description'] = snapshot['description']
            del snapshot['description']
        for key in valid_update_keys:
            if key in snapshot:
                update_dict[key] = snapshot[key]
        try:
            snapshot = self.volume_api.get_snapshot(context, id)
            self.volume_api.update_snapshot(context, snapshot, update_dict)
        except exception.NotFound:
            msg = _("Snapshot could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        # Reflect the accepted changes in the object we return.
        snapshot.update(update_dict)
        req.cache_db_snapshot(snapshot)
        return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}
def create_resource(ext_mgr):
    """Build the WSGI resource that serves the snapshots controller."""
    controller = SnapshotsController(ext_mgr)
    return wsgi.Resource(controller)
| {
"content_hash": "f7459cf94fbb373eb462a6053e2f28b8",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 79,
"avg_line_length": 34,
"alnum_prop": 0.6007769145394006,
"repo_name": "hguemar/cinder",
"id": "64d1d28436724df1df814393b20084288e26703c",
"size": "9646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/api/v2/snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10010542"
},
{
"name": "Shell",
"bytes": "9917"
}
],
"symlink_target": ""
} |
class TestLimitNew:
    config = """
        tasks:
          test:
            mock:
              - {title: 'Item 1'}
              - {title: 'Item 2'}
              - {title: 'Item 3'}
              - {title: 'Item 4'}
            accept_all: yes
            limit_new: 1
    """

    def test_limit_new(self, execute_task):
        """limit_new should pass through exactly one new entry per run."""
        first_run = execute_task('test')
        assert len(first_run.entries) == 1, 'accepted too many'
        assert first_run.find_entry('accepted', title='Item 1'), 'accepted wrong item'
        second_run = execute_task('test')
        assert len(second_run.entries) == 1, 'accepted too many on second run'
        assert second_run.find_entry('accepted', title='Item 2'), 'accepted wrong item on second run'
| {
"content_hash": "56c911ae1ec34876c800f460f05928b2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 95,
"avg_line_length": 35.5,
"alnum_prop": 0.5169014084507042,
"repo_name": "crawln45/Flexget",
"id": "ab3783dfe5046339dcf0f59477614b2fbe3d742f",
"size": "710",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_limit_new.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1233"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3761134"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1586"
}
],
"symlink_target": ""
} |
import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Clang language name -> source-file extensions that might implement a header
# with the same basename (used by find_source_for_header).
SOURCE_EXTENSIONS = {
    "c++": (
        ".cpp",
        ".cxx"
    ),
    "c": (
        ".c",
    )
}
# Compiler options that carry a path (joined, like "-Ifoo", or as the next
# argument) and therefore need relative->absolute rewriting.
PATH_FLAGS = (
    '-isystem',
    '-I',
    '-iquote',
    '--sysroot='
)
def find_file(root, filename):
    """Recursively search *root* for a file named exactly *filename*.

    Returns the full path of the first match encountered in os.walk order,
    or None when no such file exists.

    Fixes: the walk variable used to shadow the *root* parameter, the inner
    per-file loop is replaced with a membership test, and the no-match
    return is made explicit.
    """
    for dirpath, _dirnames, filenames in os.walk(root):
        if filename in filenames:
            return os.path.join(dirpath, filename)
    return None
def find_source_for_header(header):
    """Locate a sibling source file implementing *header*.

    Returns ``(path, extra_flags)``: for a non-header file the input path
    with no extra flags; for a ``.h``/``.hpp`` header the first matching
    source file found under the header's directory together with the
    ``["-x", language]`` flags, falling back to the header itself.
    """
    folder, filename = os.path.split(header)
    stem, extension = os.path.splitext(filename)
    if extension not in (".h", ".hpp"):
        # Not a header -- use the file as-is.
        return header, []
    for language, source_extensions in SOURCE_EXTENSIONS.items():
        for source_extension in source_extensions:
            candidate = find_file(folder, stem + source_extension)
            if candidate is not None:
                return candidate, ["-x", language]
    return header, []
class YcmFlags:
    """Flags generator for YouCompleteMe vim plugin"""

    def __init__(self, absolute_project_path=None, flags=None,
                 additional_includes=None, default_file=None, build_path="build/"):
        """See `README.md` for information about options"""
        if absolute_project_path is None:
            raise NameError('Please set up the `absolute_project_path` argument.')
        self._flags = flags if flags is not None else []
        self._default_file = default_file if default_file is not None else ()
        if isinstance(additional_includes, (tuple, list)):
            # NOTE(review): this appends *lists* (["-I", path]) into _flags,
            # but relative_to_absolute() treats each element of flags as a
            # string (calls .startswith on it) -- it looks like this would
            # raise AttributeError whenever additional_includes is used;
            # confirm intended element shape.
            self._flags.extend(
                [["-I", include] for include in additional_includes])
        self._project_path = os.path.abspath(absolute_project_path)
        self._compilation_db_path = os.path.join(self._project_path, build_path)
        if os.path.exists(self._compilation_db_path):
            self._db = ycm_core.CompilationDatabase(self._compilation_db_path)
            if not self._db:
                raise NameError('Failed to prepare a compilation DB')
        else:
            raise NameError('No compilation DB!')

    @staticmethod
    def relative_to_absolute(flags, absolute_path):
        # Rewrite path-carrying compiler flags so relative paths become
        # absolute with respect to *absolute_path*. Handles both the
        # separated form ("-I", "path") and the joined form ("-Ipath",
        # "--sysroot=path"); all other flags pass through unchanged.
        new_flags = []
        make_next_absolute = False
        for flag in flags:
            if make_next_absolute:
                # Previous flag was a bare path option; this one is its path.
                make_next_absolute = False
                if not flag.startswith('/'):
                    new_flag = os.path.join(absolute_path, flag)
                else:
                    new_flag = flag
                new_flags.append(os.path.normpath(new_flag))
                continue
            new_flag = flag
            for path_flag in PATH_FLAGS:
                if flag == path_flag:
                    # Separated form: the path arrives as the next flag.
                    make_next_absolute = True
                    break
                if flag.startswith(path_flag):
                    # Joined form: rewrite the embedded path in place.
                    path = flag[len(path_flag):]
                    new_flag = path_flag + os.path.normpath(
                        os.path.join(absolute_path, path))
                    break
            new_flags.append(new_flag)
        return new_flags

    def flags_for_default_file(self):
        # Fallback used when the compilation DB has no entry for a file:
        # reuse the flags of the configured representative source file.
        if not self._default_file:
            raise NameError("No default flag set, so no flags extracted")
        source, flags = self._default_file
        compilation_info = self._db.GetCompilationInfoForFile(
            os.path.join(self._project_path, source))
        return (
            compilation_info,
            self.relative_to_absolute(flags, self._project_path)
        )

    def flags_for_file(self, filename):
        # Entry point called by YouCompleteMe for every edited file.
        additional_flags = []
        # Headers have no DB entry of their own; look up a sibling source.
        source, extra_flags = find_source_for_header(filename)
        compilation_info = self._db.GetCompilationInfoForFile(source)
        if not compilation_info.compiler_flags_:
            compilation_info, additional_flags = self.flags_for_default_file()
        additional_flags.extend(
            self.relative_to_absolute(self._flags, self._project_path))
        flags = PrepareClangFlags(
            self.relative_to_absolute(
                compilation_info.compiler_flags_,
                compilation_info.compiler_working_dir_),
            source)
        return {
            'flags': flags + additional_flags + extra_flags,
            'do_cache': True
        }
| {
"content_hash": "151ab979dbafae282ae44f582c4b529c",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 83,
"avg_line_length": 32.48854961832061,
"alnum_prop": 0.5634398496240601,
"repo_name": "mexus/ycm-flags",
"id": "d469c725f40684a433737fd820ee74d8f39fcff0",
"size": "4281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ycmflags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4296"
}
],
"symlink_target": ""
} |
import sys, codecs, argparse
from nltk import word_tokenize
import javabridge
import gc
import regex as re
from termcolor import cprint
def get_decompounder():
    """
    Restarts the JVM with the decompounder. It is necessary once in a while.
    """
    javabridge.start_vm(class_path=["tf/jwordsplitter/target/jwordsplitter-4.2-SNAPSHOT.jar"])
    splitter_instance = javabridge.make_instance(
        "de/danielnaber/jwordsplitter/GermanWordSplitter", "(Z)V", False)
    return javabridge.JWrapper(splitter_instance)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tokenizes and decompounds the text on stdin.")
    # NOTE(review): help text contains a typo ("Lanuage"); runtime string,
    # left untouched in this doc-only pass.
    parser.add_argument("--language", required=True, help="Lanuage the text is in.")
    args = parser.parse_args()
    if args.language not in set(['english', 'german']):
        raise Exception("Language must be 'english' or 'german', not '{}'.".format(args.language))
    # Python-2-style UTF-8 re-wrapping of the standard streams.
    sys.stdin = codecs.getreader('utf-8')(sys.stdin)
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
    try:
        if args.language == "german":
            decompounder = get_decompounder()
            decompounded_count = 0
        for ln, line in enumerate(sys.stdin):
            # Pre-tokenization cleanup: collapse whitespace, normalize
            # quote styles and strip/normalize stray hyphens and dashes.
            # NOTE(review): re.sub's 4th positional argument is *count*,
            # not flags -- the two calls below pass re.UNICODE positionally,
            # which limits the number of substitutions instead of setting a
            # flag; confirm intent.
            line = re.sub(r"[[:space:]]+", " ", line.rstrip())
            line = re.sub(r"^[[:space:]]+", "", line)
            line = re.sub(r"''", "\"", line)
            line = re.sub(r"``", "\"", line)
            line = re.sub(r"-([[:punct:]\$])", "\g<1>", line)
            line = re.sub(r"([[:punct:]\$])-", "\g<1>", line)
            line = re.sub(r"^[[:space:]]*-[[:space:]]", "", line)
            line = re.sub(r"([[:alpha:]0-9ß])-([ [:punct:]])", "\g<1>\g<2>", line, re.UNICODE)
            line = re.sub(r"([ [:punct:]])-([[:alpha:]0-9ß])", "\g<1>\g<2>", line, re.UNICODE)
            line = re.sub(r" - ", " – ", line)
            line = re.sub(r"– -", "–", line)

            def normalize_quotes(token):
                # Strip trailing hyphens and turn NLTK's ``/'' quote tokens
                # into typographic quotes.
                token = re.sub(r"-$", '', token)
                token = re.sub(r"``", '\u201c', token)
                token = re.sub(r"''", '\u201d', token)
                return token

            tokenized = [normalize_quotes(t) for t in word_tokenize(line, language=args.language)]
            if args.language == "german":
                # Split German compounds; parts are re-joined with '#'.
                for i, token in enumerate(tokenized):
                    decompounded_count += 1
                    decompounded = decompounder.splitWord(token)
                    if decompounded.size() >= 2:
                        tokenized[i] = \
                            "#".join([decompounded.get(j) for j in range(decompounded.size())])
                    del decompounded
                    # Restore a trailing "s" (Fugen-s) dropped by the splitter.
                    if token.endswith("s") and not tokenized[i].endswith("s"):
                        tokenized[i] += "s"
                    # we need to manually garbage collect because of Java Heap Space
                    if decompounded_count % 150 == 0:
                        gc.collect()
            tokenized_string = ' '.join(tokenized)
            # Now put special character for spaces introduced by the tokenizer
            # ('@' = tokenizer-inserted space, '#' = compound split point,
            # '$' = linking 's' inserted between compound parts).
            original_i = 0
            tokenized_chars_result = []
            for tokenized_i, char in enumerate(tokenized_string):
                #print u"pair '{}' ({}) and '{}' ({})".format(char, ord(char), line[original_i], ord(line[original_i]))
                if char == line[original_i] or (char == " " and ord(line[original_i]) == 160):
                    tokenized_chars_result.append(char)
                    original_i += 1
                    #print u"same characters {}".format(char)
                elif line[original_i] == '"' and (char == '\u201c' or char == '\u201d'):
                    # NOTE(review): the curly quote is consumed but never
                    # appended to the output -- confirm that dropping it here
                    # is intentional.
                    original_i += 1
                    #print u"quotation mark {}".format(char)
                elif char == " ":
                    tokenized_chars_result.append("@")
                    #print "space added by tokenizer"
                elif char == "#":
                    if line[original_i] == "-":
                        tokenized_chars_result.append("-")
                        original_i += 1
                    else:
                        if args.language == 'german' and \
                                (line[original_i] == "s" or line[original_i] == "S") \
                                and line[original_i + 1] == tokenized_string[tokenized_i + 1]:
                            original_i += 1
                            tokenized_chars_result.append("$")
                            #print "decompounded with inserted s"
                            if args.language == 'german' and line[original_i] == "s" and line[original_i + 1] == "-":
                                original_i += 2
                                tokenized_chars_result.append("-")
                        else:
                            #print "decompompounded"
                            tokenized_chars_result.append("#")
                else:
                    #print ""
                    #print "Error on line {}".format(ln)
                    #cprint(u"tokenized on index {}: \"{}\", original on index {}: \"{}\""\
                    #    .format(tokenized_i, char, original_i, line[original_i]), 'yellow')
                    #cprint(line, 'red')
                    #cprint(tokenized_string, 'red')
                    #javabridge.kill_vm()
                    #exit()
                    tokenized_chars_result = list("<ERROR>")
                    break
            print("".join(tokenized_chars_result))
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        # and hides the original traceback; the kill_vm() here is redundant
        # with the finally clause below, so the JVM is killed twice.
        javabridge.kill_vm()
        exit(1)
    finally:
        javabridge.kill_vm()
| {
"content_hash": "5b588a70393405770d7c9a1bdd429d29",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 119,
"avg_line_length": 46.4344262295082,
"alnum_prop": 0.48314210061782875,
"repo_name": "ufal/neuralmonkey",
"id": "d6b3f8efc0293013c35be59703a9b282c4f15a8a",
"size": "5721",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/tokenize_data.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13780"
},
{
"name": "HTML",
"bytes": "3116"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mask",
"bytes": "69384"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "823152"
},
{
"name": "Shell",
"bytes": "4671"
}
],
"symlink_target": ""
} |
"""
Brocade backend.
Contributed by Balasubramania Pillai from MAX Gigapop.
Ported to OpenNSA NSIv2 by Henrik Thostrup Jensen (summer 2013)
Further contributions/fixes from Jeronimo Aguiar from AMPATH.
Notes:
configure terminal
vlan $vlan_id name $name
tagged $source_port
tagged $dest_port
end
Teardown:
configure terminal
no vlan $vlan_id
end
"""
import string
import random
from twisted.python import log
from twisted.internet import defer
from opennsa import constants as cnt, config
from opennsa.backends.common import ssh, genericbackend
# Tag used for all twisted log messages from this backend.
LOG_SYSTEM = 'opennsa.brocade'

# CLI command templates sent to the Brocade switch over SSH.
COMMAND_PRIVILEGE = 'enable %s'
COMMAND_CONFIGURE = 'configure terminal'
COMMAND_END = 'end'
COMMAND_VLAN = 'vlan %(vlan)i name %(name)s'
#COMMAND_TAGGED = 'tagged %(port)s'
COMMAND_TAGGED = 'tagged ethernet %(port)s'
COMMAND_NO_VLAN = 'no vlan %(vlan)i'
def _portToInterfaceVLAN(nrm_port):
port, vlan = nrm_port.split('.')
vlan = int(vlan)
return port, vlan
def _createSetupCommands(source_nrm_port, dest_nrm_port):
    """Build the CLI commands that create a VLAN and tag both ports onto it."""
    log.msg('_createSetupCommands: src %s dst %s' % (source_nrm_port, dest_nrm_port))
    src_port, src_vlan = _portToInterfaceVLAN(source_nrm_port)
    dst_port, dst_vlan = _portToInterfaceVLAN(dest_nrm_port)
    # VLAN rewriting is not supported, so both ends must use the same id.
    assert src_vlan == dst_vlan, 'Source and destination VLANs differ, unpossible!'
    log.msg('_createSetupCommands: src %s %s dst %s %s' % (src_port, src_vlan, dst_port, dst_vlan))
    vlan_name = 'opennsa-%i' % src_vlan
    commands = [
        COMMAND_VLAN % {'vlan': src_vlan, 'name': vlan_name},
        COMMAND_TAGGED % {'port': src_port},
        COMMAND_TAGGED % {'port': dst_port},
    ]
    log.msg('_createSetupCommands: commands %s' % (commands))
    return commands
def _createTeardownCommands(source_nrm_port, dest_nrm_port):
    """Build the CLI commands that remove the VLAN created at setup time."""
    src_port, src_vlan = _portToInterfaceVLAN(source_nrm_port)
    dst_port, dst_vlan = _portToInterfaceVLAN(dest_nrm_port)
    assert src_vlan == dst_vlan, 'Source and destination VLANs differ, unpossible!'
    # Deleting the VLAN implicitly removes the tagged port memberships.
    return [COMMAND_NO_VLAN % {'vlan': src_vlan}]
class SSHChannel(ssh.SSHChannel):
    """SSH session channel that drives the Brocade CLI.

    Commands are written one at a time; each write waits for the expected
    prompt character to appear in the incoming data stream before the next
    command is sent.
    """
    name = 'session'
    def __init__(self, conn):
        ssh.SSHChannel.__init__(self, conn=conn)
        # Accumulated incoming data since the last prompt match.
        self.data = ''
        # Deferred fired when wait_data is seen in the stream.
        self.wait_defer = None
        self.wait_data = None
    @defer.inlineCallbacks
    def sendCommands(self, commands, enable_password):
        """Enter privileged + configure mode, send commands, then 'end'."""
        LT = '\r' # line termination
        try:
            log.msg('Requesting shell for sending commands', debug=True, system=LOG_SYSTEM)
            yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
            # '>' is the unprivileged prompt; 'enable' switches to '#'.
            d = self.waitForData('>')
            self.write(COMMAND_PRIVILEGE % enable_password + LT)
            yield d
            log.msg('Entered privileged mode', debug=True, system=LOG_SYSTEM)
            d = self.waitForData('#')
            self.write(COMMAND_CONFIGURE + LT)
            yield d
            log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
            for cmd in commands:
                log.msg('CMD> %s' % cmd, debug=True, system=LOG_SYSTEM)
                d = self.waitForData('#')
                self.write(cmd + LT)
                yield d
            # not quite sure how to handle failure here
            log.msg('Commands send, sending end command.', debug=True, system=LOG_SYSTEM)
            d = self.waitForData('#')
            self.write(COMMAND_END + LT)
            yield d
        except Exception, e:
            log.msg('Error sending commands: %s' % str(e))
            raise e
        log.msg('Commands successfully send', debug=True, system=LOG_SYSTEM)
        self.sendEOF()
        self.closeIt()
    def waitForData(self, data):
        """Return a deferred that fires when *data* shows up in the stream."""
        self.wait_data = data
        self.wait_defer = defer.Deferred()
        return self.wait_defer
    def dataReceived(self, data):
        # Buffer incoming bytes; fire the waiting deferred once the expected
        # prompt substring is present, then reset the wait state.
        if len(data) == 0:
            pass
        else:
            self.data += data
            if self.wait_data and self.wait_data in self.data:
                d = self.wait_defer
                self.data = ''
                self.wait_data = None
                self.wait_defer = None
                d.callback(self)
class BrocadeCommandSender:
    """Opens an SSH connection to the switch and replays CLI command lists."""
    def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path, enable_password):
        self.ssh_connection_creator = \
            ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
        self.enable_password = enable_password
    @defer.inlineCallbacks
    def sendCommands(self, commands):
        """Send *commands* over a fresh SSH connection, always closing it."""
        # Open a connection for each request
        # This is done due to the code being based on the Force10 backend
        # It is currently unknown if the Brocade SSH implementation
        # supports multiple ssh channels.
        log.msg('Creating new SSH connection', debug=True, system=LOG_SYSTEM)
        connection = yield self.ssh_connection_creator.getSSHConnection()
        try:
            session = SSHChannel(conn=connection)
            connection.openChannel(session)
            yield session.channel_open
            yield session.sendCommands(commands, self.enable_password)
        finally:
            # Drop the transport even when command execution failed.
            connection.transport.loseConnection()
class BrocadeConnectionManager:
    """Connection manager for the generic backend.

    Maps STP ports/labels to switch interfaces and VLANs, and provisions or
    tears down cross-connects by sending CLI commands over SSH.
    """
    def __init__(self, log_system, port_map, cfg):
        self.log_system = log_system
        self.port_map = port_map  # STP port name -> switch interface name
        # Pull SSH/CLI connection settings from the backend configuration.
        host = cfg[config.BROCADE_HOST]
        port = cfg.get(config.BROCADE_PORT, 22)
        host_fingerprint = cfg[config.BROCADE_HOST_FINGERPRINT]
        user = cfg[config.BROCADE_USER]
        ssh_public_key = cfg[config.BROCADE_SSH_PUBLIC_KEY]
        ssh_private_key = cfg[config.BROCADE_SSH_PRIVATE_KEY]
        enable_password = cfg[config.BROCADE_ENABLE_PASSWORD]
        self.command_sender = BrocadeCommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key, enable_password)
    def getResource(self, port, label_type, label_value):
        """Return the resource consumed by a connection (the VLAN id as str)."""
        assert label_type == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
        return str(label_value)
    def getTarget(self, port, label_type, label_value):
        """Return the NRM target string '<interface>.<vlan>' for a port/label."""
        # label_value may arrive as an int VLAN id; coerce to str so the
        # concatenation cannot raise TypeError (getResource stringifies too).
        return self.port_map[port] + '.' + str(label_value)
    def createConnectionId(self, source_target, dest_target):
        """Create a random connection id: 'B-' + 10 lowercase hex digits."""
        return 'B-' + ''.join( [ random.choice(string.hexdigits[:16]) for _ in range(10) ] )
    def canSwapLabel(self, label_type):
        """VLAN rewriting between the two ends is not supported."""
        return False
    def setupLink(self, connection_id, source_target, dest_target, bandwidth):
        """Provision the cross-connect; returns a deferred firing when done."""
        def linkUp(pt):
            log.msg('Link %s -> %s up' % (source_target, dest_target), system=self.log_system)
            return pt
        commands = _createSetupCommands(source_target, dest_target)
        d = self.command_sender.sendCommands(commands)
        d.addCallback(linkUp)
        return d
    def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
        """Remove the cross-connect; returns a deferred firing when done."""
        def linkDown(pt):
            log.msg('Link %s -> %s down' % (source_target, dest_target), system=self.log_system)
            return pt
        commands = _createTeardownCommands(source_target, dest_target)
        d = self.command_sender.sendCommands(commands)
        d.addCallback(linkDown)
        return d
def BrocadeBackend(network_name, network_topology, parent_requester, port_map, configuration):
    """Factory: wire a Brocade connection manager into a generic backend."""
    log_system = 'Brocade %s' % network_name
    manager = BrocadeConnectionManager(log_system, port_map, configuration)
    return genericbackend.GenericBackend(network_name, network_topology, manager, parent_requester, log_system)
| {
"content_hash": "c651e60fa848efb02370133bc494681b",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 136,
"avg_line_length": 29.926356589147286,
"alnum_prop": 0.628286491387126,
"repo_name": "jab1982/opennsa",
"id": "e0d0f77cefdc0d762c2c30d242997e2d80296a8b",
"size": "7721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opennsa/backends/brocade.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "133"
},
{
"name": "Python",
"bytes": "538009"
},
{
"name": "Shell",
"bytes": "1581"
},
{
"name": "Web Ontology Language",
"bytes": "14166"
}
],
"symlink_target": ""
} |
import timeit, numpy
# Number of elements per operand array; a float so it interpolates into the
# '%f' placeholders of the setup templates below.
array_size = 1e6
# Timing repetitions per expression.
iterations = 10
# Choose the type you want to benchmark
#dtype = 'int8'
#dtype = 'int16'
#dtype = 'int32'
#dtype = 'int64'
dtype = 'float32'
#dtype = 'float64'
def compare_times(setup, expr):
    """Time *expr* under numpy, scipy.weave's blitz and numexpr, print the
    per-iteration times, and return the numpy/numexpr speed ratio."""
    print "Expression:", expr
    namespace = {}
    # NOTE(review): namespace is not handed to the timers below; this exec
    # looks redundant -- confirm intent before removing.
    exec setup in namespace
    numpy_timer = timeit.Timer(expr, setup)
    numpy_time = numpy_timer.timeit(number=iterations)
    print 'numpy:', numpy_time / iterations
    try:
        # scipy.weave may be missing; skip the weave timing in that case.
        weave_timer = timeit.Timer('blitz("result=%s")' % expr, setup)
        weave_time = weave_timer.timeit(number=iterations)
        print "Weave:", weave_time/iterations
        print "Speed-up of weave over numpy:", round(numpy_time/weave_time, 2)
    except:
        print "Skipping weave timing"
    numexpr_timer = timeit.Timer('evaluate("%s", optimization="aggressive")' % expr, setup)
    numexpr_time = numexpr_timer.timeit(number=iterations)
    print "numexpr:", numexpr_time/iterations
    tratio = numpy_time/numexpr_time
    print "Speed-up of numexpr over numpy:", round(tratio, 2)
    return tratio
# Benchmark 1: sum of two elementwise products (four operands).
setup1 = """\
from numpy import arange
try: from scipy.weave import blitz
except: pass
from numexpr import evaluate
result = arange(%f, dtype='%s')
b = arange(%f, dtype='%s')
c = arange(%f, dtype='%s')
d = arange(%f, dtype='%s')
e = arange(%f, dtype='%s')
""" % ((array_size, dtype)*5)
expr1 = 'b*c+d*e'
# Benchmark 2: scaled sum of two operands.
setup2 = """\
from numpy import arange
try: from scipy.weave import blitz
except: pass
from numexpr import evaluate
a = arange(%f, dtype='%s')
b = arange(%f, dtype='%s')
result = arange(%f, dtype='%s')
""" % ((array_size, dtype)*3)
expr2 = '2*a+3*b'
# Benchmark 3: transcendental functions; 'a' is strided (every other element).
setup3 = """\
from numpy import arange, sin, cos, sinh
try: from scipy.weave import blitz
except: pass
from numexpr import evaluate
a = arange(2*%f, dtype='%s')[::2]
b = arange(%f, dtype='%s')
result = arange(%f, dtype='%s')
""" % ((array_size, dtype)*3)
expr3 = '2*a + (cos(3)+5)*sinh(cos(b))'
# Benchmark 4: two-argument function (arctan2) on a strided operand.
setup4 = """\
from numpy import arange, sin, cos, sinh, arctan2
try: from scipy.weave import blitz
except: pass
from numexpr import evaluate
a = arange(2*%f, dtype='%s')[::2]
b = arange(%f, dtype='%s')
result = arange(%f, dtype='%s')
""" % ((array_size, dtype)*3)
expr4 = '2*a + arctan2(a, b)'
# Setup 5 is shared by expressions 5-12 (where/power/sqrt variants).
setup5 = """\
from numpy import arange, sin, cos, sinh, arctan2, sqrt, where
try: from scipy.weave import blitz
except: pass
from numexpr import evaluate
a = arange(2*%f, dtype='%s')[::2]
b = arange(%f, dtype='%s')
result = arange(%f, dtype='%s')
""" % ((array_size, dtype)*3)
expr5 = 'where(0.1*a > arctan2(a, b), 2*a, arctan2(a,b))'
expr6 = 'where(a != 0.0, 2, b)'
expr7 = 'where(a-10 != 0.0, a, 2)'
expr8 = 'where(a%2 != 0.0, b+5, 2)'
expr9 = 'where(a%2 != 0.0, 2, b+5)'
expr10 = 'a**2 + (b+1)**-2.5'
expr11 = '(a+1)**50'
expr12 = 'sqrt(a**2 + b**2)'
def compare(check_only=False):
    """Run every benchmark expression once and return the mean
    numpy/numexpr speed ratio.

    NOTE(review): check_only is currently unused -- confirm intent.
    """
    experiments = [(setup1, expr1), (setup2, expr2), (setup3, expr3),
                   (setup4, expr4), (setup5, expr5), (setup5, expr6),
                   (setup5, expr7), (setup5, expr8), (setup5, expr9),
                   (setup5, expr10), (setup5, expr11), (setup5, expr12),
                   ]
    total = 0
    for params in experiments:
        total += compare_times(*params)
        print
    average = total / len(experiments)
    print "Average =", round(average, 2)
    return average
if __name__ == '__main__':
    import numexpr
    numexpr.print_versions()
    averages = []
    # Repeat the whole suite to smooth out run-to-run noise.
    for i in range(iterations):
        averages.append(compare())
    print "Averages:", ', '.join("%.2f" % x for x in averages)
| {
"content_hash": "0d7d764e4f3cd5b4ff8211897a66eaee",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 91,
"avg_line_length": 26.348148148148148,
"alnum_prop": 0.6123137475400618,
"repo_name": "jsalvatier/numexpr",
"id": "7c310faf3f3169c6cfef1afde539f5aec2d9f90f",
"size": "3557",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bench/timing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "88197"
},
{
"name": "Python",
"bytes": "116081"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from PySide import QtGui
from PySide import QtCore
import os
from .constants import EXTENSION
from .widgets import TagCompletion
import noteorganiser.text_processing as tp
class Dialog(QtGui.QDialog):
    """
    Model for dialogs in Note Organiser (pop-up windows)

    Provides the common shortcuts (Ctrl+W / Esc to cancel), a vertical
    layout, and logged accept/reject helpers for all popup subclasses.
    """
    def __init__(self, parent=None):
        """Define the shortcuts"""
        QtGui.QDialog.__init__(self, parent)
        # Keep references to the caller's shared state and logger.
        self.parent = parent
        self.info = parent.info
        self.log = parent.log
        # Define Ctrl+W to close it, and overwrite Esc
        QtGui.QShortcut(QtGui.QKeySequence('Ctrl+W'),
                        self, self.clean_reject)
        QtGui.QShortcut(QtGui.QKeySequence('Esc'),
                        self, self.clean_reject)
        self.setLayout(QtGui.QVBoxLayout())
    def clean_accept(self):
        """Log the successful closing of the popup, then accept it."""
        self.log.info("%s form succeeded!" % self.__class__.__name__)
        self.accept()
    def clean_reject(self):
        """Log the rejection of the popup, then reject it."""
        self.log.info("Aborting %s form" % self.__class__.__name__)
        self.reject()
    def translate(self, string):
        """Temporary fix for unicode tr problems"""
        return string
class NewNotebook(Dialog):
    """Popup asking for the name (and type) of a new notebook."""
    def __init__(self, parent=None):
        Dialog.__init__(self, parent)
        # Existing notebook names, without their file extension.
        self.names = [os.path.splitext(elem)[0]
                      for elem in self.info.notebooks]
        self.initUI()
    def initUI(self):
        """Build the name/type form and the Create/Cancel button row."""
        self.log.info("Creating a 'New Notebook' form")
        self.setWindowTitle("New notebook")
        # Form with the name field and the (currently single-valued) type.
        form = QtGui.QFormLayout()
        self.nameLineEdit = QtGui.QLineEdit()
        self.notebookType = QtGui.QComboBox()
        self.notebookType.addItem("Standard")
        form.addRow(self.translate("Notebook's &name:"), self.nameLineEdit)
        form.addRow(self.translate("&Notebook's &type:"), self.notebookType)
        self.layout().addLayout(form)
        # Confirmation/cancel buttons, right-aligned via a stretch.
        buttonRow = QtGui.QHBoxLayout()
        self.createButton = QtGui.QPushButton("&Create")
        self.createButton.clicked.connect(self.createNotebook)
        self.cancelButton = QtGui.QPushButton("C&ancel")
        self.cancelButton.clicked.connect(self.clean_reject)
        buttonRow.addStretch()
        buttonRow.addWidget(self.createButton)
        buttonRow.addWidget(self.cancelButton)
        self.layout().addLayout(buttonRow)
        # Status bar for validation feedback.
        self.statusBar = QtGui.QStatusBar(self)
        self.layout().addWidget(self.statusBar)
    def createNotebook(self):
        """Validate the requested name and register the new notebook."""
        desired_name = str(self.nameLineEdit.text())
        self.log.info("Desired Notebook name: "+desired_name)
        if not desired_name or len(desired_name) < 2:
            self.statusBar.showMessage("name too short", 2000)
            self.log.info("name rejected: too short")
            return
        if desired_name in self.names:
            self.statusBar.showMessage("name already used", 2000)
            self.log.info("name rejected: already used")
            return
        # Actually creating the notebook
        self.info.notebooks.append(desired_name+EXTENSION)
        self.statusBar.showMessage("Creating notebook", 2000)
        self.accept()
class NewFolder(Dialog):
    """Popup asking for the name of a new folder."""
    def __init__(self, parent=None):
        Dialog.__init__(self, parent)
        # Copy of the existing folder names used for duplicate detection.
        self.names = [elem for elem in self.info.folders]
        self.initUI()
    def initUI(self):
        """Build the single name field and the Create/Cancel button row."""
        self.log.info("Creating a 'New Folder' form")
        self.setWindowTitle("New folder")
        form = QtGui.QFormLayout()
        self.nameLineEdit = QtGui.QLineEdit()
        form.addRow(self.translate("Folder's &name:"), self.nameLineEdit)
        self.layout().addLayout(form)
        # Confirmation/cancel buttons, right-aligned via a stretch.
        buttonRow = QtGui.QHBoxLayout()
        self.createButton = QtGui.QPushButton("&Create")
        self.createButton.clicked.connect(self.createFolder)
        self.cancelButton = QtGui.QPushButton("C&ancel")
        self.cancelButton.clicked.connect(self.clean_reject)
        buttonRow.addStretch()
        buttonRow.addWidget(self.createButton)
        buttonRow.addWidget(self.cancelButton)
        self.layout().addLayout(buttonRow)
        # NOTE: unlike the sibling dialogs, no parent is passed here.
        self.statusBar = QtGui.QStatusBar()
        self.layout().addWidget(self.statusBar)
    def createFolder(self):
        """Validate the requested name and register the new folder."""
        desired_name = str(self.nameLineEdit.text())
        self.log.info("Desired Folder name: "+desired_name)
        if not desired_name or len(desired_name) < 2:
            self.statusBar.showMessage("name too short", 2000)
            self.log.info("name rejected: too short")
            return
        if desired_name in self.names:
            self.statusBar.showMessage("name already used", 2000)
            self.log.info("name rejected, already used")
            return
        # Actually creating the folder
        self.info.folders.append(desired_name)
        self.statusBar.showMessage("Creating Folder", 2000)
        self.accept()
class NewEntry(Dialog):
    """Create a new entry in the notebook"""
    def __init__(self, parent=None):
        Dialog.__init__(self, parent)
        self.initUI()
    def initUI(self):
        """Build the title/tags/body form, buttons and status bar."""
        self.log.info("Creating a 'New Entry' form")
        self.setWindowTitle("New entry")
        # Define the fields: Name, tags and body
        titleLineLayout = QtGui.QHBoxLayout()
        self.titleLineLabel = QtGui.QLabel("Title:")
        self.titleLineLabel.setFixedWidth(40)
        self.titleLineEdit = QtGui.QLineEdit()
        titleLineLayout.addWidget(self.titleLineLabel)
        titleLineLayout.addWidget(self.titleLineEdit)
        # create TagCompletion with tags from current file
        index = self.parent.tabs.currentIndex()
        notebook = os.path.join(self.info.level, self.info.notebooks[index])
        self.log.info("reading tags from %s" % notebook)
        _, tags = tp.from_notes_to_markdown(notebook)
        tagsLineLayout = QtGui.QHBoxLayout()
        self.tagsLineLabel = QtGui.QLabel("Tags:")
        self.tagsLineLabel.setFixedWidth(40)
        self.tagsLineEdit = TagCompletion(tags)
        tagsLineLayout.addWidget(self.tagsLineLabel)
        tagsLineLayout.addWidget(self.tagsLineEdit)
        # Body text area, with its label pinned to the top.
        corpusBoxLayout = QtGui.QHBoxLayout()
        self.corpusBoxLabel = QtGui.QLabel("Body:")
        self.corpusBoxLabel.setFixedWidth(40)
        self.corpusBoxLabel.setAlignment(
            QtCore.Qt.AlignTop)
        self.corpusBox = QtGui.QTextEdit()
        corpusBoxLayout.addWidget(self.corpusBoxLabel)
        corpusBoxLayout.addWidget(self.corpusBox)
        self.layout().addLayout(titleLineLayout)
        self.layout().addLayout(tagsLineLayout)
        self.layout().addLayout(corpusBoxLayout)
        # Define the RHS with Ok, Cancel and list of tags TODO)
        buttonLayout = QtGui.QHBoxLayout()
        self.newImageButton = QtGui.QPushButton("Insert &Image")
        self.newImageButton.clicked.connect(self.insertImage)
        self.okButton = QtGui.QPushButton("&Ok")
        self.okButton.clicked.connect(self.creating_entry)
        # Shift+Enter in the body also confirms the entry.
        acceptShortcut = QtGui.QShortcut(
            QtGui.QKeySequence(self.translate("Shift+Enter")), self.corpusBox)
        acceptShortcut.activated.connect(self.creating_entry)
        self.cancelButton = QtGui.QPushButton("&Cancel")
        self.cancelButton.clicked.connect(self.clean_reject)
        # Add a spacer before so that the buttons do not stretch
        buttonLayout.addStretch()
        buttonLayout.addWidget(self.newImageButton)
        buttonLayout.addWidget(self.okButton)
        buttonLayout.addWidget(self.cancelButton)
        self.layout().addLayout(buttonLayout)
        # Create the status bar
        self.statusBar = QtGui.QStatusBar(self)
        self.layout().addWidget(self.statusBar)
        self.titleLineEdit.setFocus()
    def creating_entry(self):
        """Validate title, tags and body; store them and accept the dialog.

        On success the results are available as self.title (str),
        self.tags (list of str) and self.corpus (str).
        """
        # Check if title is valid (non-empty)
        title = str(self.titleLineEdit.text())
        if not title or len(title) < 2:
            self.statusBar.showMessage(self.translate("Invalid title"), 2000)
            return
        tags = str(self.tagsLineEdit.getTextWithNormalizedSeparators())
        if not tags or len(tags) < 2:
            self.statusBar.showMessage(self.translate("Invalid tags"), 2000)
            return
        # Comma-separated tags string -> list of stripped tag names.
        tags = [tag.strip() for tag in tags.split(',')]
        corpus = self.corpusBox.toPlainText()
        if not corpus or len(corpus) < 2:
            self.statusBar.showMessage(self.translate("Empty entry"), 2000)
            return
        # Storing the variables to be recovered afterwards
        self.title = title
        self.tags = tags
        self.corpus = corpus
        self.clean_accept()
    def insertImage(self):
        """ insert an image path as markdown at the current cursor position """
        self.popup = QtGui.QFileDialog()
        filename = self.popup.getOpenFileName(self,
            "select an image",
            "",
            "Image Files (*.png *.jpg *.bmp *.jpeg *.svg *.gif)" + \
            ";;all files (*.*)")
        # QFileDialog returns a tuple with filename and used filter
        if filename[0]:
            imagemarkdown = tp.create_image_markdown(filename[0])
            self.corpusBox.insertPlainText(imagemarkdown)
class SetExternalEditor(Dialog):
    """Popup for setting the command line of the external editor.

    On success the value is stored in self.commandline and persisted in the
    application QSettings under "externalEditor".
    """
    def __init__(self, parent=None):
        Dialog.__init__(self, parent)
        self.initUI()
    def initUI(self):
        """Build the single-line form with its Ok/Cancel buttons."""
        self.log.info("Creating a 'Set External Editor' form")
        self.setWindowTitle("Set External Editor")
        # Define the main window horizontal layout
        hboxLayout = QtGui.QHBoxLayout()
        # Define the field
        formLayout = QtGui.QFormLayout()
        self.commandlineEdit = QtGui.QLineEdit()
        self.commandlineEdit.setText(self.info.externalEditor)
        # Use Dialog.translate like the other popups do (it exists as a
        # workaround for unicode problems with self.tr).
        formLayout.addRow(self.translate("&external editor:"), self.commandlineEdit)
        hboxLayout.addLayout(formLayout)
        # Define the RHS with Ok, Cancel and list of tags TODO)
        buttonLayout = QtGui.QVBoxLayout()
        self.okButton = QtGui.QPushButton("&Ok")
        self.okButton.clicked.connect(self.set_commandline)
        self.cancelButton = QtGui.QPushButton("&Cancel")
        self.cancelButton.clicked.connect(self.clean_reject)
        buttonLayout.addStretch()
        buttonLayout.addWidget(self.okButton)
        buttonLayout.addWidget(self.cancelButton)
        hboxLayout.addLayout(buttonLayout)
        # Create the status bar
        self.statusBar = QtGui.QStatusBar(self)
        # Create a permanent widget displaying what we are doing
        statusWidget = \
            QtGui.QLabel("setting the commandline for the external editor")
        self.statusBar.addPermanentWidget(statusWidget)
        self.layout().addLayout(hboxLayout)
        self.layout().addWidget(self.statusBar)
    def set_commandline(self):
        """Check the commandline, write it to the settings and accept."""
        # Check if text is a valid commandline
        commandline = str(self.commandlineEdit.text())
        if not commandline or len(commandline) < 2:
            self.statusBar.showMessage(self.translate("Invalid Commandline"), 2000)
            return
        # Storing the variables to be recovered afterwards
        self.commandline = commandline
        self.settings = QtCore.QSettings("audren", "NoteOrganiser")
        self.settings.setValue("externalEditor", self.commandline)
        self.clean_accept()
| {
"content_hash": "950274f08a6b623ada39cf4e426e0712",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 82,
"avg_line_length": 36.993920972644375,
"alnum_prop": 0.6364308602415578,
"repo_name": "egolus/NoteOrganiser",
"id": "061f03ae93c6a55c7c7549d2e2fc061bfdda2d58",
"size": "12171",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "noteorganiser/popups.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14284"
},
{
"name": "HTML",
"bytes": "38802"
},
{
"name": "Makefile",
"bytes": "380"
},
{
"name": "Python",
"bytes": "134104"
}
],
"symlink_target": ""
} |
import collections
import os
import re
# Paths are resolved relative to this script's location inside the clang tree.
CLANG_DIR = os.path.join(os.path.dirname(__file__), '../..')
# Headers the options are parsed from, and the doc file that is regenerated.
FORMAT_STYLE_FILE = os.path.join(CLANG_DIR, 'include/clang/Format/Format.h')
INCLUDE_STYLE_FILE = os.path.join(CLANG_DIR, 'include/clang/Tooling/Inclusions/IncludeStyle.h')
DOC_FILE = os.path.join(CLANG_DIR, 'docs/ClangFormatStyleOptions.rst')
def substitute(text, tag, contents):
  """Replace the region between the '.. START_<tag>' and '.. END_<tag>'
  markers in *text* with *contents*, keeping the markers themselves."""
  replacement = '\n.. START_%s\n\n%s\n\n.. END_%s\n' % (tag, contents, tag)
  pattern = r'\n\.\. START_%s\n.*\n\.\. END_%s\n' % (tag, tag)
  # Use a callable replacement so that '%' characters elsewhere in the
  # document and backslashes in the replacement are taken literally (the
  # previous sub-to-'%s'-then-format approach broke on documents containing
  # literal '%').
  return re.sub(pattern, lambda match: replacement, text, flags=re.S)
def doxygen2rst(text):
  """Translate the small subset of doxygen markup used in Format.h into
  reStructuredText."""
  # <tt>...</tt> and \c word both become ``inline literals``.
  text = re.sub(r'<tt>\s*(.*?)\s*<\/tt>', r'``\1``', text)
  text = re.sub(r'\\c ([^ ,;\.]+)', r'``\1``', text)
  # Any remaining doxygen command (e.g. \brief) is stripped entirely.
  return re.sub(r'\\\w+ ', '', text)
def indent(text, columns, indent_first_line=True):
  """Indent every line of *text* by *columns* spaces.

  Lines following an empty line keep their position; the first line is only
  indented when indent_first_line is true and the text does not start with a
  newline."""
  prefix = ' ' * columns
  indented = re.sub(r'\n([^\n])', '\n' + prefix + '\\1', text, flags=re.S)
  if indent_first_line and not indented.startswith('\n'):
    indented = prefix + indented
  return indented
class Option(object):
  """A single style option: name, C++ type and documentation comment."""
  def __init__(self, name, type, comment):
    self.name = name
    self.type = type
    self.comment = comment.strip()
    self.enum = None           # set later when the type is a known enum
    self.nested_struct = None  # set later when the type is a nested struct
  def __str__(self):
    # Option header plus its (indented, doxygen-translated) comment.
    text = '**%s** (``%s``)\n%s' % (self.name, self.type,
                                    doxygen2rst(indent(self.comment, 2)))
    if self.enum:
      text += indent('\n\nPossible values:\n\n%s\n' % self.enum, 2)
    if self.nested_struct:
      text += indent('\n\nNested configuration flags:\n\n%s\n' % self.nested_struct,
                     2)
    return text
class NestedStruct(object):
  """A struct nested inside FormatStyle; rendered as its fields, one per line."""
  def __init__(self, name, comment):
    self.name = name
    self.comment = comment.strip()
    self.values = []  # NestedField entries in declaration order
  def __str__(self):
    return '\n'.join(str(value) for value in self.values)
class NestedField(object):
  """One member of a nested configuration struct."""
  def __init__(self, name, comment):
    self.name = name
    self.comment = comment.strip()
  def __str__(self):
    # Rendered as a bullet item with the comment hanging under the name.
    body = doxygen2rst(indent(self.comment, 2, indent_first_line=False))
    return '\n* ``%s`` %s' % (self.name, body)
class Enum(object):
  """An enum type used by some option; rendered as its values, one per line."""
  def __init__(self, name, comment):
    self.name = name
    self.comment = comment.strip()
    self.values = []  # EnumValue entries in declaration order
  def __str__(self):
    return '\n'.join(str(value) for value in self.values)
class EnumValue(object):
  """A single enumerator, rendered with its in-configuration spelling."""
  def __init__(self, name, comment):
    self.name = name
    self.comment = comment
  def __str__(self):
    # The configuration spelling drops the 'XX_' prefix of the C++ name.
    config_name = re.sub('.*_', '', self.name)
    return '* ``%s`` (in configuration: ``%s``)\n%s' % (
        self.name, config_name, doxygen2rst(indent(self.comment, 2)))
def clean_comment_line(line):
  """Strip the '/// ' prefix from a doxygen comment line, translating the
  \\code/\\endcode markers into an rst code-block directive."""
  code_match = re.match(r'^/// \\code(\{.(\w+)\})?$', line)
  if code_match:
    # An unspecified language defaults to c++.
    lang = code_match.groups()[1] or 'c++'
    return '\n.. code-block:: %s\n\n' % lang
  if line == '/// \\endcode':
    return ''
  return line[4:] + '\n'
def read_options(header):
  """Parse the style options from a header file object.

  Runs a line-based state machine over the FormatStyle/IncludeStyle struct
  definition and returns a list of Option objects, with documentation for
  enum types and nested structs attached where applicable.
  """
  class State(object):
    BeforeStruct, Finished, InStruct, InNestedStruct, InNestedFieldComent, \
    InFieldComment, InEnum, InEnumMemberComment = range(8)
  state = State.BeforeStruct
  options = []
  enums = {}
  nested_structs = {}
  comment = ''
  enum = None
  nested_struct = None
  for line in header:
    line = line.strip()
    if state == State.BeforeStruct:
      # Skip everything until the struct of interest opens.
      if line == 'struct FormatStyle {' or line == 'struct IncludeStyle {':
        state = State.InStruct
    elif state == State.InStruct:
      if line.startswith('///'):
        state = State.InFieldComment
        comment = clean_comment_line(line)
      elif line == '};':
        # Closing brace of the main struct ends the parse.
        state = State.Finished
        break
    elif state == State.InFieldComment:
      if line.startswith('///'):
        comment += clean_comment_line(line)
      elif line.startswith('enum'):
        state = State.InEnum
        name = re.sub(r'enum\s+(\w+)\s*\{', '\\1', line)
        enum = Enum(name, comment)
      elif line.startswith('struct'):
        state = State.InNestedStruct
        name = re.sub(r'struct\s+(\w+)\s*\{', '\\1', line)
        nested_struct = NestedStruct(name, comment)
      elif line.endswith(';'):
        # A field declaration ends the comment: record the option.
        state = State.InStruct
        field_type, field_name = re.match(r'([<>:\w(,\s)]+)\s+(\w+);',
                                          line).groups()
        option = Option(str(field_name), str(field_type), comment)
        options.append(option)
      else:
        raise Exception('Invalid format, expected comment, field or enum')
    elif state == State.InNestedStruct:
      if line.startswith('///'):
        state = State.InNestedFieldComent
        comment = clean_comment_line(line)
      elif line == '};':
        state = State.InStruct
        nested_structs[nested_struct.name] = nested_struct
    elif state == State.InNestedFieldComent:
      if line.startswith('///'):
        comment += clean_comment_line(line)
      else:
        # Any non-comment line is the field declaration itself.
        state = State.InNestedStruct
        nested_struct.values.append(NestedField(line.replace(';', ''), comment))
    elif state == State.InEnum:
      if line.startswith('///'):
        state = State.InEnumMemberComment
        comment = clean_comment_line(line)
      elif line == '};':
        state = State.InStruct
        enums[enum.name] = enum
      else:
        raise Exception('Invalid format, expected enum field comment or };')
    elif state == State.InEnumMemberComment:
      if line.startswith('///'):
        comment += clean_comment_line(line)
      else:
        # Any non-comment line is the enumerator itself.
        state = State.InEnum
        enum.values.append(EnumValue(line.replace(',', ''), comment))
  if state != State.Finished:
    raise Exception('Not finished by the end of file')
  # Attach enum / nested-struct documentation to the options that use them;
  # the plain types in the list below need no extra documentation.
  for option in options:
    if not option.type in ['bool', 'unsigned', 'int', 'std::string',
                           'std::vector<std::string>',
                           'std::vector<IncludeCategory>',
                           'std::vector<RawStringFormat>']:
      if option.type in enums:
        option.enum = enums[option.type]
      elif option.type in nested_structs:
        option.nested_struct = nested_structs[option.type]
      else:
        raise Exception('Unknown type: %s' % option.type)
  return options
# Regenerate the FORMAT_STYLE_OPTIONS section of the documentation from the
# two style headers, sorted alphabetically by option name.
options = read_options(open(FORMAT_STYLE_FILE))
options += read_options(open(INCLUDE_STYLE_FILE))
options = sorted(options, key=lambda x: x.name)
options_text = '\n\n'.join(map(str, options))
contents = open(DOC_FILE).read()
contents = substitute(contents, 'FORMAT_STYLE_OPTIONS', options_text)
# NOTE(review): writing a str to a file opened in binary mode only works on
# Python 2 -- confirm before running under Python 3.
with open(DOC_FILE, 'wb') as output:
    output.write(contents)
| {
"content_hash": "b371959348c6fd267a72d1c3dd522581",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 95,
"avg_line_length": 32.82142857142857,
"alnum_prop": 0.5891496968754858,
"repo_name": "apple/swift-clang",
"id": "5feb793a4d705178790a7f2d079a235a27c1b5ca",
"size": "6666",
"binary": false,
"copies": "26",
"ref": "refs/heads/stable",
"path": "docs/tools/dump_format_style.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "48411"
},
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "C",
"bytes": "21215199"
},
{
"name": "C#",
"bytes": "27472"
},
{
"name": "C++",
"bytes": "72404163"
},
{
"name": "CMake",
"bytes": "175348"
},
{
"name": "CSS",
"bytes": "8395"
},
{
"name": "Cool",
"bytes": "22451"
},
{
"name": "Cuda",
"bytes": "413843"
},
{
"name": "Dockerfile",
"bytes": "2083"
},
{
"name": "Emacs Lisp",
"bytes": "17001"
},
{
"name": "Forth",
"bytes": "925"
},
{
"name": "Fortran",
"bytes": "8180"
},
{
"name": "HTML",
"bytes": "986812"
},
{
"name": "JavaScript",
"bytes": "42269"
},
{
"name": "LLVM",
"bytes": "27231"
},
{
"name": "M",
"bytes": "4660"
},
{
"name": "MATLAB",
"bytes": "69036"
},
{
"name": "Makefile",
"bytes": "8489"
},
{
"name": "Mathematica",
"bytes": "15066"
},
{
"name": "Mercury",
"bytes": "1193"
},
{
"name": "Objective-C",
"bytes": "3626772"
},
{
"name": "Objective-C++",
"bytes": "1033408"
},
{
"name": "Perl",
"bytes": "96256"
},
{
"name": "Python",
"bytes": "784542"
},
{
"name": "RenderScript",
"bytes": "741"
},
{
"name": "Roff",
"bytes": "10932"
},
{
"name": "Rust",
"bytes": "200"
},
{
"name": "Shell",
"bytes": "10663"
}
],
"symlink_target": ""
} |
import requests
import bs4
from plugin import plugin, require
# TODO: handle errors and instructions better
@require(network=True)
@plugin('lyrics')
class lyrics():
    """
    finds lyrics
    the format is song,artist
    song and artist are separated by a -
    -- Example:
    lyrics wonderful tonight-eric clapton
    """
    def __call__(self, jarvis, s):
        jarvis.say(self.find(s))
    def find(self, s):
        """Look up the lyrics described by *s* ('song-artist[-album]')."""
        fields = self.parse(s)
        # Positional fields: song, artist and a (currently unused) album.
        song = fields.pop(0) if fields else None
        artist = fields.pop(0) if fields else None
        album = fields.pop(0) if fields else None
        if not song or not artist:
            # error if artist or song don't exist
            return "you forgot to add either song name or artist name"
        response = get_lyric(artist, song)
        if response:
            return response
        return "Song or Singer does not exist or the API does not have lyrics"
    @classmethod
    def parse(self, s):
        """Split the raw request on '-' into [song, artist, album, ...]."""
        return s.split('-')
"""
All credit for this code goes to https://github.com/geekpradd
I only fixed some of the issues I was having with some requests,
but since he does not mantain his repository anymore and
forking it just to use it for this project was too much work
I just copied and fix the methods used in PyLyrics
"""
def get_lyric(singer, song):
    """Fetch the lyrics for *song* by *singer* from lyrics.wikia.com.

    Returns the lyrics as a string, or None when the page has no lyricbox.
    """
    # The wiki URL scheme uses underscores instead of spaces.
    singer = singer.replace(' ', '_')
    song = song.replace(' ', '_')
    url = 'http://lyrics.wikia.com/{0}:{1}'.format(singer, song)
    page = requests.get(url)
    soup = bs4.BeautifulSoup(page.text, "lxml")
    # Get main lyrics holder
    lyrics = soup.find("div", {'class': 'lyricbox'})
    if lyrics is None:
        return None
    # Drop script tags embedded in the lyrics container.
    for script in lyrics('script'):
        script.extract()
    # Collect HTML comments (extraction currently disabled upstream).
    comments = lyrics.findAll(text=lambda text: isinstance(text, bs4.Comment))
    # Unwrap formatting tags, keeping their children in place.
    for tag_name in ('div', 'i', 'b', 'a'):
        for match in lyrics.findAll(tag_name):
            match.replaceWithChildren()
    # Stringify: drop raw newlines, turn <br/> into newlines, then slice off
    # the surrounding <div ...> ... </div> markup.
    output = str(lyrics).replace('\n', '').replace('<br/>', '\n')[22:-6:]
    try:
        return output
    except BaseException:
        return output.encode('utf-8')
| {
"content_hash": "07a3c59f3f5ad1f5bb62c7f7ce422d1e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 91,
"avg_line_length": 30.215686274509803,
"alnum_prop": 0.5996106424399741,
"repo_name": "sukeesh/Jarvis",
"id": "af5173332588d84e5dcef033b61a8e8cc0e6d2f5",
"size": "3106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jarviscli/plugins/lyrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C++",
"bytes": "179"
},
{
"name": "Dockerfile",
"bytes": "343"
},
{
"name": "Java",
"bytes": "278"
},
{
"name": "Makefile",
"bytes": "730"
},
{
"name": "Python",
"bytes": "780285"
},
{
"name": "Shell",
"bytes": "739"
}
],
"symlink_target": ""
} |
from .model_not_found_exception import ModelNotFoundException
class Importer:
    """Imports Freeplane nodes into an Anki collection as notes."""
    def __init__(self, collection):
        self.collection = collection
        self.model = False       # active Anki model, set by __load_model
        self.model_fields = []   # field names of the active model
    def import_note(self, import_data):
        """Create or update one note from a Freeplane node's import data.

        import_data is a dict with 'model', 'id', 'fields' and 'deck' keys.
        Always returns True.
        """
        self.__load_model(import_data['model'])
        note = self.__find_or_create_note(import_data['id'])
        self.__populate_note_fields(
            note, import_data['fields'], import_data['id'])
        # Set note deck
        note.model()['did'] = self.collection.decks.id(import_data['deck'])
        # A note that has never been flushed has no 'mod' timestamp yet.
        is_new = not hasattr(note, 'mod')
        note.flush()
        if is_new:
            self.collection.addNote(note)
        return True
    def __load_model(self, model_name):
        """Look up *model_name*, make it current and cache its field names."""
        model = self.collection.models.byName(model_name)
        if model is None:
            raise ModelNotFoundException(model_name)
        self.collection.models.setCurrent(model)
        self.model = model
        self.model_fields = self.collection.models.fieldNames(self.model)
    def __populate_note_fields(self, note, fields, node_id):
        """Fill the note's fields, storing the node id in the id field."""
        id_field = self.__get_model_id_field()
        if id_field is not None:
            note[id_field] = node_id
        # Copy over every model field present in the imported data.
        for field in self.model_fields:
            if field in fields:
                note[field] = fields[field]
    def __get_model_id_field(self):
        """Return the first model field if it is named 'id' (any case)."""
        if self.model_fields and self.model_fields[0].lower() == 'id':
            return self.model_fields[0]
        return None
    def __find_or_create_note(self, node_id):
        """Return the existing note whose first field is *node_id*, else a new one."""
        existing_id = self.collection.db.scalar(
            'SELECT id FROM notes WHERE flds LIKE ? AND mid = ?',
            str(node_id) + "\x1f%",
            self.model['id']
        )
        if existing_id is not None:
            note = self.collection.getNote(existing_id)
            if note is not None:
                return note
        return self.collection.newNote()
| {
"content_hash": "85943ff161f81705e432987f466c8cab",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.3,
"alnum_prop": 0.5755362848014606,
"repo_name": "eljay26/anki-freeplane",
"id": "4ff26bdb68bb56325df605268d7b9b3276b660a0",
"size": "2191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freeplane_importer/importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14926"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.support import MicrosoftSupport
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-support
# USAGE
python list_support_tickets_with_a_certain_service_id_for_a_subscription.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Build the ARM Support client; DefaultAzureCredential picks up the
    # AZURE_CLIENT_ID / AZURE_TENANT_ID / AZURE_CLIENT_SECRET environment
    # variables described in the module docstring.
    support_client = MicrosoftSupport(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # Enumerate every support ticket in the subscription and echo each one.
    for ticket in support_client.support_tickets.list():
        print(ticket)


# x-ms-original-file: specification/support/resource-manager/Microsoft.Support/stable/2020-04-01/examples/ListSupportTicketsServiceIdEqualsForSubscription.json
if __name__ == "__main__":
    main()
| {
"content_hash": "94bbe7f4a7af2e9f0fa53483d374a761",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 159,
"avg_line_length": 34.193548387096776,
"alnum_prop": 0.7424528301886792,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6f6feee822eb1e4ef154a16d32d657e8e91ca521",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/support/azure-mgmt-support/generated_samples/list_support_tickets_with_a_certain_service_id_for_a_subscription.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from locust import task
from base import LmsTasks
class CoursewareViewsTasks(LmsTasks):
    """
    Models traffic for endpoints in lms.djangoapps.courseware.views

    Task weights roughly mirror the observed traffic distribution on
    courses.edx.org (last 7d as of 2014-02-24):

    /courseware.views:course_about 6088 0.07%
    /courseware.views:course_info 847180 10.23%
    /courseware.views:course_survey 43 0.00%
    /courseware.views:index 4163742 50.26%
    /courseware.views:jump_to 4989 0.06%
    /courseware.views:jump_to_id 87845 1.06%
    /courseware.views:mktg_course_about 2694565 32.52%
    /courseware.views:progress 331582 4.00%
    /courseware.views:static_tab 148526 1.79%
    /courseware.views:submission_history 172 0.00%
    /courseware.views:syllabus 3 0.00%
    """

    @task(50)
    def index(self):
        """Request a randomly-chosen top-level courseware page."""
        self.get(
            'courseware' + self.course_data.courseware_path,
            name='courseware:index',
        )

    @task(10)
    def course_home(self):
        """Request the course home page that contains the course outline."""
        self.get('course', name='courseware:course_home')

    @task(10)
    def info(self):
        """Request the course info tab."""
        self.get('info', name='courseware:course_info')

    @task(4)
    def progress(self):
        """Request the progress tab."""
        self.get('progress', name='courseware:progress')

    @task(1)
    def about(self):
        """Request the LMS' internal about page for this course."""
        self.get('about', name='courseware:about')

    @task(8)
    def stop(self):
        """Hand control back by interrupting this TaskSet."""
        self.interrupt()
| {
"content_hash": "431f04d00f0aa50eb1a24abc16969731",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 29.606060606060606,
"alnum_prop": 0.5578300921187308,
"repo_name": "edx/edx-load-tests",
"id": "6ab93585a3994cd8855750f324d127a64df4a235",
"size": "1954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loadtests/lms/courseware_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2741"
},
{
"name": "JavaScript",
"bytes": "9768"
},
{
"name": "Jupyter Notebook",
"bytes": "4305"
},
{
"name": "Makefile",
"bytes": "3964"
},
{
"name": "Python",
"bytes": "284496"
},
{
"name": "Shell",
"bytes": "5986"
}
],
"symlink_target": ""
} |
"""
Utility functions for decoding response bodies.
"""
import codecs
import collections
from io import BytesIO
import gzip
import zlib
import brotli
import zstandard as zstd
from typing import Union, Optional, AnyStr, overload # noqa
# Single-element cache shared by encode() and decode().  In practice this
# pays off for round trips such as
#     flow.request.content = flow.request.content.replace(b"foo", b"bar")
# where content unchanged by .replace() does not need to be re-encoded.
CachedDecode = collections.namedtuple(
    "CachedDecode", ["encoded", "encoding", "errors", "decoded"]
)
_cache = CachedDecode(None, None, None, None)
@overload
def decode(encoded: None, encoding: str, errors: str = 'strict') -> None:
    ...


@overload
def decode(encoded: str, encoding: str, errors: str = 'strict') -> str:
    ...


@overload
def decode(encoded: bytes, encoding: str, errors: str = 'strict') -> Union[str, bytes]:
    ...


def decode(
    encoded: Union[None, str, bytes], encoding: str, errors: str = 'strict'
) -> Union[None, str, bytes]:
    """
    Decode the given input object.

    Returns:
        The decoded value

    Raises:
        ValueError, if decoding fails.
    """
    global _cache

    if encoded is None:
        return None

    # Serve repeated decodes of the same payload from the shared
    # single-element cache.
    hit = (
        isinstance(encoded, bytes)
        and _cache.encoded == encoded
        and _cache.encoding == encoding
        and _cache.errors == errors
    )
    if hit:
        return _cache.decoded

    try:
        try:
            decoded = custom_decode[encoding](encoded)
        except KeyError:
            # Not a custom content-encoding; fall back to the codecs registry.
            decoded = codecs.decode(encoded, encoding, errors)  # type: ignore
        if encoding in ("gzip", "deflate", "br", "zstd"):
            _cache = CachedDecode(encoded, encoding, errors, decoded)
        return decoded
    except TypeError:
        raise
    except Exception as e:
        raise ValueError("{} when decoding {} with {}: {}".format(
            type(e).__name__,
            repr(encoded)[:10],
            repr(encoding),
            repr(e),
        ))
@overload
def encode(decoded: None, encoding: str, errors: str = 'strict') -> None:
    ...


@overload
def encode(decoded: str, encoding: str, errors: str = 'strict') -> Union[str, bytes]:
    ...


@overload
def encode(decoded: bytes, encoding: str, errors: str = 'strict') -> bytes:
    ...


def encode(decoded: Union[None, str, bytes], encoding, errors='strict') -> Union[None, str, bytes]:
    """
    Encode the given input object.

    Returns:
        The encoded value

    Raises:
        ValueError, if encoding fails.
    """
    global _cache

    if decoded is None:
        return None

    # Answer from the shared single-element cache when the previous call
    # handled exactly the same payload.
    hit = (
        isinstance(decoded, bytes)
        and _cache.decoded == decoded
        and _cache.encoding == encoding
        and _cache.errors == errors
    )
    if hit:
        return _cache.encoded

    try:
        try:
            encoded = custom_encode[encoding](decoded)
        except KeyError:
            # Not a custom content-encoding; fall back to the codecs registry.
            encoded = codecs.encode(decoded, encoding, errors)  # type: ignore
        if encoding in ("gzip", "deflate", "br", "zstd"):
            _cache = CachedDecode(encoded, encoding, errors, decoded)
        return encoded
    except TypeError:
        raise
    except Exception as e:
        raise ValueError("{} when encoding {} with {}: {}".format(
            type(e).__name__,
            repr(decoded)[:10],
            repr(encoding),
            repr(e),
        ))
def identity(content):
    """Return *content* unchanged.

    "identity" is the default value of Accept-Encoding headers, i.e. no
    transformation at all.
    """
    return content
def decode_gzip(content: bytes) -> bytes:
    """Decompress a gzip-encoded byte string; empty input yields b""."""
    if not content:
        return b""
    with gzip.GzipFile(fileobj=BytesIO(content)) as gfile:
        return gfile.read()
def encode_gzip(content: bytes) -> bytes:
    """Compress *content* with gzip and return the compressed bytes."""
    buf = BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as gf:
        gf.write(content)
    # Closing the GzipFile flushes the trailer; BytesIO stays readable.
    return buf.getvalue()
def decode_brotli(content: bytes) -> bytes:
    """Decompress Brotli-encoded bytes; empty input maps to b""."""
    return brotli.decompress(content) if content else b""
def encode_brotli(content: bytes) -> bytes:
    """Compress *content* with Brotli (library default settings)."""
    return brotli.compress(content)
def decode_zstd(content: bytes) -> bytes:
    """Decompress zstd-encoded bytes; empty input maps to b""."""
    if not content:
        return b""
    dctx = zstd.ZstdDecompressor()
    try:
        return dctx.decompress(content)
    except zstd.ZstdError:
        # Streamed frames may omit the decompressed-size header; retry with
        # an explicit 10 MiB output ceiling.
        return dctx.decompress(content, max_output_size=10 * 2 ** 20)
def encode_zstd(content: bytes) -> bytes:
    """Compress *content* into a zstandard frame (default compressor settings)."""
    zstd_ctx = zstd.ZstdCompressor()
    return zstd_ctx.compress(content)
def decode_deflate(content: bytes) -> bytes:
    """
    Return decompressed data for DEFLATE.

    Some servers respond with compressed data lacking the zlib header and
    checksum; an undocumented feature of zlib permits lenient decompression
    of such raw streams via a negative window size.
    http://bugs.python.org/issue5784
    """
    if not content:
        return b""
    try:
        return zlib.decompress(content)
    except zlib.error:
        # Retry as a raw DEFLATE stream without zlib header/checksum.
        return zlib.decompress(content, -15)
def encode_deflate(content: bytes) -> bytes:
    """Compress *content*, always including the zlib header and checksum."""
    return zlib.compress(content)
# Content-encodings that need handling beyond the codecs registry.
# decode() / encode() try these tables first and fall back to codecs.
custom_decode = {
    "none": identity,
    "identity": identity,
    "gzip": decode_gzip,
    "deflate": decode_deflate,
    "br": decode_brotli,
    "zstd": decode_zstd,
}
custom_encode = {
    "none": identity,
    "identity": identity,
    "gzip": encode_gzip,
    "deflate": encode_deflate,
    "br": encode_brotli,
    "zstd": encode_zstd,
}

# Public API of this module.
__all__ = ["encode", "decode"]
| {
"content_hash": "141886b531436ac4f03e9e8314d43774",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 99,
"avg_line_length": 24.913419913419915,
"alnum_prop": 0.6116420503909644,
"repo_name": "vhaupert/mitmproxy",
"id": "164439422e113c8e1133bd90cfa387a74655a439",
"size": "5755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mitmproxy/net/http/encoding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "186961"
},
{
"name": "HTML",
"bytes": "3034"
},
{
"name": "JavaScript",
"bytes": "2254958"
},
{
"name": "PowerShell",
"bytes": "362"
},
{
"name": "Python",
"bytes": "1312583"
},
{
"name": "Shell",
"bytes": "3726"
}
],
"symlink_target": ""
} |
import os
from st2common.constants.pack import SYSTEM_PACK_NAME
from st2common.constants.sensors import MINIMUM_POLL_INTERVAL
from st2common.models.db.sensor import SensorTypeDB
from st2common.services import triggers as trigger_service
# Explicit public API of this module (the underscore-prefixed helper is
# intentionally excluded).
__all__ = [
    'to_sensor_db_model',
    'get_sensor_entry_point',
    'create_trigger_types'
]
def to_sensor_db_model(sensor_api_model=None):
    """
    Converts a SensorTypeAPI model to DB model.

    Also, creates trigger type objects provided in SensorTypeAPI.

    :param sensor_api_model: SensorTypeAPI object.
    :type sensor_api_model: :class:`SensorTypeAPI`

    :raises ValueError: If poll_interval is set below MINIMUM_POLL_INTERVAL.

    :rtype: :class:`SensorTypeDB`
    """
    class_name = getattr(sensor_api_model, 'class_name', None)
    pack = getattr(sensor_api_model, 'pack', None)
    entry_point = get_sensor_entry_point(sensor_api_model)
    artifact_uri = getattr(sensor_api_model, 'artifact_uri', None)
    description = getattr(sensor_api_model, 'description', None)
    trigger_types = getattr(sensor_api_model, 'trigger_types', [])
    enabled = getattr(sensor_api_model, 'enabled', True)

    # Read poll_interval once (this attribute was previously fetched twice)
    # and validate it against the system minimum.
    poll_interval = getattr(sensor_api_model, 'poll_interval', None)
    if poll_interval and (poll_interval < MINIMUM_POLL_INTERVAL):
        raise ValueError('Minimum possible poll_interval is %s seconds' %
                         (MINIMUM_POLL_INTERVAL))

    # Add pack to each trigger type item so the created trigger types are
    # registered under this sensor's pack.
    for trigger_type in trigger_types:
        trigger_type['pack'] = pack
    trigger_type_refs = create_trigger_types(trigger_types)

    return _create_sensor_type(pack=pack,
                               name=class_name,
                               description=description,
                               artifact_uri=artifact_uri,
                               entry_point=entry_point,
                               trigger_types=trigger_type_refs,
                               poll_interval=poll_interval,
                               enabled=enabled)
def create_trigger_types(trigger_types):
    """Persist the given TriggerType models and return their reference strings.

    Returns an empty list when no trigger types are provided.
    """
    if not trigger_types:
        return []

    # Add TriggerType models to the DB, then collect the reference string of
    # each created model belonging to this sensor.
    trigger_type_dbs = trigger_service.add_trigger_models(trigger_types=trigger_types)
    return [
        trigger_type_db.get_reference().ref
        for trigger_type_db, _ in trigger_type_dbs
    ]
def _create_sensor_type(pack=None, name=None, description=None, artifact_uri=None,
                        entry_point=None, trigger_types=None, poll_interval=10, enabled=True):
    """Build (without persisting) a SensorTypeDB from the given attributes."""
    return SensorTypeDB(pack=pack, name=name, description=description,
                        artifact_uri=artifact_uri, entry_point=entry_point,
                        poll_interval=poll_interval, enabled=enabled,
                        trigger_types=trigger_types)
def get_sensor_entry_point(sensor_api_model):
    """Derive the sensor's entry point from its artifact URI, pack and class name."""
    file_path = getattr(sensor_api_model, 'artifact_uri', None)
    class_name = getattr(sensor_api_model, 'class_name', None)
    pack = getattr(sensor_api_model, 'pack', None)

    if pack == SYSTEM_PACK_NAME:
        # Sensors shipped with the default installation are addressed by
        # class name alone.
        return class_name

    # Turn the file path below the pack directory into a dotted module path
    # and append the class name.
    module_path = file_path.split('/%s/' % (pack))[1]
    module_path = module_path.replace(os.path.sep, '.').replace('.py', '')
    return '%s.%s' % (module_path, class_name)
| {
"content_hash": "eef703bff8da66bd60d4d3909ad63eb1",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 94,
"avg_line_length": 39.180851063829785,
"alnum_prop": 0.635894651099647,
"repo_name": "emedvedev/st2",
"id": "5b85d63223e46ba3fbdfd76dedc1f1f8405881ea",
"size": "4463",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "st2common/st2common/models/utils/sensor_type_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41694"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3717722"
},
{
"name": "Shell",
"bytes": "38637"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
""" Extract features in time domain like simple amplitudes, signal differentiation or polynomial fit """
import numpy
import warnings
import logging
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.missions.nodes.decorators import BooleanParameter, NoOptimizationParameter, NormalParameter, \
QNormalParameter, ChoiceParameter, QUniformParameter
from pySPACE.resources.data_types.feature_vector import FeatureVector
@BooleanParameter("absolute")
class TimeDomainFeaturesNode(BaseNode):
    """ Use the samples of the time series as features

    The values of the selected channels at the selected points in time
    within the window are used directly as feature values.

    **Parameters**

        :datapoints:
            The indices of the data points that are used as features. If None,
            all data points are used.

            (*optional, default: None*)

        :absolute:
            If True, the absolute value of each amplitude value is used.
            Recommended for classification using only the EMG signal.

            (*optional, default: False*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Time_Domain_Features
            parameters :
                datapoints : [-4,-3,-2,-1] # means last 4 samples

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2008/08/26
    :Revised: 2009/07/16
    """
    def __init__(self, datapoints=None, absolute=False, **kwargs):
        super(TimeDomainFeaturesNode, self).__init__(**kwargs)
        self.set_permanent_attributes(datapoints=datapoints,
                                      absolute=absolute,
                                      feature_names=[])

    def _execute(self, x):
        """ Extract the TD features from the given data x """
        data = x.view(numpy.ndarray)

        # Normalize the datapoints parameter: the string "None", None and 0
        # all mean "use every sample of the window".
        if self.datapoints == "None":
            self.datapoints = None
        if self.datapoints is None or self.datapoints == 0:
            self.datapoints = range(data.shape[0])

        def index_to_time(index):
            # Relative time (seconds) of a sample index w.r.t. window onset;
            # negative indices count backwards from the window end.
            if index >= 0:
                return float(index) / x.sampling_frequency
            return (x.shape[0] + float(index)) / x.sampling_frequency

        # Keep only the requested samples, optionally rectified.
        data = data[self.datapoints, :]
        if self.absolute:
            data = numpy.fabs(data)
        data = data.T

        # Flatten so that all samples of one channel stay adjacent.
        features = data.reshape((1, data.shape[0] * data.shape[1]))

        # Determine the feature names only on the first call.
        if self.feature_names == []:
            self.feature_names = [
                "TD_%s_%.3fsec" % (channel_name, index_to_time(index))
                for channel_name in x.channel_names
                for index in self.datapoints
            ]

        return FeatureVector(
            numpy.atleast_2d(features).astype(numpy.float64),
            self.feature_names)
@NoOptimizationParameter("absolute")
class CustomChannelWiseFeatureNode(TimeDomainFeaturesNode):
    """ Use the result of a transformation of the time series as features.

    This node applies a given transformation to the data of each individual
    data channel. The result is consequently used as features.

    **Parameters**

        :feature_function:
            A string that defines the transformation of the univariate time
            series. This string will be evaluated in a
            ``eval('lambda x:' + feature_function)``
            statement. Therefore, ``'x'`` has to be used as placeholder for the
            input data. The output has to be array-like, dimensions do not
            matter. Note that numpy can directly be used. To use other external
            libraries, use, e.g., the following syntax:
            ``"__import__('statsmodels.tsa.api').tsa.api.AR(x).fit(maxlag=3).params[1:]"``

    **Note**

        The *datapoints* parameter provided by the TimeDomainFeaturesNode can
        also be used here. The *absolute* parameter, however, is not supported.
        If the absolute value shall be computed, this can be done in the
        *feature_function*.

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Custom_Features
            parameters :
                feature_function : "numpy.dot(x,x)"

    :Author: David Feess (david.feess@dfki.de)
    :Created: 2012/07/29
    """
    def __init__(self, feature_function, **kwargs):
        super(CustomChannelWiseFeatureNode, self).__init__(**kwargs)
        # 'absolute' is inherited from TimeDomainFeaturesNode but makes no
        # sense here, so its use is only warned about, not applied.
        if 'absolute' in kwargs.keys():
            warnings.warn("Custom_Features does not support 'absolute'!")
        ### Place to define abbreviations. The feature_function strings are
        ### matched with some patterns (using regexp) and then replaced by the
        ### actual string that describes the functions. Add your stuff here!
        import re
        # check if an abbreviation is used:
        # AR[p]: autoregressive model of order p (statsmodels)
        if re.match("AR\[([1-9])+\]",feature_function)!=None:
            p = feature_function.strip('AR[]')
            feature_function = "__import__('statsmodels.tsa.api').tsa.api.AR(x).fit(maxlag="+p+").params[1:]"
        # ARMA[p,q]: (note that ARMA is extremely slow)
        if re.match("ARMA\[([1-9])+,([1-9])+\]",feature_function)!=None:
            (p,q) = feature_function.strip('ARMA[]').split(',')
            feature_function = "__import__('statsmodels.tsa.api').tsa.api.ARMA(x).fit(order=("+p+","+q+")).params[1:]"
        # NOTE(review): feature_function is passed to eval() in _execute();
        # only trusted configuration strings should ever reach this node.
        self.set_permanent_attributes(feature_function = feature_function, #str
                                      feat_func = None) # the actual function
    def _execute(self, x):
        """ Extract the TD features from the given data x """
        y=x.view(numpy.ndarray)
        # Normalize datapoints: "None"/None/0 all mean "use every sample".
        if self.datapoints == "None":
            self.datapoints = None
        if self.datapoints == None or self.datapoints == 0:
            self.datapoints = range(y.shape[0])
        # We project onto the data points that should be used as features
        y = y[self.datapoints,:]
        # generate feat_func from string representation if not done yet
        if self.feat_func == None:
            self.feat_func = eval("lambda x: numpy.atleast_1d(" +
                                  self.feature_function +
                                  ").flatten()")
        # initialize 2D array for transformation results; the number of
        # features per channel is probed by applying feat_func to channel 0
        nr_feats_per_channel = len(self.feat_func(y[:,0]))
        res = numpy.zeros((nr_feats_per_channel,y.shape[1]))
        # eval feat_func for each channel
        for curr_chan in range(y.shape[1]):
            try:
                res[:,curr_chan] = self.feat_func(y[:,curr_chan])
            # Deliberate best-effort: a failing or mis-shaped feature
            # function yields zeros for that channel plus a warning.
            except: # pass zeros as features
                res[:,curr_chan] = numpy.zeros_like(res[:,curr_chan])
                warnings.warn("Feature Function failed or delivered wrong " +
                              "dimensions for channel %s in window: %s. "
                              % (x.channel_names[curr_chan],x.tag) +
                              "Wrote zeros in the feature vector instead.")
        # flatten, such that feats from one channel stay grouped together
        features = res.flatten('F')
        # Feature names are generated once and cached across calls
        if self.feature_names == []:
            for channel_name in x.channel_names:
                for i in range(nr_feats_per_channel):
                    self.feature_names.append("CustomFeature1_%s_%d" %
                                              (channel_name, i))
        # Create and return the feature vector
        feature_vector = \
            FeatureVector(numpy.atleast_2d(features).astype(numpy.float64),
                          self.feature_names)
        return feature_vector
# TODO: These two nodes need memory optimization ...
class TimeDomainDifferenceFeatureNode(BaseNode):
    """ Use differences between channels and/or times as features.

    This node uses differences between channels (Inter_Channel) at the same
    time and between different times (Intra_Channel) on the same channel as
    features.

    **Parameters**

        :datapoints:
            The indices of the data points that are used. If None, all
            data points are used.

            (*Optional, default: None*)

        :moving_window_length:
            If this parameter is greater than one, then not the data point x[i]
            is used but the average of the k=*moving_window_length* elements
            around x[i], i.e. avg([x[i-k/2],...,x[i+k/2]).

            (*Optional, default: 1*)

    **Known issues**

        In the current version this produces to much data, even just for one
        choice.

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Time_Domain_Difference_Features
            parameters :
                datapoints : None
                moving_window_length : 1

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2008/08/26
    :Revised: 2009/07/16
    """
    # NOTE(review): this method body uses dict.iterkeys() and relies on
    # integer division of moving_window_length -- Python 2 semantics.
    def __init__(self,
                 datapoints = None,
                 moving_window_length = 1,
                 **kwargs):
        super(TimeDomainDifferenceFeatureNode, self).__init__(**kwargs)
        self.set_permanent_attributes(datapoints = datapoints,
                              moving_window_length = moving_window_length,
                              feature_names = []) # cached across calls
    def _execute(self, x):
        """
        Extract the TD difference features from the given data x
        """
        #TODO: Shorten maybe this code
        # Normalize datapoints: the string "None" or None mean "all samples".
        if self.datapoints == "None":
            self.datapoints = None
        if self.datapoints == None:
            self.datapoints = range(x.shape[0])
        y=x.view(numpy.ndarray)
        #From each selected channel we extract the specified datapoints,
        # averaged over a moving window centred on each datapoint
        indices = []
        for datapoint in self.datapoints:
            indices.append(range(max(0, datapoint - \
                self.moving_window_length / 2), min(x.shape[0], datapoint + \
                (self.moving_window_length + 1) / 2)))
        channel_features = dict()
        for channel_name in x.channel_names:
            channel_index = x.channel_names.index(channel_name)
            for number, index_range in enumerate(indices):
                channel_features[(channel_name, number)] = \
                    numpy.mean(y[index_range, channel_index])
        # Mapping from datapoint index to relative time to onset
        def indexToTime(index):
            if index >= 0:
                return float(index) / x.sampling_frequency
            else:
                return (x.end_time - x.start_time)/ 1000.0 \
                    + float(index) / x.sampling_frequency
        # All pairwise differences: same channel/different times
        # (intra-channel) and same time/different channels (inter-channel).
        # Note the O(n^2) enumeration over all (channel, time) pairs.
        features = []
        for channel1, number1 in channel_features.iterkeys():
            for channel2, number2 in channel_features.iterkeys():
                if channel1 == channel2 and number1 > number2:
                    features.append(channel_features[(channel1, number1)] - \
                        channel_features[(channel2, number2)])
                elif number1 == number2 and channel1 != channel2:
                    features.append(channel_features[(channel1, number1)] - \
                        channel_features[(channel2, number2)])
        # Names are generated once, in the same iteration order as above.
        if self.feature_names == []:
            for channel1, number1 in channel_features.iterkeys():
                for channel2, number2 in channel_features.iterkeys():
                    if channel1 == channel2 and number1 > number2:
                        self.feature_names.append( \
                            "TDIntraChannel_%s_%.3fsec_%.3fsec" % (channel1,
                            indexToTime(number1), indexToTime(number2)))
                    elif number1 == number2 and channel1 != channel2:
                        self.feature_names.append( \
                            "TDInterChannel_%s-%s_%.3fsec" % (channel1,
                            channel2, indexToTime(number1)))
        feature_vector = \
            FeatureVector(numpy.atleast_2d(features).astype(numpy.float64),
                          self.feature_names)
        return feature_vector
class SimpleDifferentiationFeatureNode(BaseNode):
    """ Use differences between successive times on the same channel.

    This node uses differences between successive times on the same channel
    of the time series as features to simulate differentiation.

    **Parameters**

        :datapoints:
            The indices of the data points that are used. If None, all
            data points are used.

            (*Optional, default: None*)

        :moving_window_length:
            If this parameter is greater than one, then not the data point x[i]
            is used but the average of the k=*moving_window_length* elements
            around x[i], i.e. avg([x[i-k/2],...,x[i+k/2]).

            (*Optional, default: 1*)

    **Known Issues**

        .. todo:: new node with same result as a new time series

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Derivative_Features
            parameters :
                datapoints : None
                moving_window_length : 1

    :Author: Mario Krell (Mario.Krell@dfki.de)
    """
    # Simple copy of the TimeDifferenceFeatureNode with slight change at the
    # end. More complex would be a short time regression between more than two
    # points. Yet the possibility to make it a "real" differentiation is in
    # comment.
    # NOTE(review): uses dict.iterkeys() -- Python 2 code.
    def __init__(self,
                 datapoints = None,
                 moving_window_length = 1,
                 **kwargs):
        super(SimpleDifferentiationFeatureNode, self).__init__(**kwargs)
        self.set_permanent_attributes(datapoints = datapoints,
                              moving_window_length = moving_window_length)
    def _execute(self, x):
        """
        Extract the derivative features from the given data x
        """
        # Normalize datapoints: the string "None" or None mean "all samples".
        if self.datapoints == "None":
            self.datapoints = None
        if self.datapoints == None:
            self.datapoints = range(x.shape[0])
        y=x.view(numpy.ndarray)
        # From each selected channel we extract the specified data points,
        # averaged over a moving window centred on each datapoint
        indices = []
        for datapoint in self.datapoints:
            indices.append(range(max(0,
                int(datapoint - self.moving_window_length / 2)),
                int(min(x.shape[0], datapoint + (self.moving_window_length + 1) / 2))
                ))
        channel_features = dict()
        for channel_name in x.channel_names:
            channel_index = x.channel_names.index(channel_name)
            for number, index_range in enumerate(indices):
                channel_features[(channel_name, number)] = \
                    numpy.mean(y[index_range, channel_index])
        # Mapping from datapoint index to relative time to onset
        def indexToTime(index):
            if index >= 0:
                return float(index) / x.sampling_frequency
            else:
                return (x.end_time - x.start_time)/ 1000.0 \
                    + float(index) / x.sampling_frequency
        features = []
        feature_names = []
        for channel1, number1 in channel_features.iterkeys():
            # intuitive derivative quotient: two-point forward difference
            number2 = number1 + 1
            if (channel1, number2) in channel_features.iterkeys():
                features.append(channel_features[(channel1, number2)] - \
                    channel_features[(channel1, number1)])#*sampling_frequency
                feature_names.append("Df2_%s_%.3fsec" %
                                     (channel1, indexToTime(number1)))
            # Method taken from http://www.holoborodko.com/pavel/?page_id=245
            # f'(x)=\\frac{2(f(x+h)-f(x-h))-(f(x+2h)-f(x-2h))}{8h}
            # Further smoothing functions are available, but seemingly not
            # necessary, because we have already a smoothing of the signal
            # when doing the subsampling.
            number3 = number1 + 4
            number = number1 + 2
            if (channel1, number3) in channel_features.iterkeys():
                features.append(2.0 * (channel_features[(channel1, number+1)]\
                    - channel_features[(channel1, number-1)]) - \
                    (channel_features[(channel1, number+2)] - channel_features[(\
                    channel1, number-2)]))#*8*sampling_frequency
                feature_names.append("Df5_%s_%.3fsec" %
                                     (channel1, indexToTime(number)))
        feature_vector = \
            FeatureVector(numpy.atleast_2d(features).astype(numpy.float64),
                          feature_names)
        # Release intermediates before returning (see memory TODO above).
        features = []
        feature_names = []
        channel_features = dict()
        return feature_vector
@ChoiceParameter("coefficients_used", choices=[[0], [1], [0, 1]])
@QUniformParameter("stepsize", min_value=0, max_value=1000, q=20)
@QUniformParameter("segment_width", min_value=0, max_value=500, q=100)
class LocalStraightLineFeatureNode(BaseNode):
    """ Fit straight lines to channel segments and uses coefficients as features.

    Fit first order polynomials (straight lines) to subsegments of the
    channels and use the learned coefficients as features.

    **Parameters**

        :segment_width:
            The width of the segments (in milliseconds) to which straight lines
            are fitted.

            .. note:: segment_width is rounded such that it becomes a multiple
                      of the sampling interval.

        :stepsize:
            The time (in milliseconds) the segments are shifted. Extracted
            segments are [0, segment_width], [stepsize, segment_width+stepsize],
            [2*stepsize, segment_width+2*stepsize], ...

            .. note:: stepsize is rounded such that it becomes a multiple
                      of the sampling interval.

        :coefficients_used:
            List of the coefficients of the straight line that are actually used
            as features. The offset of the straight line is the coefficient 0 and
            the slope is coefficient 1. Per default, both are used as features.

            (*optional, default: [0, 1]*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Local_Straightline_Features
            parameters :
                segment_width : 1000
                stepsize : 1000

    .. todo::
        Check if segment width and stepsize make sense in relation to data.
        Program should not crash for bad choice (e.g., 400 and 200 on default data)?

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2011/01/04
    :Refactored: 2012/01/18
    """
    input_types=["TimeSeries"]
    # NOTE(review): the mutable default [0, 1] is safe here because it is
    # only read and immediately converted to a numpy array below.
    def __init__(self, segment_width, stepsize, coefficients_used=[0, 1],
                 *args, **kwargs):
        super(LocalStraightLineFeatureNode, self).__init__(*args, **kwargs)
        assert len(coefficients_used) <= 2 and \
            len(set(coefficients_used).difference([0,1])) == 0, \
            "Only the coefficients 0 (offset) and 1 (slope) are " \
            "supported!"
        # Externally, coefficient 0 denotes the offset and coefficient 1
        # denotes the slope. polyfit handles this the other way around.
        coefficients_used = 1 - numpy.array(coefficients_used)
        self.set_permanent_attributes(segment_width = segment_width,
                                      stepsize = stepsize,
                                      coefficients_used = coefficients_used)
    def _execute(self, data):
        """ Fit straight lines to all channel segments and return the coefficients """
        # Convert window_width and step size from milliseconds to data points
        segment_width = self.segment_width / 1000.0 * data.sampling_frequency
        segment_width = int(round(segment_width))
        stepsize = self.stepsize / 1000.0 * data.sampling_frequency
        stepsize = int(round(stepsize))
        if stepsize <= 0:
            stepsize = 1000
            self._log("Too small stepsize used! Changed to 1000.",
                      level=logging.ERROR)
        sample_width = int(1000 / data.sampling_frequency)
        # The subwindows of the time series to which a straight line is fitted
        # NOTE(review): this division is integral only under Python 2
        # semantics; under Python 3 it would yield a float -- verify before
        # porting.
        num_windows = \
            data.shape[1] * ((data.shape[0] - segment_width) / stepsize + 1)
        windows = numpy.zeros((segment_width, num_windows))
        feature_names = []
        counter = 0
        data_array = data.view(numpy.ndarray)
        for channel_index, channel_name in enumerate(data.channel_names):
            start = 0 # Start of segment (index)
            while start + segment_width <= data.shape[0]:
                # Compute and round start and end of segment
                end = start + segment_width
                # calculate sub-windows
                windows[:, counter] = \
                    data_array[start:end, channel_index]
                #coefficients_used is inverted (see __init__)
                #feature name consists of start and end time
                if 0 in self.coefficients_used:
                    feature_names.append("LSFOffset_%s_%.3fsec_%.3fsec" \
                            % (channel_name,
                               float(start * sample_width)/1000.0,
                               float(end * sample_width)/1000.0))
                if 1 in self.coefficients_used:
                    feature_names.append("LSFSlope_%s_%.3fsec_%.3fsec" \
                            % (channel_name,
                               float(start * sample_width)/1000.0,
                               float(end * sample_width)/1000.0))
                # Move to next segment
                start = start + stepsize
                counter += 1
        assert counter == windows.shape[1]
        # Compute the local straight line features: one polyfit call fits all
        # collected segments at once (columns of `windows`)
        coeffs = numpy.polyfit(range(windows.shape[0]), windows, 1)
        coeffs = coeffs[self.coefficients_used].flatten('F')
        feature_vector = \
            FeatureVector(numpy.atleast_2d(coeffs).astype(numpy.float64),
                          feature_names)
        return feature_vector
# Aliases under which these feature nodes may be referenced in node-chain
# specifications (several names can map to the same node class).
_NODE_MAPPING = {"Time_Domain_Features": TimeDomainFeaturesNode,
                 "TDF": TimeDomainFeaturesNode,
                 "Time_Domain_Difference_Features": TimeDomainDifferenceFeatureNode,
                 "Derivative_Features": SimpleDifferentiationFeatureNode,
                 "Local_Straightline_Features" : LocalStraightLineFeatureNode,
                 "Custom_Features": CustomChannelWiseFeatureNode}
| {
"content_hash": "24684267a7bdf45114827847bdfd5cd8",
"timestamp": "",
"source": "github",
"line_count": 562,
"max_line_length": 118,
"avg_line_length": 42.329181494661924,
"alnum_prop": 0.5498759931060574,
"repo_name": "pyspace/pyspace",
"id": "78f13aff6d4cd6beed3d338e7950e3c46cff3727",
"size": "23789",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pySPACE/missions/nodes/feature_generation/time_domain_features.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11128"
},
{
"name": "C++",
"bytes": "309606"
},
{
"name": "Matlab",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "3160853"
},
{
"name": "QMake",
"bytes": "3217"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
import requests
import mongoengine
import socket
import datetime
from flask import request
from flask_restful import abort
from irianas_server.models import \
Client, LogResource, HTTP, SSH, DATABASE, DNS, FTP
# IP address this server reports about itself to remote client agents.
ip_server = socket.gethostbyname(socket.gethostname())
# REST endpoint templates exposed by the client agent on port 9000.
url_client = 'https://{ip}:9000/api/connect'
url_client_task = 'https://{ip}:9000/api/task/{action}'
url_client_conf = 'https://{ip}:9000/api/services/conf/{service}'
class ManageClient(object):
    """Static helpers that pair/unpair this server with remote clients.

    All methods read the target client's IP address from the Flask
    ``request`` form, so they must run inside a request context.
    """

    @staticmethod
    def connect_client():
        """Handshake with a client and persist its auth token.

        :returns: dict with ``status`` 1 (token saved), -1 (rejected by
            the client or failed save) or 0 (unexpected response).
            Aborts with 404 when the client is unreachable.
        """
        url = url_client.format(ip=request.form['ip'])
        try:
            req = requests.post(
                url, data=dict(ip=ip_server),
                verify=False)
        except requests.ConnectionError:
            return abort(404)
        if req.status_code == 200:
            result = req.json()
            # Reuse an existing Client document when re-pairing.
            try:
                client = Client.objects.get(ip_address=request.form['ip'])
            except mongoengine.DoesNotExist:
                client = Client(ip_address=request.form['ip'])
            client.token = result['token']
            if client.save():
                ManageClient.get_config(client)
                return dict(status=1)
            else:
                return dict(status=-1)
        elif req.status_code == 401:
            return dict(status=-1)
        return dict(status=0)

    @staticmethod
    def get_config(client):
        """Fetch each known service's configuration from *client*.

        Replaces any previously stored document per service.

        :returns: True when all requests could be sent, False on the
            first connection error.
        """
        ip = client.ip_address
        token = client.token
        dict_services = dict(apache=HTTP, ssh=SSH, bind=DNS, vsftpd=FTP,
                             mysql=DATABASE)
        # .items() replaces the Python-2-only .iteritems(); identical
        # behavior here and forward-compatible with Python 3.
        for service, value in dict_services.items():
            try:
                req = requests.get(
                    url_client_conf.format(ip=ip, service=service),
                    data=dict(ip=ip_server, token=token),
                    verify=False)
            except requests.ConnectionError:
                return False
            if req.status_code == 200:
                result = req.json()
                # Drop the stale document for this service, if any.
                try:
                    serv_document = value.objects.get(client=ip)
                    serv_document.delete()
                except mongoengine.DoesNotExist:
                    pass
                value(last_change=datetime.datetime.now(),
                      client=ip,
                      **result).save()
            else:
                continue
        return True

    @staticmethod
    def delete_client():
        """Log out from the client and delete its local document.

        :returns: the client's JSON response on success. Aborts with 404
            when the client is unknown or unreachable, 406 otherwise.
        """
        try:
            client = Client.objects.get(ip_address=request.form['ip'])
        except mongoengine.DoesNotExist:
            return abort(404)
        token = client.token
        try:
            url = url_client.format(ip=request.form['ip'])
            req = requests.get(url, data=dict(ip=ip_server, token=token),
                               verify=False)
        except requests.ConnectionError:
            return abort(404)
        if req.status_code == 200:
            result = req.json()
            if result.get('logout') == 1:
                client.delete()
            return result
        # BUG FIX: this previously read ``return (406)``, returning the
        # bare integer 406 instead of signalling an HTTP 406 error like
        # the other failure paths do.
        return abort(406)
class ClientBasicTask(object):
    """Dispatch maintenance and monitoring tasks to a remote client agent."""

    @staticmethod
    def request_task(action):
        """Forward *action* to the client named in the request form.

        Supported actions: 'monitor' (collect and persist resource usage),
        'shut'/'reboot'/'hibernate'/'update' (fire-and-forget), 'info'.
        Any other action aborts with 500.
        """
        ip = request.form['ip']
        try:
            client = Client.objects.get(ip_address=ip)
        except mongoengine.DoesNotExist:
            return abort(404)
        token = client.token
        data = dict(ip=ip_server, token=token)
        if action == 'monitor':
            try:
                url = url_client_task.format(ip=ip, action=action)
                req = requests.get(url, data=data, verify=False)
            except requests.ConnectionError:
                return abort(404)
            if req.status_code == 200:
                result = req.json()
                # Persist the reported resource usage as a log entry.
                lg = LogResource()
                lg.client = client
                lg.cpu = result['cpu']
                lg.memory = result['memory']
                lg.disk = result['disk']
                lg.date = datetime.datetime.now()
                lg.save()
                return result
            # NOTE(review): a non-200 monitor response falls through to
            # the final abort(500) below -- confirm this is intended.
        elif action in ['shut', 'reboot', 'hibernate', 'update']:
            try:
                url = url_client_task.format(ip=ip, action=action)
                req = requests.get(url, data=data, verify=False)
            except requests.ConnectionError:
                return abort(404)
            # Response status deliberately not checked: fire-and-forget.
            return dict(status=1)
        elif action == 'info':
            try:
                url = url_client_task.format(ip=ip, action=action)
                req = requests.get(url, data=data, verify=False)
            except requests.ConnectionError:
                return abort(404)
            if req.status_code == 200:
                return req.json()
            return abort(404)
        return abort(500)
class PutConfigService(object):
    """Push a service configuration to a remote client agent."""

    @staticmethod
    def put_config_service(service, ip, data, token):
        """PUT *data* for *service* to the client at *ip*.

        Server identity (ip/token) is merged into the payload, taking
        precedence over any same-named keys in *data*.

        :returns: the client's JSON reply, or ``{'result': 0}`` on a
            connection failure or non-200 response.
        """
        payload = dict(data)
        payload.update(dict(ip=ip_server, token=token))
        target = url_client_conf.format(ip=ip, service=service)
        try:
            response = requests.put(target, data=payload, verify=False)
        except requests.ConnectionError:
            return dict(result=0)
        if response.status_code != 200:
            return dict(result=0)
        return response.json()
| {
"content_hash": "9bbb1a2357ab6753cf7ba1e3c6325a5d",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 74,
"avg_line_length": 31.146198830409357,
"alnum_prop": 0.5257228689447991,
"repo_name": "Irigonzalez/irianas-server",
"id": "181fbffb7b80956dffddeda3e10d57574fe1d286",
"size": "5326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irianas_server/client/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48075"
},
{
"name": "Shell",
"bytes": "5139"
}
],
"symlink_target": ""
} |
import sublime
import sys
import imp
# Module path of the plugin's reloader helper; ST3 loads plugins as a
# package, so the package name must be prefixed.
reloader = 'library.reloader'
# NOTE(review): this try block contains only string concatenation, which
# cannot raise ImportError/ValueError -- the ST2 fallback branch looks
# dead and the prefixed name is always used; confirm behavior on ST2.
try:
    # ST3
    reloader = 'ERB Autocomplete.' + reloader
except (ImportError, ValueError):
    # ST2
    pass
# Force a re-import of the reloader module when the plugin itself is
# reloaded by Sublime Text.
if reloader in sys.modules:
    imp.reload(sys.modules[reloader])
# Pull in the plugin commands and event listener; ST3 requires
# package-relative imports, ST2 falls back to top-level imports.
try:
    # ST3
    from .library import reloader
    from .library.commands.toggle_mark import ToggleMarkCommand
    from .library.commands.create_layout import CreateLayoutCommand
    from .library.commands.mapping import *
    from .library.commands.unmapping import UnmappingLayoutCommand
    from .library.events.listener import ERBAutocompleteListener
except (ImportError, ValueError):
    # ST2
    from library import reloader
    from library.commands.toggle_mark import ToggleMarkCommand
    from library.commands.create_layout import CreateLayoutCommand
    from library.commands.mapping import *
    from library.commands.unmapping import UnmappingLayoutCommand
    from library.events.listener import ERBAutocompleteListener
| {
"content_hash": "f14cb313a7410d21203d564b0efa83f6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 30.875,
"alnum_prop": 0.7692307692307693,
"repo_name": "CasperLaiTW/ERBAutocomplete",
"id": "6d24503f3383c3ede4e77480587b1815e6c30111",
"size": "988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "erb_autocomplete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11980"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import DeviceToken
class DeviceTokenAdmin(admin.ModelAdmin):
    """Admin options for DeviceToken: adds a platform sidebar filter."""
    list_filter = ["platform"]


# Expose DeviceToken on the default admin site with the options above.
admin.site.register(DeviceToken, DeviceTokenAdmin)
| {
"content_hash": "81282732c910b88d732d83acbfb6c86c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 24,
"alnum_prop": 0.796875,
"repo_name": "theju/wush",
"id": "3e869d6f9726ffa42f4226ed1ad75c4606e66bba",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wush/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8597"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 exists only in the old PyQt4/Python 2 API level; fall
# back to an identity function when it is unavailable.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_PathBenchmarks(object):
    """pyuic4-style generated UI class for the PathBenchmarks widget.

    NOTE(review): this appears to be auto-generated code -- regenerate
    from the .ui file rather than editing by hand.
    """

    def setupUi(self, PathBenchmarks):
        """Build the widget tree: a form (path selector, type combo,
        info label) centered horizontally between two spacers."""
        PathBenchmarks.setObjectName(_fromUtf8("PathBenchmarks"))
        PathBenchmarks.resize(400, 300)
        PathBenchmarks.setWindowTitle(_fromUtf8(""))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(PathBenchmarks)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        spacerItem = QtGui.QSpacerItem(91, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.widget = QtGui.QWidget(PathBenchmarks)
        self.widget.setObjectName(_fromUtf8("widget"))
        self.formLayout = QtGui.QFormLayout(self.widget)
        self.formLayout.setMargin(0)
        # NOTE(review): duplicated call (generated code); harmless no-op.
        self.formLayout.setMargin(0)
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        self.label = QtGui.QLabel(self.widget)
        self.label.setObjectName(_fromUtf8("label"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
        self.label_2 = QtGui.QLabel(self.widget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
        self.filetype = QtGui.QComboBox(self.widget)
        self.filetype.setObjectName(_fromUtf8("filetype"))
        # Three entries; their visible texts are set in retranslateUi.
        self.filetype.addItem(_fromUtf8(""))
        self.filetype.addItem(_fromUtf8(""))
        self.filetype.addItem(_fromUtf8(""))
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.filetype)
        self.info = QtGui.QLabel(self.widget)
        self.info.setObjectName(_fromUtf8("info"))
        self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.info)
        # Custom widget (imported at module bottom, as pyuic emits it).
        self.path = PathSelector(self.widget)
        self.path.setMinimumSize(QtCore.QSize(200, 0))
        self.path.setObjectName(_fromUtf8("path"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.path)
        self.horizontalLayout_2.addWidget(self.widget)
        spacerItem1 = QtGui.QSpacerItem(90, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)

        self.retranslateUi(PathBenchmarks)
        QtCore.QMetaObject.connectSlotsByName(PathBenchmarks)

    def retranslateUi(self, PathBenchmarks):
        """Assign all user-visible (translatable) strings."""
        self.label.setText(QtGui.QApplication.translate("PathBenchmarks", "Location:", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("PathBenchmarks", "Type:", None, QtGui.QApplication.UnicodeUTF8))
        self.filetype.setItemText(0, QtGui.QApplication.translate("PathBenchmarks", "Functions", None, QtGui.QApplication.UnicodeUTF8))
        self.filetype.setItemText(1, QtGui.QApplication.translate("PathBenchmarks", "Circuits", None, QtGui.QApplication.UnicodeUTF8))
        self.filetype.setItemText(2, QtGui.QApplication.translate("PathBenchmarks", "Truth Tables", None, QtGui.QApplication.UnicodeUTF8))
        self.info.setText(QtGui.QApplication.translate("PathBenchmarks", "Please choose a location", None, QtGui.QApplication.UnicodeUTF8))
from core.PathSelector import PathSelector
| {
"content_hash": "7ecefa547d0665f90c54c779f57d863b",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 139,
"avg_line_length": 57.31578947368421,
"alnum_prop": 0.7223752678298133,
"repo_name": "ajavadia/ScaffCC",
"id": "5e124e3c7a4d568056601ebe9a8f6965eea07abf",
"size": "3502",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rkqc/tools/gui/ui/PathBenchmarks.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3493637"
},
{
"name": "Batchfile",
"bytes": "753"
},
{
"name": "C",
"bytes": "20644720"
},
{
"name": "C++",
"bytes": "54622765"
},
{
"name": "CMake",
"bytes": "198301"
},
{
"name": "CSS",
"bytes": "48884"
},
{
"name": "Cuda",
"bytes": "5785"
},
{
"name": "Emacs Lisp",
"bytes": "20994"
},
{
"name": "Groff",
"bytes": "18799"
},
{
"name": "HTML",
"bytes": "3199845"
},
{
"name": "JavaScript",
"bytes": "17391"
},
{
"name": "LLVM",
"bytes": "10223782"
},
{
"name": "M",
"bytes": "219"
},
{
"name": "M4",
"bytes": "189436"
},
{
"name": "Makefile",
"bytes": "414982"
},
{
"name": "Matlab",
"bytes": "21976"
},
{
"name": "Mercury",
"bytes": "1195"
},
{
"name": "OCaml",
"bytes": "340220"
},
{
"name": "Objective-C",
"bytes": "1625435"
},
{
"name": "Objective-C++",
"bytes": "319231"
},
{
"name": "Perl",
"bytes": "184639"
},
{
"name": "Python",
"bytes": "669722"
},
{
"name": "Shell",
"bytes": "558812"
},
{
"name": "SourcePawn",
"bytes": "1128"
},
{
"name": "Standard ML",
"bytes": "2841"
},
{
"name": "VimL",
"bytes": "12476"
}
],
"symlink_target": ""
} |
import optparse
import sys
import os
from reusable import parseFile
import math
'''Parse command line arguments'''
# Single positional argument: the input data file (Python 2 script).
parser = optparse.OptionParser(description='Search for artifacts in secondary electron emission data.', usage='%prog filename')
(options, args) = parser.parse_args()
try:
    filename = args[0]
except IndexError:
    print 'error - No file selected.'
    sys.exit()
# NOTE(review): `args` is rebound to the options object here; the
# positional list is no longer needed after `filename` is extracted.
args = options
'''Read data from file using `reusable.py`'''
(numAtoms, dimensions, resolution, dV, data, protonPosition, alPositions) = parseFile(filename)
'''Default to the center of the cube; if a proton is present, center on that.'''
# NOTE(review): Python 2 integer division -- ymin is floored.
ymin = dimensions[1]/2
ymax = ymin + 1
if len(protonPosition) > 0:
    # Convert protonPosition from 'real space' to 'index space'
    px, py, pz = protonPosition
    px = int(px/resolution[0]); py = int(py/resolution[1]); pz = int(pz/resolution[2]);
    # Center y-slice on the proton
    ymin = py - 1
    ymax = py + 1
# what the output should look like
# x ->
# y  000000000000000000000000000000
# |  000000333300000000000000000000
# V  000003344330000000000000000000
#    000033444433000000000000000000
#    000334455443300000000000000000
#    000334455443300000000000000000
#    000033444433000000000000000000
#    000003344330000000000000000000
#    000000333300000000000000000000
#    000000000000000000000000000000
#    000000000000000000000000000000
'''Output will be a CSV table'''
outputFilename = os.path.splitext(filename)[0] + '_shadow.csv'
'''Go through `data` and read the sliced data into a new array'''
# Integrate density over the chosen y-slab and store log10 of the result
# for each (z, x) cell.
shadowData = []
for z in range(dimensions[2]):
    line = []
    for x in range(dimensions[0]):
        value = 0
        for y in range(ymin, ymax):
            # NOTE(review): wraps z periodically; confirm this is the
            # intended boundary handling (it mutates the loop variable).
            if z >= dimensions[2]:
                z -= dimensions[2]
            try:
                value += data[x][y][z]*dV/resolution[2]
            except IndexError:
                print 'Index Error'
                print x, y, z
                sys.exit()
        line.append(math.log(value, 10))
    shadowData.append(line)
'''Write to CSV file'''
# NOTE(review): if no proton is present, protonPosition is empty and the
# next line raises IndexError -- confirm the no-proton case is supported.
outputString = 'protonPosition,%.3e,%.3e,%.3e\n'%(protonPosition[0], protonPosition[1], protonPosition[2])
outputString += 'z\\x,' + ','.join(['%.3e'%(x*resolution[0]) for x in range(dimensions[0])])
i = 0
for yData in shadowData:
    outputString += '\n%.3e,'%(i*resolution[2]) + ','.join(['%.3e'%(xDatum) for xDatum in yData])
    i += 1
with open(outputFilename, 'w') as f:
    f.write(outputString)
"content_hash": "d35a9aa694411a9026d4d450fbd6606d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 127,
"avg_line_length": 30.289473684210527,
"alnum_prop": 0.7085143353605561,
"repo_name": "quells/ElectronDensityIntegration",
"id": "f3ff58384d9d61f79bba6f098cf67d078a93c13d",
"size": "2321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/shadowfinder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21600"
},
{
"name": "Shell",
"bytes": "360"
}
],
"symlink_target": ""
} |
"""Unit-test suite for `pptx.parts.chart` module."""
import pytest
from pptx.chart.chart import Chart
from pptx.chart.data import ChartData
from pptx.enum.chart import XL_CHART_TYPE as XCT
from pptx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from pptx.opc.package import OpcPackage
from pptx.opc.packuri import PackURI
from pptx.oxml.chart.chart import CT_ChartSpace
from pptx.parts.chart import ChartPart, ChartWorkbook
from pptx.parts.embeddedpackage import EmbeddedXlsxPart
from ..unitutil.cxml import element, xml
from ..unitutil.mock import class_mock, instance_mock, method_mock, property_mock
class DescribeChartPart(object):
    """Unit-test suite for `pptx.parts.chart.ChartPart` objects."""

    def it_can_construct_from_chart_type_and_data(self, request):
        # --- fixture: chart data stub producing fixed XML and xlsx bytes ---
        chart_data_ = instance_mock(request, ChartData, xlsx_blob=b"xlsx-blob")
        chart_data_.xml_bytes.return_value = b"chart-blob"
        package_ = instance_mock(request, OpcPackage)
        package_.next_partname.return_value = PackURI("/ppt/charts/chart42.xml")
        chart_part_ = instance_mock(request, ChartPart)
        # --- load() must have autospec turned off to work in Python 2.7 mock ---
        load_ = method_mock(
            request, ChartPart, "load", autospec=False, return_value=chart_part_
        )
        # --- exercise ---
        chart_part = ChartPart.new(XCT.RADAR, chart_data_, package_)
        # --- verify each collaborator was driven with expected arguments ---
        package_.next_partname.assert_called_once_with("/ppt/charts/chart%d.xml")
        chart_data_.xml_bytes.assert_called_once_with(XCT.RADAR)
        load_.assert_called_once_with(
            "/ppt/charts/chart42.xml", CT.DML_CHART, package_, b"chart-blob"
        )
        chart_part_.chart_workbook.update_from_xlsx_blob.assert_called_once_with(
            b"xlsx-blob"
        )
        assert chart_part is chart_part_

    def it_provides_access_to_the_chart_object(self, request, chartSpace_):
        # .chart should wrap the part's XML element in a Chart proxy.
        chart_ = instance_mock(request, Chart)
        Chart_ = class_mock(request, "pptx.parts.chart.Chart", return_value=chart_)
        chart_part = ChartPart(None, None, None, chartSpace_)
        chart = chart_part.chart
        Chart_.assert_called_once_with(chart_part._element, chart_part)
        assert chart is chart_

    def it_provides_access_to_the_chart_workbook(self, request, chartSpace_):
        # .chart_workbook should wrap the chartSpace element and the part.
        chart_workbook_ = instance_mock(request, ChartWorkbook)
        ChartWorkbook_ = class_mock(
            request, "pptx.parts.chart.ChartWorkbook", return_value=chart_workbook_
        )
        chart_part = ChartPart(None, None, None, chartSpace_)
        chart_workbook = chart_part.chart_workbook
        ChartWorkbook_.assert_called_once_with(chartSpace_, chart_part)
        assert chart_workbook is chart_workbook_

    # fixture components ---------------------------------------------

    @pytest.fixture
    def chartSpace_(self, request):
        # Mock of the <c:chartSpace> root XML element.
        return instance_mock(request, CT_ChartSpace)
class DescribeChartWorkbook(object):
    """Unit-test suite for `pptx.parts.chart.ChartWorkbook` objects."""

    def it_can_get_the_chart_xlsx_part(self, chart_part_, xlsx_part_):
        # An <c:externalData r:id=...> element should be resolved through
        # the part's relationships.
        chart_part_.related_part.return_value = xlsx_part_
        chart_workbook = ChartWorkbook(
            element("c:chartSpace/c:externalData{r:id=rId42}"), chart_part_
        )
        xlsx_part = chart_workbook.xlsx_part
        chart_part_.related_part.assert_called_once_with("rId42")
        assert xlsx_part is xlsx_part_

    def but_it_returns_None_when_the_chart_has_no_xlsx_part(self):
        chart_workbook = ChartWorkbook(element("c:chartSpace"), None)
        assert chart_workbook.xlsx_part is None

    # Parametrized over (no externalData element, existing externalData).
    @pytest.mark.parametrize(
        "chartSpace_cxml, expected_cxml",
        (
            (
                "c:chartSpace{r:a=b}",
                "c:chartSpace{r:a=b}/c:externalData{r:id=rId" "42}/c:autoUpdate{val=0}",
            ),
            (
                "c:chartSpace/c:externalData{r:id=rId66}",
                "c:chartSpace/c:externalData{r:id=rId42}",
            ),
        ),
    )
    def it_can_change_the_chart_xlsx_part(
        self, chart_part_, xlsx_part_, chartSpace_cxml, expected_cxml
    ):
        chart_part_.relate_to.return_value = "rId42"
        chart_data = ChartWorkbook(element(chartSpace_cxml), chart_part_)
        chart_data.xlsx_part = xlsx_part_
        chart_part_.relate_to.assert_called_once_with(xlsx_part_, RT.PACKAGE)
        assert chart_data._chartSpace.xml == xml(expected_cxml)

    def it_adds_an_xlsx_part_on_update_if_needed(
        self, request, chart_part_, package_, xlsx_part_, xlsx_part_prop_
    ):
        # When no xlsx part exists yet, a new EmbeddedXlsxPart is created.
        EmbeddedXlsxPart_ = class_mock(request, "pptx.parts.chart.EmbeddedXlsxPart")
        EmbeddedXlsxPart_.new.return_value = xlsx_part_
        chart_part_.package = package_
        xlsx_part_prop_.return_value = None
        chart_data = ChartWorkbook(element("c:chartSpace"), chart_part_)
        chart_data.update_from_xlsx_blob(b"xlsx-blob")
        EmbeddedXlsxPart_.new.assert_called_once_with(b"xlsx-blob", package_)
        xlsx_part_prop_.assert_called_with(xlsx_part_)

    def but_it_replaces_the_xlsx_blob_when_the_part_exists(
        self, xlsx_part_prop_, xlsx_part_
    ):
        # Existing part: blob is overwritten in place, no new part made.
        xlsx_part_prop_.return_value = xlsx_part_
        chart_data = ChartWorkbook(None, None)
        chart_data.update_from_xlsx_blob(b"xlsx-blob")
        assert chart_data.xlsx_part.blob == b"xlsx-blob"

    # fixture components ---------------------------------------------

    @pytest.fixture
    def chart_part_(self, request, package_, xlsx_part_):
        return instance_mock(request, ChartPart)

    @pytest.fixture
    def package_(self, request):
        return instance_mock(request, OpcPackage)

    @pytest.fixture
    def xlsx_part_(self, request):
        return instance_mock(request, EmbeddedXlsxPart)

    @pytest.fixture
    def xlsx_part_prop_(self, request):
        # Property mock so tests can stub/inspect ChartWorkbook.xlsx_part.
        return property_mock(request, ChartWorkbook, "xlsx_part")
| {
"content_hash": "0d53203b3e2b4c097e176c7ab56941c2",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 88,
"avg_line_length": 38.41290322580645,
"alnum_prop": 0.6467920725562647,
"repo_name": "scanny/python-pptx",
"id": "ca7fe777109f547d5958e21ff08f1b5eae1dd593",
"size": "5973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parts/test_chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "124592"
},
{
"name": "Makefile",
"bytes": "2055"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "2152173"
}
],
"symlink_target": ""
} |
import errno
import os
import platform
from lxml import etree
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova import utils
from nova.virt import images
from nova.virt import volumeutils
# Options registered under the 'libvirt' group; legacy DEFAULT-group
# names are kept as deprecated aliases for backward compatibility.
libvirt_opts = [
    cfg.BoolOpt('snapshot_compression',
                default=False,
                help='Compress snapshot images when possible. This '
                     'currently applies exclusively to qcow2 images',
                deprecated_group='DEFAULT',
                deprecated_name='libvirt_snapshot_compression'),
    ]

CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
# instances_path is declared by the compute manager module.
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
    """Delegate to utils.execute (single indirection point for this module)."""
    return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
    """Return the iSCSI initiator name, via volumeutils."""
    return volumeutils.get_iscsi_initiator()
def get_fc_hbas():
    """Get the Fibre Channel HBA information.

    Parses `systool -c fc_host -v` output into a list of dicts, one per
    HBA port, mapping field names (with spaces removed) to their values.
    Returns [] when systool is not installed.
    """
    out = None
    try:
        out, err = execute('systool', '-c', 'fc_host', '-v',
                           run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # This handles the case where rootwrap is used
        # and systool is not installed
        # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
        if exc.exit_code == 96:
            LOG.warn(_("systool is not installed"))
        return []
    except OSError as exc:
        # This handles the case where rootwrap is NOT used
        # and systool is not installed
        if exc.errno == errno.ENOENT:
            LOG.warn(_("systool is not installed"))
        return []
    if out is None:
        raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
    lines = out.split('\n')
    # ignore the first 2 lines
    lines = lines[2:]
    hbas = []
    hba = {}
    lastline = None
    for line in lines:
        line = line.strip()
        # 2 newlines denotes a new hba port
        if line == '' and lastline == '':
            if len(hba) > 0:
                hbas.append(hba)
                hba = {}
        else:
            # 'key = "value"' lines; strip spaces from the key and the
            # surrounding quotes from the value.
            val = line.split('=')
            if len(val) == 2:
                key = val[0].strip().replace(" ", "")
                value = val[1].strip()
                hba[key] = value.replace('"', '')
        lastline = line
    # NOTE(review): an HBA not followed by a double blank line would not
    # be appended -- presumably systool output always terminates with
    # blank lines; confirm.
    return hbas
def get_fc_hbas_info():
    """Get Fibre Channel WWNs and device paths from the system, if any."""
    # Modern Linux kernels expose FC HBAs under /sys (read via systool).
    return [{'port_name': hba['port_name'].replace('0x', ''),
             'node_name': hba['node_name'].replace('0x', ''),
             'host_device': hba['ClassDevice'],
             'device_path': hba['ClassDevicepath']}
            for hba in get_fc_hbas()]
def get_fc_wwpns():
    """Get Fibre Channel WWPNs from the system, if any."""
    # Modern Linux kernels expose FC HBAs under /sys (read via systool).
    # Only ports currently online are reported.
    return [hba['port_name'].replace('0x', '')
            for hba in get_fc_hbas()
            if hba['port_state'] == 'Online']
def get_fc_wwnns():
    """Get Fibre Channel WWNNs from the system, if any."""
    # Modern Linux kernels expose FC HBAs under /sys (read via systool).
    # Only ports currently online are reported.
    return [hba['node_name'].replace('0x', '')
            for hba in get_fc_hbas()
            if hba['port_state'] == 'Online']
def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    # size is passed straight through; qemu-img does the suffix parsing.
    execute('qemu-img', 'create', '-f', disk_format, path, size)
def create_cow_image(backing_file, path, size=None):
    """Create a qcow2 copy-on-write image.

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    :param size: optional virtual size passed through to qemu-img
    """
    options = []
    base_details = None
    if backing_file:
        options.append('backing_file=%s' % backing_file)
        base_details = images.qemu_img_info(backing_file)
    # Cluster size is not inherited from the backing file automatically,
    # so force it explicitly (see http://paste.ubuntu.com/1213295/).
    # TODO(harlowja) probably file a bug against qemu-img/qemu
    if base_details is not None and base_details.cluster_size is not None:
        options.append('cluster_size=%s' % base_details.cluster_size)
    # Preallocation is deliberately NOT inherited; see the discussion at
    # http://www.gossamer-threads.com/lists/openstack/dev/10592
    if base_details is not None and base_details.encrypted:
        options.append('encryption=%s' % base_details.encrypted)
    if size is not None:
        options.append('size=%s' % size)
    cmd = ['qemu-img', 'create', '-f', 'qcow2']
    if options:
        # Options are passed as a single comma-separated -o argument.
        cmd += ['-o', ",".join(options)]
    cmd.append(path)
    execute(*cmd)
def create_lvm_image(vg, lv, size, sparse=False):
    """Create LVM image.

    Creates a LVM image with given size.

    :param vg: existing volume group which should hold this image
    :param lv: name for this image (logical volume)
    :size: size of image in bytes
    :sparse: create sparse logical volume
    """
    vg_info = get_volume_group_info(vg)
    free_space = vg_info['free']

    def check_size(vg, lv, size):
        # Raise when the requested allocation exceeds the VG's free space.
        if size > free_space:
            raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
                                 ' Only %(free_space)db available,'
                                 ' but %(size)db required'
                                 ' by volume %(lv)s.') %
                               {'vg': vg,
                                'free_space': free_space,
                                'size': size,
                                'lv': lv})

    if sparse:
        # Sparse volumes only reserve a small preallocation up front; the
        # virtual size may exceed current free space (warned below).
        preallocated_space = 64 * units.Mi
        check_size(vg, lv, preallocated_space)
        if free_space < size:
            LOG.warning(_('Volume group %(vg)s will not be able'
                          ' to hold sparse volume %(lv)s.'
                          ' Virtual volume size is %(size)db,'
                          ' but free space on volume group is'
                          ' only %(free_space)db.'),
                        {'vg': vg,
                         'free_space': free_space,
                         'size': size,
                         'lv': lv})
        cmd = ('lvcreate', '-L', '%db' % preallocated_space,
               '--virtualsize', '%db' % size, '-n', lv, vg)
    else:
        check_size(vg, lv, size)
        cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
    execute(*cmd, run_as_root=True, attempts=3)
def import_rbd_image(*args):
    """Import an image into RBD; args are passed straight to `rbd import`."""
    execute('rbd', 'import', *args)
def _run_rbd(*args, **kwargs):
    """Run an rbd command line, appending configured user/conf options."""
    command = list(args)
    rbd_user = CONF.libvirt.rbd_user
    if rbd_user:
        command += ['--id', str(rbd_user)]
    ceph_conf = CONF.libvirt.images_rbd_ceph_conf
    if ceph_conf:
        command += ['--conf', str(ceph_conf)]
    return utils.execute(*command, **kwargs)
def list_rbd_volumes(pool):
    """List volumes names for given ceph pool.

    :param pool: ceph pool name
    """
    try:
        out, err = _run_rbd('rbd', '-p', pool, 'ls')
    except processutils.ProcessExecutionError:
        # No problem when no volume in rbd pool
        return []
    names = []
    for line in out.splitlines():
        names.append(line.strip())
    return names
def remove_rbd_volumes(pool, *names):
    """Remove one or more rbd volume."""
    for name in names:
        try:
            # Best effort: failures are logged, remaining names still
            # get processed.
            _run_rbd('rbd', '-p', pool, 'rm', name,
                     attempts=3, run_as_root=True)
        except processutils.ProcessExecutionError:
            LOG.warn(_("rbd remove %(name)s in pool %(pool)s failed"),
                     {'name': name, 'pool': pool})
def get_volume_group_info(vg):
    """Return free/used/total space info for a volume group in bytes

    :param vg: volume group name
    :returns: A dict containing:
             :total: How big the filesystem is (in bytes)
             :free: How much space is free (in bytes)
             :used: How much space is used (in bytes)
    """
    out, err = execute('vgs', '--noheadings', '--nosuffix',
                       '--separator', '|',
                       '--units', 'b', '-o', 'vg_size,vg_free', vg,
                       run_as_root=True)
    fields = out.split('|')
    if len(fields) != 2:
        raise RuntimeError(_("vg %s must be LVM volume group") % vg)
    total = int(fields[0])
    free = int(fields[1])
    return {'total': total,
            'free': free,
            'used': total - free}
def list_logical_volumes(vg):
    """List logical volumes paths for given volume group.

    :param vg: volume group name
    """
    out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                       run_as_root=True)
    names = []
    for line in out.splitlines():
        names.append(line.strip())
    return names
def logical_volume_info(path):
    """Get logical volume info.

    :param path: logical volume path
    :returns: dict mapping lvs header names to their values for *path*
    """
    out, err = execute('lvs', '-o', 'vg_all,lv_all',
                       '--separator', '|', path, run_as_root=True)
    info = [line.split('|') for line in out.splitlines()]

    if len(info) != 2:
        raise RuntimeError(_("Path %s must be LVM logical volume") % path)

    # info is [headers, values]; zip them into header->value pairs.
    # NOTE(review): duplicate header names between vg_all and lv_all, if
    # any, would be silently clobbered by dict() -- confirm acceptable.
    return dict(zip(*info))
def logical_volume_size(path):
    """Get logical volume size in bytes.

    :param path: logical volume path
    """
    # blockdev prints the device size in bytes as a single line.
    out, _err = execute('blockdev', '--getsize64', path, run_as_root=True)

    return int(out)
def _zero_logical_volume(path, volume_size):
    """Write zeros over the specified path

    :param path: logical volume path
    :param size: number of zeros to write
    """
    bs = units.Mi
    direct_flags = ('oflag=direct',)
    sync_flags = ()
    remaining_bytes = volume_size

    # The loop efficiently writes zeros using dd,
    # and caters for versions of dd that don't have
    # the easier to use iflag=count_bytes option.
    # Each pass uses a 1024x smaller block size to cover the remainder
    # the previous pass could not express in whole blocks.
    # NOTE(review): relies on Python 2 integer division for the block
    # counts; confirm before running under Python 3.
    while remaining_bytes:
        zero_blocks = remaining_bytes / bs
        seek_blocks = (volume_size - remaining_bytes) / bs
        zero_cmd = ('dd', 'bs=%s' % bs,
                    'if=/dev/zero', 'of=%s' % path,
                    'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
        zero_cmd += direct_flags
        zero_cmd += sync_flags
        if zero_blocks:
            utils.execute(*zero_cmd, run_as_root=True)
        remaining_bytes %= bs
        bs /= units.Ki  # Limit to 3 iterations
        # Use O_DIRECT with initial block size and fdatasync otherwise
        direct_flags = ()
        sync_flags = ('conv=fdatasync',)
def clear_logical_volume(path):
    """Obfuscate the logical volume.

    Behavior is driven by CONF.libvirt.volume_clear ('none', 'zero' or
    'shred') and CONF.libvirt.volume_clear_size (MiB cap; 0 = whole LV).

    :param path: logical volume path
    """
    volume_clear = CONF.libvirt.volume_clear
    if volume_clear not in ('none', 'shred', 'zero'):
        # Unknown setting: log it, then fall back to zeroing.
        LOG.error(_("ignoring unrecognized volume_clear='%s' value"),
                  volume_clear)
        volume_clear = 'zero'

    if volume_clear == 'none':
        return

    volume_clear_size = int(CONF.libvirt.volume_clear_size) * units.Mi
    volume_size = logical_volume_size(path)

    # A non-zero clear-size caps how much of the volume is overwritten.
    if volume_clear_size != 0 and volume_clear_size < volume_size:
        volume_size = volume_clear_size

    if volume_clear == 'zero':
        # NOTE(p-draigbrady): we could use shred to do the zeroing
        # with -n0 -z, however only versions >= 8.22 perform as well as dd
        _zero_logical_volume(path, volume_size)
    elif volume_clear == 'shred':
        utils.execute('shred', '-n3', '-s%d' % volume_size, path,
                      run_as_root=True)
    else:
        raise exception.Invalid(_("volume_clear='%s' is not handled")
                                % volume_clear)
def remove_logical_volumes(paths):
    """Remove one or more logical volume."""
    errors = []
    for path in paths:
        # Scrub the volume contents before removal.
        clear_logical_volume(path)
        try:
            execute('lvremove', '-f', path, attempts=3, run_as_root=True)
        except processutils.ProcessExecutionError as exp:
            # Collect failures so every path still gets attempted.
            errors.append(str(exp))
    if errors:
        raise exception.VolumesNotRemoved(reason=(', ').join(errors))
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Pick the libvirt primary backend driver name

    If the hypervisor supports multiple backend drivers, then the name
    attribute selects the primary backend driver name, while the optional
    type attribute provides the sub-type. For example, xen supports a name
    of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
    while qemu only supports a name of "qemu", but multiple types including
    "raw", "bochs", "qcow2", and "qed".

    :param hypervisor_version: numeric hypervisor version (xen only)
    :param is_block_dev: whether the backing storage is a block device
    :returns: driver_name or None
    """
    virt_type = CONF.libvirt.virt_type
    if virt_type == "xen":
        if is_block_dev:
            return "phy"
        # 4000000 == 4.0.0
        return "tap" if hypervisor_version == 4000000 else "tap2"
    if virt_type in ('kvm', 'qemu'):
        return "qemu"
    # UML doesn't want a driver_name set
    return None
def get_disk_size(path):
    """Get the (virtual) size of a disk image

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    return int(images.qemu_img_info(path).virtual_size)
def get_disk_backing_file(path, basename=True):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :param basename: when True, strip the directory part of the result
    :returns: a path to the image's backing store
    """
    backing = images.qemu_img_info(path).backing_file
    if basename and backing:
        backing = os.path.basename(backing)
    return backing
def copy_image(src, dest, host=None):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host
    """
    if host:
        remote_dest = "%s:%s" % (host, dest)
        # Try rsync first as that can compress and create sparse dest files.
        # Note however that rsync currently doesn't read sparse files
        # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
        # At least network traffic is mitigated with compression.
        try:
            # Do a relatively light weight test first, so that we
            # can fall back to scp, without having run out of space
            # on the destination for example.
            execute('rsync', '--sparse', '--compress', '--dry-run',
                    src, remote_dest)
        except processutils.ProcessExecutionError:
            execute('scp', src, remote_dest)
        else:
            execute('rsync', '--sparse', '--compress', src, remote_dest)
    else:
        # We shell out to cp because that will intelligently copy
        # sparse files. I.E. holes will not be written to DEST,
        # rather recreated efficiently. In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
def write_to_file(path, contents, umask=None):
    """Write the given contents to a file

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set when creating this file (will be reset)
    """
    # NOTE: test against None rather than truthiness -- a umask of 0 is a
    # legitimate value and was silently ignored by the previous check.
    if umask is not None:
        saved_umask = os.umask(umask)
    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        # Restore the caller's umask even if the write fails.
        if umask is not None:
            os.umask(saved_umask)
def chown(path, owner):
    """Change ownership of file or directory

    Shells out (as root) rather than using os.chown so that name->uid
    resolution is handled by the chown binary.

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    execute('chown', owner, path, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Extract a snapshot from a disk image.

    Note that nobody should write to the disk image during this operation.

    :param disk_path: Path to disk image
    :param source_fmt: Format of the source image
    :param out_path: Desired path of extracted snapshot
    :param dest_fmt: Format to convert the snapshot to
    """
    # NOTE(markmc): ISO is just raw to qemu-img
    if dest_fmt == 'iso':
        dest_fmt = 'raw'
    cmd = ['qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt]
    # Conditionally enable compression of snapshots.
    if CONF.libvirt.snapshot_compression and dest_fmt == "qcow2":
        cmd.append('-c')
    cmd.extend([disk_path, out_path])
    execute(*cmd)
def load_file(path):
    """Read contents of file

    :param path: File to read
    :returns: the file's full contents as a string
    """
    with open(path, 'r') as handle:
        return handle.read()
def file_open(*args, **kwargs):
    """Open file

    see built-in open() documentation for more details

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    # NOTE: use open() instead of the file() constructor -- file() only
    # exists on Python 2 (and open() is the documented spelling there too).
    return open(*args, **kwargs)
def file_delete(path):
    """Delete (unlink) file

    :param path: File to remove

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    return os.unlink(path)
def find_disk(virt_dom):
    """Find root device path for instance

    May be file or device

    :param virt_dom: a libvirt domain object
    :returns: path (or rbd: URI) of the instance's root disk
    :raises: RuntimeError when no path can be derived from the domain XML
    """
    domain = etree.fromstring(virt_dom.XMLDesc(0))
    if CONF.libvirt.virt_type == 'lxc':
        # LXC roots live under a 'rootfs' directory; the disk image sits
        # next to it.
        fs_source = domain.find('devices/filesystem/source')
        root_dir = fs_source.get('dir')
        root_dir = root_dir[0:root_dir.rfind('rootfs')]
        disk_path = os.path.join(root_dir, 'disk')
    else:
        disk_source = domain.find('devices/disk/source')
        disk_path = disk_source.get('file') or disk_source.get('dev')
        if not disk_path and CONF.libvirt.images_type == 'rbd':
            disk_path = disk_source.get('name')
            if disk_path:
                disk_path = 'rbd:' + disk_path
    if not disk_path:
        raise RuntimeError(_("Can't retrieve root device path "
                             "from instance libvirt configuration"))
    return disk_path
def get_disk_type(path):
    """Retrieve disk type (raw, qcow2, lvm) for given file."""
    # Device paths and rbd URIs can be classified without probing the image.
    if path.startswith('/dev'):
        return 'lvm'
    if path.startswith('rbd:'):
        return 'rbd'
    return images.qemu_img_info(path).file_format
def get_fs_info(path):
    """Get free/used/total space info for a filesystem

    :param path: Any dirent on the filesystem
    :returns: A dict containing:

             :free: How much space is free (in bytes)
             :used: How much space is used (in bytes)
             :total: How big the filesystem is (in bytes)
    """
    st = os.statvfs(path)
    frsize = st.f_frsize
    # 'free' reflects space available to unprivileged users (f_bavail),
    # while 'used' is computed against all free blocks (f_bfree).
    return {
        'total': frsize * st.f_blocks,
        'free': frsize * st.f_bavail,
        'used': frsize * (st.f_blocks - st.f_bfree),
    }
def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
    """Grab image.

    Thin wrapper over images.fetch_to_raw: fetches ``image_id`` on behalf of
    ``user_id``/``project_id`` and writes it to ``target`` in raw format.

    :param context: request context
    :param target: destination path for the fetched image
    :param image_id: identifier of the image to fetch
    :param user_id: id of the user the fetch is performed for
    :param project_id: id of the project the fetch is performed for
    :param max_size: maximum allowed image size in bytes (0 presumably
        means unlimited -- TODO confirm against images.fetch_to_raw)
    """
    images.fetch_to_raw(context, image_id, target, user_id, project_id,
                        max_size=max_size)
def get_instance_path(instance, forceold=False, relative=False):
    """Determine the correct path for instance storage.

    This method determines the directory name for instance storage, while
    handling the fact that we changed the naming style to something more
    unique in the grizzly release.

    :param instance: the instance we want a path for
    :param forceold: force the use of the pre-grizzly format
    :param relative: if True, just the relative path is returned
    :returns: a path to store information about that instance
    """
    legacy_path = os.path.join(CONF.instances_path, instance['name'])
    # Keep using the name-based layout if it already exists on disk.
    if forceold or os.path.exists(legacy_path):
        return instance['name'] if relative else legacy_path
    if relative:
        return instance['uuid']
    return os.path.join(CONF.instances_path, instance['uuid'])
def get_arch(image_meta):
    """Determine the architecture of the guest (or host).

    This method determines the CPU architecture that must be supported by
    the hypervisor. It gets the (guest) arch info from image_meta properties,
    and it will fallback to the nova-compute (host) arch if no architecture
    info is provided in image_meta.

    :param image_meta: the metadata associated with the instance image
    :returns: guest (or host) architecture
    """
    arch = None
    if image_meta:
        arch = image_meta.get('properties', {}).get('architecture')
    # Fall back to the host architecture when the image declares nothing.
    return platform.processor() if arch is None else arch
def is_mounted(mount_path, source=None):
    """Check if the given source is mounted at given destination point.

    :param mount_path: mount point to check
    :param source: optionally restrict the check to this mount source
    :returns: True if mounted, False otherwise (including when findmnt
        is unavailable or fails to run)
    """
    try:
        check_cmd = ['findmnt', '--target', mount_path]
        if source:
            check_cmd.extend(['--source', source])
        utils.execute(*check_cmd)
        return True
    except processutils.ProcessExecutionError:
        # findmnt ran but reported no matching mount.
        return False
    except OSError as exc:
        # Log at info level since it's not required to have this tool.
        if exc.errno == errno.ENOENT:
            LOG.info(_("findmnt tool is not installed"))
        # NOTE: previously a non-ENOENT OSError fell through and returned
        # None implicitly; return False explicitly for every OSError.
        return False
| {
"content_hash": "22959e8d4dfd87bb26179f75ceac69c1",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 78,
"avg_line_length": 32.26609442060086,
"alnum_prop": 0.5912476722532588,
"repo_name": "CiscoSystems/nova",
"id": "5378e335bc8b3cd1b174305be11b17a3cdbaccf0",
"size": "23503",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13926229"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LocalNetworkGatewaysOperations(object):
    """LocalNetworkGatewaysOperations operations.

    Auto-generated operations class: each public method performs one REST
    call against the Microsoft.Network localNetworkGateways resource (or
    starts a long-running operation for it).

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-06-01".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version sent with every request.
        self.api_version = "2017-06-01"
        self.config = config

    def create_or_update(
            self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a local network gateway in the specified resource
        group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network
         gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to the create or update local
         network gateway operation.
        :type parameters: :class:`LocalNetworkGateway
         <azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`LocalNetworkGateway
         <azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'LocalNetworkGateway')

        # Construct and send request
        def long_running_send():
            # Issues the initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Maps the final response to a model instance, or raises.
            if response.status_code not in [201, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 201:
                deserialized = self._deserialize('LocalNetworkGateway', response)
            if response.status_code == 200:
                deserialized = self._deserialize('LocalNetworkGateway', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        if raw:
            # raw mode skips polling and returns the first response directly.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def get(
            self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified local network gateway in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network
         gateway.
        :type local_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LocalNetworkGateway
         <azure.mgmt.network.v2017_06_01.models.LocalNetworkGateway>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('LocalNetworkGateway', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified local network gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network
         gateway.
        :type local_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Issues the initial DELETE that starts the operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Delete has no body to deserialize; only validates the status.
            if response.status_code not in [204, 200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode skips polling and returns the first response directly.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the local network gateways in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LocalNetworkGatewayPaged
         <azure.mgmt.network.v2017_06_01.models.LocalNetworkGatewayPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Called by the Paged collection: first with no link, then with
            # each nextLink until the server stops returning one.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
| {
"content_hash": "bd314db752dfceb717b11f7e79084ffa",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 157,
"avg_line_length": 45.85757575757576,
"alnum_prop": 0.6459393378708782,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "5da5462d5dcd7514be3cdb0986f987092fb899ea",
"size": "15607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/local_network_gateways_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
import os.path
import threading
import tempfile
import flask
import mock
from mitmproxy.proxy.config import ProxyConfig
from mitmproxy.proxy.server import ProxyServer
import pathod.test
import pathod.pathoc
from mitmproxy import flow, controller, options
from mitmproxy import builtins
# Minimal flask app served through the proxy's app machinery in tests.
testapp = flask.Flask(__name__)
@testapp.route("/")
def hello():
    # Marker response tests use to confirm the app was reached.
    return "testapp"
@testapp.route("/error")
def error():
    # Deliberately raises so tests can exercise in-app error handling.
    raise ValueError("An exception...")
def errapp(environ, start_response):
    # WSGI app that always fails, used to test error handling for apps.
    raise ValueError("errapp")
class TestMaster(flow.FlowMaster):
    """FlowMaster variant that captures log messages for test inspection."""

    def __init__(self, opts, config):
        proxy_server = ProxyServer(config)
        flow.FlowMaster.__init__(self, opts, proxy_server, flow.State())
        self.addons.add(opts, *builtins.default_addons())
        self.apps.add(testapp, "testapp", 80)
        self.apps.add(errapp, "errapp", 80)
        self.clear_log()

    def clear_log(self):
        """Drop any log messages captured so far."""
        self.tlog = []

    def add_log(self, message, level=None):
        """Record a log message instead of displaying it."""
        self.tlog.append(message)
class ProxyThread(threading.Thread):
    """Runs a TestMaster's event loop on a background thread."""

    def __init__(self, tmaster):
        threading.Thread.__init__(self)
        self.tmaster = tmaster
        addr = tmaster.server.address
        self.name = "ProxyThread (%s:%s)" % (addr.host, addr.port)
        controller.should_exit = False

    @property
    def port(self):
        """Port the wrapped proxy server is listening on."""
        return self.tmaster.server.address.port

    @property
    def tlog(self):
        """Log messages captured by the wrapped master."""
        return self.tmaster.tlog

    def run(self):
        self.tmaster.run()

    def shutdown(self):
        self.tmaster.shutdown()
class ProxyTestBase(object):
    """Shared class-level fixture: two pathod backends plus one proxy."""

    # Test Configuration
    ssl = None
    ssloptions = False
    masterclass = TestMaster
    add_upstream_certs_to_client_chain = False
    @classmethod
    def setup_class(cls):
        # Two backend daemons so tests can exercise multi-host scenarios.
        cls.server = pathod.test.Daemon(
            ssl=cls.ssl,
            ssloptions=cls.ssloptions)
        cls.server2 = pathod.test.Daemon(
            ssl=cls.ssl,
            ssloptions=cls.ssloptions)

        opts = cls.get_options()
        cls.config = ProxyConfig(opts)

        tmaster = cls.masterclass(opts, cls.config)
        tmaster.start_app(options.APP_HOST, options.APP_PORT)
        cls.proxy = ProxyThread(tmaster)
        cls.proxy.start()
    @classmethod
    def teardown_class(cls):
        # perf: we want to run tests in parallel
        # should this ever cause an error, travis should catch it.
        # shutil.rmtree(cls.cadir)
        cls.proxy.shutdown()
        cls.server.shutdown()
        cls.server2.shutdown()
    def setup(self):
        # Per-test reset of captured logs and flow state.
        self.master.clear_log()
        self.master.state.clear()
        self.server.clear_log()
        self.server2.clear_log()
    @property
    def master(self):
        # The TestMaster instance driven by the background proxy thread.
        return self.proxy.tmaster
    @classmethod
    def get_options(cls):
        cls.cadir = os.path.join(tempfile.gettempdir(), "mitmproxy")
        return options.Options(
            listen_port=0,
            cadir=cls.cadir,
            add_upstream_certs_to_client_chain=cls.add_upstream_certs_to_client_chain,
            ssl_insecure=True,
        )
class HTTPProxyTest(ProxyTestBase):
    """Base for tests that drive the proxy in regular (HTTP) mode."""

    def pathoc_raw(self):
        """Return an unconnected Pathoc instance aimed at the proxy."""
        return pathod.pathoc.Pathoc(("127.0.0.1", self.proxy.port), fp=None)

    def pathoc(self, sni=None):
        """
        Returns a connected Pathoc instance.
        """
        client = pathod.pathoc.Pathoc(
            ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
        )
        if self.ssl:
            client.connect(("127.0.0.1", self.server.port))
        else:
            client.connect()
        return client

    def pathod(self, spec, sni=None):
        """
        Constructs a pathod GET request, with the appropriate base and proxy.
        """
        client = self.pathoc(sni=sni)
        if self.ssl:
            req = "get:'/p/%s'" % spec
        else:
            req = "get:'%s/p/%s'" % (self.server.urlbase, spec)
        return client.request(req)

    def app(self, page):
        """Issue a GET against the proxy's built-in web app."""
        if self.ssl:
            client = pathod.pathoc.Pathoc(
                ("127.0.0.1", self.proxy.port), True, fp=None
            )
            client.connect((options.APP_HOST, options.APP_PORT))
            return client.request("get:'%s'" % page)
        client = self.pathoc()
        return client.request("get:'http://%s%s'" % (options.APP_HOST, page))
class TResolver:
    """Stub platform resolver that always reports a fixed local port."""

    def __init__(self, port):
        self.port = port

    def original_addr(self, sock):
        # The socket argument is ignored; tests always target localhost.
        return ("127.0.0.1", self.port)
class TransparentProxyTest(ProxyTestBase):
    """Drives the proxy in transparent mode with a stubbed resolver."""

    ssl = None
    resolver = TResolver

    @classmethod
    def setup_class(cls):
        super(TransparentProxyTest, cls).setup_class()
        # Route "original destination" lookups to our pathod backend.
        cls._resolver = mock.patch(
            "mitmproxy.platform.resolver",
            new=lambda: cls.resolver(cls.server.port)
        )
        cls._resolver.start()

    @classmethod
    def teardown_class(cls):
        cls._resolver.stop()
        super(TransparentProxyTest, cls).teardown_class()

    @classmethod
    def get_options(cls):
        opts = ProxyTestBase.get_options()
        opts.mode = "transparent"
        return opts

    def pathod(self, spec, sni=None):
        """
        Constructs a pathod GET request, with the appropriate base and proxy.
        """
        # Both branches issue the same request form; only SNI differs.
        client = self.pathoc(sni=sni) if self.ssl else self.pathoc()
        return client.request("get:'/p/%s'" % spec)

    def pathoc(self, sni=None):
        """
        Returns a connected Pathoc instance.
        """
        client = pathod.pathoc.Pathoc(
            ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
        )
        client.connect()
        return client
class ReverseProxyTest(ProxyTestBase):
    """Drives the proxy in reverse mode, forwarding to the pathod backend."""

    ssl = None

    @classmethod
    def get_options(cls):
        opts = ProxyTestBase.get_options()
        # Point the reverse proxy at the first pathod daemon.
        opts.upstream_server = "".join(
            [
                "https" if cls.ssl else "http",
                "://",
                "127.0.0.1:",
                str(cls.server.port)
            ]
        )
        opts.mode = "reverse"
        return opts

    def pathoc(self, sni=None):
        """
        Returns a connected Pathoc instance.
        """
        client = pathod.pathoc.Pathoc(
            ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
        )
        client.connect()
        return client

    def pathod(self, spec, sni=None):
        """
        Constructs a pathod GET request, with the appropriate base and proxy.
        """
        # Both branches issue the same request form; only SNI differs.
        client = self.pathoc(sni=sni) if self.ssl else self.pathoc()
        return client.request("get:'/p/%s'" % spec)
class SocksModeTest(HTTPProxyTest):
    """Same HTTP test surface, but the client connects via SOCKS5."""

    @classmethod
    def get_options(cls):
        # Same options as the plain HTTP proxy, just switched to socks5 mode.
        opts = ProxyTestBase.get_options()
        opts.mode = "socks5"
        return opts
class ChainProxyTest(ProxyTestBase):
    """
    Chain three instances of mitmproxy in a row to test upstream mode.
    Proxy order is cls.proxy -> cls.chain[0] -> cls.chain[1]
    cls.proxy and cls.chain[0] are in upstream mode,
    cls.chain[1] is in regular mode.
    """
    chain = None
    n = 2
    @classmethod
    def setup_class(cls):
        cls.chain = []
        super(ChainProxyTest, cls).setup_class()
        # Start the downstream proxies; each new one is inserted at the
        # front so chain[0] is the proxy closest to cls.proxy.
        for _ in range(cls.n):
            opts = cls.get_options()
            config = ProxyConfig(opts)
            tmaster = cls.masterclass(opts, config)
            proxy = ProxyThread(tmaster)
            proxy.start()
            cls.chain.insert(0, proxy)

        # Patch the original proxy to upstream mode
        opts = cls.get_options()
        cls.config = cls.proxy.tmaster.config = cls.proxy.tmaster.server.config = ProxyConfig(opts)
    @classmethod
    def teardown_class(cls):
        super(ChainProxyTest, cls).teardown_class()
        for proxy in cls.chain:
            proxy.shutdown()
    def setup(self):
        # Reset captured logs/state on every proxy in the chain.
        super(ChainProxyTest, self).setup()
        for proxy in self.chain:
            proxy.tmaster.clear_log()
            proxy.tmaster.state.clear()
    @classmethod
    def get_options(cls):
        opts = super(ChainProxyTest, cls).get_options()
        if cls.chain:  # First proxy is in normal mode.
            opts.update(
                mode="upstream",
                upstream_server="http://127.0.0.1:%s" % cls.chain[0].port
            )
        return opts
class HTTPUpstreamProxyTest(ChainProxyTest, HTTPProxyTest):
    """Combines the chained (upstream-mode) setup with HTTP test helpers."""
    pass
| {
"content_hash": "a058242a2fdf1a212c42cecd41d988ef",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 99,
"avg_line_length": 26.141104294478527,
"alnum_prop": 0.5677071110068059,
"repo_name": "jvillacorta/mitmproxy",
"id": "1597f59cfd9fdbddc02a40efa68b482c016a7b6c",
"size": "8522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mitmproxy/tservers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "186239"
},
{
"name": "HTML",
"bytes": "3034"
},
{
"name": "JavaScript",
"bytes": "146506"
},
{
"name": "PowerShell",
"bytes": "362"
},
{
"name": "Python",
"bytes": "1270398"
},
{
"name": "Shell",
"bytes": "3717"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import inspect
import os
import psutil
from pikos.monitors.line_monitor import LineMonitor
from pikos.monitors.records import LineMemoryRecord
class LineMemoryMonitor(LineMonitor):
    """ Record process memory on python line events.

    The class hooks on the settrace function to receive trace events and
    record the current process memory when a line of code is about to be
    executed.

    """

    def __init__(self, recorder, record_type=None):
        """ Initialize the monitoring class.

        Parameters
        ----------
        recorder : object
            A subclass of :class:`~pikos.recorders.AbstractRecorder` or a
            class that implements the same interface to handle the values
            to be recorded.

        record_type: class object
            A class object to be used for records. Default is
            :class:`~pikos.monitors.records.LineMemoryRecord`

        """
        if record_type is None:
            record_type = LineMemoryRecord
        super(LineMemoryMonitor, self).__init__(recorder, record_type)
        # Created lazily in enable() so constructing the monitor has no
        # process-inspection side effects.
        self._process = None

    def enable(self):
        """ Enable the monitor.

        The first time the method is called (the context is entered) it will
        initialize the Process class, set the settrace hooks and initialize
        the recorder.

        """
        if self._call_tracker('ping'):
            self._process = psutil.Process(os.getpid())
            self._recorder.prepare(self._record_type)
            self._tracer.replace(self.on_line_event)

    def disable(self):
        """ Disable the monitor.

        The last time the method is called (the context is exited) it will
        unset the settrace hooks and finalize the recorder and set
        :attr:`_process` to None.

        """
        if self._call_tracker('pong'):
            self._tracer.recover()
            self._recorder.finalize()
            self._process = None

    def gather_info(self, frame):
        """ Gather memory information for the line.

        Returns a tuple of (index, function, lineno, rss, vms, line,
        filename) suitable for constructing the record type.

        """
        # NOTE: psutil's memory_info() returns a platform-specific named
        # tuple that may carry more than two fields (e.g. shared, text, data
        # on Linux); attribute access avoids the ValueError a plain
        # ``rss, vms = ...`` unpack raises on such versions.
        memory = self._process.memory_info()
        rss, vms = memory.rss, memory.vms
        filename, lineno, function, line, _ = \
            inspect.getframeinfo(frame, context=1)
        if line is None:
            line = ['<compiled string>']
        return (
            self._index, function, lineno, rss, vms, line[0].rstrip(),
            filename)
| {
"content_hash": "9b5b8e9d8843996ebc11333e1117c652",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 31.486842105263158,
"alnum_prop": 0.6126201420810697,
"repo_name": "enthought/pikos",
"id": "b745f43e51aeb4c6b30096c93cd379be5df19ff1",
"size": "2733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pikos/monitors/line_memory_monitor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "38343"
},
{
"name": "Python",
"bytes": "327111"
},
{
"name": "Shell",
"bytes": "177"
}
],
"symlink_target": ""
} |
"""
An example to compare classmethod and staticmethod
"""
class demo(object):
    """Show exactly what each decorator hands to its method."""

    @classmethod
    def klassmeth(*received):
        # No explicit ``cls`` parameter: the class object arrives as
        # received[0], demonstrating how @classmethod injects it.
        return received

    @staticmethod
    def statmeth(*received):
        # Static methods get only what the caller passes -- no implicit arg.
        return received
| {
"content_hash": "1f2cb990793861c43977886b43fbce46",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 50,
"avg_line_length": 19.09090909090909,
"alnum_prop": 0.6333333333333333,
"repo_name": "helloTC/LearnPython",
"id": "994966781fb168975435da2718a2e24b7d635b2d",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fluent_python/object/csmeth_comp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90925"
}
],
"symlink_target": ""
} |
import numpy as np
from math import log
from rsfmodel import rsf, plot, staterelations
# This is really just the Ruina relation, but let's pretend we invented it!
# We'll inherit attributes from rsf.StateRelation, but you wouldn't have to.
# It does provide velocity contribution calculation for us though!
class MyStateRelation(staterelations.StateRelation):
    """A custom state relation (the Ruina slip law in disguise).

    Inherits the velocity-contribution machinery from
    ``staterelations.StateRelation``; only a steady-state value and a
    state-evolution law need to be supplied.
    """

    def set_steady_state(self, system):
        # Steady state: theta_ss = Dc / vref.
        self.state = self.Dc / system.vref

    def evolve_state(self, system):
        # Ruina (slip) law: d(theta)/dt = -(v*theta/Dc) * ln(v*theta/Dc)
        ratio = system.v * self.state / self.Dc
        return -ratio * log(ratio)
model = rsf.Model()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.01 # Empirical coefficient for the direct effect
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 1. # Initial slider velocity, generally is vlp(t=0)
model.vref = 1. # Reference velocity, generally vlp(t=0)
state1 = MyStateRelation()
state1.b = 0.005 # Empirical coefficient for the evolution effect
state1.Dc = 10. # Critical slip distance
model.state_relations = [state1] # Which state relation we want to use
# We want to solve for 40 seconds at 100Hz
model.time = np.arange(0, 40.01, 0.01)
# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.time)
# 100 samples per second, so index 10*100 marks t = 10 s.
lp_velocity[10*100:] = 10. # Velocity after 10 seconds is 10 um/s
# Set the model load point velocity, must be same shape as model.time
model.loadpoint_velocity = lp_velocity
# Run the model!
model.solve()
# Make the phase plot
plot.phasePlot(model)
# Make a plot in displacement
plot.dispPlot(model)
# Make a plot in time
plot.timePlot(model)
| {
"content_hash": "e7ca13c86f979ac65c8bb8b2b9751efd",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 92,
"avg_line_length": 32.924528301886795,
"alnum_prop": 0.7312320916905444,
"repo_name": "jrleeman/rsfmodel",
"id": "8e49a7267c47fa07ef30519ad29d301cd6974414",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/defining_new_state_relation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62423"
}
],
"symlink_target": ""
} |
from django.shortcuts import redirect
from cuckoo.common.config import config
from cuckoo.misc import version
class CuckooAuthentication(object):
    """Redirect unauthenticated sessions to the /secret/ login page."""

    def process_request(self, request):
        # The login page itself and static assets are always reachable.
        if request.path.startswith(("/secret/", "/static/")):
            return None
        # If no web_secret has been initialized, ignore this functionality.
        if not config("cuckoo:cuckoo:web_secret"):
            return None
        # Authenticated sessions pass through untouched.
        if request.session.get("auth"):
            return None
        return redirect("/secret/")
class CuckooHeaders(object):
    """Set Cuckoo custom response headers."""

    def process_response(self, request, response):
        # Fixed header set applied to every response; values must stay
        # byte-identical (clients and tests may key on them).
        header_pairs = (
            ("Server", "Machete Server"),
            ("X-Cuckoo-Version", version),
            ("X-Content-Type-Options", "nosniff"),
            ("X-Frame-Options", "DENY"),
            ("X-XSS-Protection", "1; mode=block"),
            ("Pragma", "no-cache"),
            ("Cache-Control", "no-cache"),
            ("Expires", "0"),
        )
        for header_name, header_value in header_pairs:
            response[header_name] = header_value
        return response
| {
"content_hash": "f1a5900ce386879e7fd5da5fc6571499",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 36.42857142857143,
"alnum_prop": 0.6352941176470588,
"repo_name": "cuckoobox/cuckoo",
"id": "68380910cc6a828caf85ae913b5365cbdee4d0b4",
"size": "1229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuckoo/web/web/middle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9652"
},
{
"name": "CSS",
"bytes": "6810"
},
{
"name": "DTrace",
"bytes": "8609"
},
{
"name": "HTML",
"bytes": "233053"
},
{
"name": "JavaScript",
"bytes": "21397"
},
{
"name": "Makefile",
"bytes": "58"
},
{
"name": "Mako",
"bytes": "1078"
},
{
"name": "Python",
"bytes": "1101334"
},
{
"name": "Shell",
"bytes": "59602"
},
{
"name": "Visual Basic",
"bytes": "1101"
}
],
"symlink_target": ""
} |
from statsmodels.compat.python import lzip
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
Returns a boolean array of non-constant column indices in exog and
an scalar array of where the constant is or None
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array_like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
    """Return (dummy column indices or None, bool whether any exist)."""
    dummy_ind = _isdummy(X)
    if dummy_ind.size == 0:  # do not waste your time
        # None gets passed to the standard-error function.
        return None, False
    return dummy_ind, True
def _iscount(X):
    """
    Given an array X, returns the column indices for count variables.

    A count column holds non-negative integers, is not constant, and is
    not a 0/1 dummy column.

    Parameters
    ----------
    X : array_like
        A 1d or 2d array of numbers

    Examples
    --------
    >>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
    >>> X[:,1:3] = np.random.randn(15,2)
    >>> ind = _iscount(X)
    >>> ind
    array([0, 3, 4])
    """
    X = np.asarray(X)
    is_integral = np.all(X % 1. == 0, axis=0)
    not_constant = X.var(0) != 0
    is_nonnegative = np.all(X >= 0, axis=0)
    candidates = np.where(is_integral & not_constant & is_nonnegative)[0].tolist()
    # Dummy (0/1) columns also satisfy the test above; exclude them.
    for idx in _isdummy(X):
        candidates.remove(idx)
    return np.array(candidates)
def _get_count_index(X, const_idx):
    """Return (count column indices or None, bool whether any exist)."""
    count_ind = _iscount(X)
    if count_ind.size == 0:  # do not waste your time
        # None gets passed to the standard-error function.
        return None, False
    return count_ind, True
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
try:
assert k_vars == exog.shape[1]
except:
raise ValueError("atexog does not have the same number "
"of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata does not handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
                              method, J):
    r"""
    Returns the Jacobian for discrete regressors for use in margeff_cov_params.

    For discrete regressors the marginal effect is

    \Delta F = F(XB) | d = 1 - F(XB) | d = 0

    The row of the Jacobian for this variable is given by

    f(XB)*X | d = 1 - f(XB)*X | d = 0

    Where F is the default prediction of the model.
    """
    for i in dummy_ind:
        # Evaluate the prediction derivative with the dummy forced to 0
        # and to 1; their difference is this variable's Jacobian row.
        exog0 = exog.copy()
        exog1 = exog.copy()
        exog0[:,i] = 0
        exog1[:,i] = 1
        dfdb0 = model._derivative_predict(params, exog0, method)
        dfdb1 = model._derivative_predict(params, exog1, method)
        dfdb = (dfdb1 - dfdb0)
        if dfdb.ndim >= 2: # for overall
            dfdb = dfdb.mean(0)
        if J > 1:
            K = dfdb.shape[1] // (J-1)
            cov_margins[i::K, :] = dfdb
        else:
            # dfdb could be too short if there are extra params, k_extra > 0
            cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
    return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
r"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
K = dfdb.shape[1] / (J-1)
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
                       dummy_ind, count_ind, method, J):
    """
    Computes the variance-covariance of marginal effects by the delta method.

    Parameters
    ----------
    model : model instance
        The model that returned the fitted results. Its pdf method is used
        for computing the Jacobian of discrete variables in dummy_ind and
        count_ind
    params : array_like
        estimated model parameters
    exog : array_like
        exogenous variables at which to calculate the derivative
    cov_params : array_like
        The variance-covariance of the parameters
    at : str
       Options are:

        - 'overall', The average of the marginal effects at each
          observation.
        - 'mean', The marginal effects at the mean of each regressor.
        - 'median', The marginal effects at the median of each regressor.
        - 'zero', The marginal effects at zero for each regressor.
        - 'all', The marginal effects at each observation.

        Only overall has any effect here.
    derivative : function or array_like
        If a function, it returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog. Expected to be
        called derivative(params, exog). This will be numerically
        differentiated. Otherwise, it can be the Jacobian of the marginal
        effects with respect to the parameters.
    dummy_ind : array_like
        Indices of the columns of exog that contain dummy variables
    count_ind : array_like
        Indices of the columns of exog that contain count variables

    Notes
    -----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.

    The outer Jacobians are computed via numerical differentiation if
    derivative is a function.
    """
    if callable(derivative):
        from statsmodels.tools.numdiff import approx_fprime_cs
        params = params.ravel('F')  # for Multinomial
        # Complex-step differentiation is more accurate; fall back to a
        # forward difference when the function rejects complex inputs.
        try:
            jacobian_mat = approx_fprime_cs(params, derivative,
                                            args=(exog,method))
        except TypeError:  # norm.cdf does not take complex values
            from statsmodels.tools.numdiff import approx_fprime
            jacobian_mat = approx_fprime(params, derivative,
                                         args=(exog,method))
        if at == 'overall':
            jacobian_mat = np.mean(jacobian_mat, axis=1)
        else:
            jacobian_mat = jacobian_mat.squeeze()  # exog was 2d row vector
        # Rows for discrete regressors are replaced by finite-difference
        # Jacobians instead of the numerical derivative above.
        if dummy_ind is not None:
            jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
                                params, exog, dummy_ind, method, J)
        if count_ind is not None:
            jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
                                params, exog, count_ind, method, J)
    else:
        jacobian_mat = derivative

    #NOTE: this will not go through for at == 'all'
    return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
                        dummy_ind, count_ind, method, J):
    """
    See margeff_cov_params.

    Same function but returns both the covariance of the marginal effects
    and their standard errors.
    """
    cov = margeff_cov_params(model, params, exog, cov_params, at,
                             derivative, dummy_ind, count_ind, method, J)
    std_err = np.sqrt(np.diag(cov))
    return cov, std_err
def margeff():
    # Placeholder for a module-level margeff API; not implemented.
    raise NotImplementedError
def _check_at_is_all(method):
if method['at'] == 'all':
raise ValueError("Only margeff are available when `at` is "
"'all'. Please input specific points if you would "
"like to do inference.")
# Maps margeff `method` codes to human-readable labels used in summaries.
_transform_names = dict(dydx='dy/dx',
                        eyex='d(lny)/d(lnx)',
                        dyex='dy/d(lnx)',
                        eydx='d(lny)/dx')
class Margins:
    """
    Mostly a do nothing class. Lays out the methods expected of a sub-class.

    This is just a sketch of what we may want out of a general margins class.
    I (SS) need to look at details of other models.
    """
    def __init__(self, results, get_margeff, derivative, dist=None,
                 margeff_args=()):
        # NOTE(review): the `get_margeff` and `derivative` parameters are
        # accepted but never stored or used here — confirm intended.
        self._cache = {}
        self.results = results
        self.dist = dist
        self.get_margeff(margeff_args)
    def _reset(self):
        # Clear the cache used by the cache_readonly properties.
        self._cache = {}
    def get_margeff(self, *args, **kwargs):
        self._reset()
        # NOTE(review): as written this calls itself (infinite recursion);
        # a sub-class is presumably expected to override `get_margeff` —
        # confirm before using this sketch class directly.
        self.margeff = self.get_margeff(*args)
    @cache_readonly
    def tvalues(self):
        raise NotImplementedError
    @cache_readonly
    def cov_margins(self):
        raise NotImplementedError
    @cache_readonly
    def margins_se(self):
        raise NotImplementedError
    def summary_frame(self):
        raise NotImplementedError
    @cache_readonly
    def pvalues(self):
        raise NotImplementedError
    def conf_int(self, alpha=.05):
        raise NotImplementedError
    def summary(self, alpha=.05):
        raise NotImplementedError
#class DiscreteMargins(Margins):
class DiscreteMargins:
    """Get marginal effects of a Discrete Choice model.

    Parameters
    ----------
    results : DiscreteResults instance
        The results instance of a fitted discrete choice model
    args : tuple
        Args are passed to `get_margeff`. This is the same as
        results.get_margeff. See there for more information.
    kwargs : dict
        Keyword args are passed to `get_margeff`. This is the same as
        results.get_margeff. See there for more information.
    """
def __init__(self, results, args, kwargs={}):
self._cache = {}
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = {}
    @cache_readonly
    def tvalues(self):
        # z-statistics of the marginal effects (margeff / std err);
        # unavailable when margeff was computed with at='all'.
        _check_at_is_all(self.margeff_options)
        return self.margeff / self.margeff_se
    def summary_frame(self, alpha=.05):
        """
        Returns a DataFrame summarizing the marginal effects.

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        frame : DataFrames
            A DataFrame summarizing the marginal effects.

        Notes
        -----
        The dataframe is created on each call and not cached, as are the
        tables build in `summary()`
        """
        _check_at_is_all(self.margeff_options)
        results = self.results
        model = self.results.model
        from pandas import DataFrame, MultiIndex
        # NOTE(review): 'Cont. Int. Hi.' looks like a typo for 'Conf.' —
        # left unchanged because consumers may key on the column name.
        names = [_transform_names[self.margeff_options['method']],
                 'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Cont. Int. Hi.']
        ind = self.results.model.exog.var(0) != 0  # True if not a constant
        exog_names = self.results.model.exog_names
        k_extra = getattr(model, 'k_extra', 0)
        if k_extra > 0:
            exog_names = exog_names[:-k_extra]
        var_names = [name for i,name in enumerate(exog_names) if ind[i]]
        if self.margeff.ndim == 2:
            # MNLogit case
            ci = self.conf_int(alpha)
            table = np.column_stack([i.ravel("F") for i in
                [self.margeff, self.margeff_se, self.tvalues,
                 self.pvalues, ci[:, 0, :], ci[:, 1, :]]])
            _, yname_list = results._get_endog_name(model.endog_names,
                                                    None, all=True)
            ynames = np.repeat(yname_list, len(var_names))
            xnames = np.tile(var_names, len(yname_list))
            index = MultiIndex.from_tuples(list(zip(ynames, xnames)),
                                           names=['endog', 'exog'])
        else:
            table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
                                     self.pvalues, self.conf_int(alpha)))
            index=var_names
        return DataFrame(table, columns=names, index=index)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
    def summary(self, alpha=.05):
        """
        Returns a summary table for marginal effects

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        Summary : SummaryTable
            A SummaryTable instance
        """
        _check_at_is_all(self.margeff_options)
        results = self.results
        model = results.model
        title = model.__class__.__name__ + " Marginal Effects"
        method = self.margeff_options['method']
        top_left = [('Dep. Variable:', [model.endog_names]),
                ('Method:', [method]),
                ('At:', [self.margeff_options['at']]),]
        from statsmodels.iolib.summary import (Summary, summary_params,
                                                table_extend)
        exog_names = model.exog_names[:] # copy
        smry = Summary()
        # TODO: sigh, we really need to hold on to this in _data...
        # Drop the constant column and any extra parameters from the
        # displayed names; only effects for real regressors are shown.
        _, const_idx = _get_const_index(model.exog)
        if const_idx is not None:
            exog_names.pop(const_idx[0])
        if getattr(model, 'k_extra', 0) > 0:
            exog_names = exog_names[:-model.k_extra]
        J = int(getattr(model, "J", 1))
        if J > 1:
            yname, yname_list = results._get_endog_name(model.endog_names,
                                                None, all=True)
        else:
            yname = model.endog_names
            yname_list = [yname]
        smry.add_table_2cols(self, gleft=top_left, gright=[],
                yname=yname, xname=exog_names, title=title)
        # NOTE: add_table_params is not general enough yet for margeff
        # could use a refactor with getattr instead of hard-coded params
        # tvalues etc.
        table = []
        conf_int = self.conf_int(alpha)
        margeff = self.margeff
        margeff_se = self.margeff_se
        tvalues = self.tvalues
        pvalues = self.pvalues
        if J > 1:
            # Multi-equation (MNLogit): one parameter table per equation.
            for eq in range(J):
                restup = (results, margeff[:,eq], margeff_se[:,eq],
                          tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
                tble = summary_params(restup, yname=yname_list[eq],
                        xname=exog_names, alpha=alpha, use_t=False,
                        skip_header=True)
                tble.title = yname_list[eq]
                # overwrite coef with method name
                header = ['', _transform_names[method], 'std err', 'z',
                        'P>|z|', '[' + str(alpha/2), str(1-alpha/2) + ']']
                tble.insert_header_row(0, header)
                table.append(tble)
            table = table_extend(table, keep_headers=True)
        else:
            restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
            table = summary_params(restup, yname=yname, xname=exog_names,
                    alpha=alpha, use_t=False, skip_header=True)
            header = ['', _transform_names[method], 'std err', 'z',
                  'P>|z|', '[' + str(alpha/2), str(1-alpha/2) + ']']
            table.insert_header_row(0, header)
            smry.tables.append(table)
        return smry
    def get_margeff(self, at='overall', method='dydx', atexog=None,
            dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Parameters
        ----------
        at : str, optional
            Options are:

            - 'overall', The average of the marginal effects at each
              observation.
            - 'mean', The marginal effects at the mean of each regressor.
            - 'median', The marginal effects at the median of each regressor.
            - 'zero', The marginal effects at zero for each regressor.
            - 'all', The marginal effects at each observation. If `at` is all
              only margeff will be available.

            Note that if `exog` is specified, then marginal effects for all
            variables not specified by `exog` are calculated using the `at`
            option.
        method : str, optional
            Options are:

            - 'dydx' - dy/dx - No transformation is made and marginal effects
              are returned.  This is the default.
            - 'eyex' - estimate elasticities of variables in `exog` --
              d(lny)/d(lnx)
            - 'dyex' - estimate semi-elasticity -- dy/d(lnx)
            - 'eydx' - estimate semi-elasticity -- d(lny)/dx

            Note that transformations are done after each observation is
            calculated.  Semi-elasticities for binary variables are computed
            using the midpoint method. 'dyex' and 'eyex' do not make sense
            for discrete variables.
        atexog : array_like, optional
            Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the key
            as the zero-indexed column number and the value of the dictionary.
            Default is None for all independent variables less the constant.
        dummy : bool, optional
            If False, treats binary variables (if present) as continuous. This
            is the default. Else if True, treats binary variables as
            changing from 0 to 1. Note that any variable that is either 0 or 1
            is treated as binary. Each binary variable is treated separately
            for now.
        count : bool, optional
            If False, treats count variables (if present) as continuous. This
            is the default. Else if True, the marginal effect is the
            change in probabilities when each observation is increased by one.

        Returns
        -------
        effects : ndarray
            the marginal effect corresponding to the input options

        Notes
        -----
        When using after Poisson, returns the expected number of events
        per period, assuming that the model is loglinear.
        """
        self._reset() # always reset the cache when this is called
        #TODO: if at is not all or overall, we can also put atexog values
        # in summary table head
        method = method.lower()
        at = at.lower()
        _check_margeff_args(at, method)
        self.margeff_options = dict(method=method, at=at)
        results = self.results
        model = results.model
        params = results.params
        exog = model.exog.copy() # copy because values are changed
        effects_idx, const_idx = _get_const_index(exog)
        if dummy:
            _check_discrete_args(at, method)
            dummy_idx, dummy = _get_dummy_index(exog, const_idx)
        else:
            dummy_idx = None
        if count:
            _check_discrete_args(at, method)
            count_idx, count = _get_count_index(exog, const_idx)
        else:
            count_idx = None
        # attach dummy_idx and count_idx
        self.dummy_idx = dummy_idx
        self.count_idx = count_idx
        # get the exogenous variables
        exog = _get_margeff_exog(exog, at, atexog, effects_idx)
        # get base marginal effects, handled by sub-classes
        effects = model._derivative_exog(params, exog, method,
                                                    dummy_idx, count_idx)
        J = getattr(model, 'J', 1)
        effects_idx = np.tile(effects_idx, J) # adjust for multi-equation.
        effects = _effects_at(effects, at)
        if at == 'all':
            # No standard errors are available for at='all' (see
            # margeff_cov_params); only the effects themselves are set.
            if J > 1:
                K = model.K - np.any(~effects_idx) # subtract constant
                self.margeff = effects[:, effects_idx].reshape(-1, K, J,
                                                                order='F')
            else:
                self.margeff = effects[:, effects_idx]
        else:
            # Set standard error of the marginal effects by Delta method.
            margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
                                                results.cov_params(), at,
                                                model._derivative_exog,
                                                dummy_idx, count_idx,
                                                method, J)
            # reshape for multi-equation
            if J > 1:
                K = model.K - np.any(~effects_idx) # subtract constant
                self.margeff = effects[effects_idx].reshape(K, J, order='F')
                self.margeff_se = margeff_se[effects_idx].reshape(K, J,
                                                                  order='F')
                self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
            else:
                # do not care about at constant
                # hack truncate effects_idx again if necessary
                # if eyex, then effects is truncated to be without extra params
                effects_idx = effects_idx[:len(effects)]
                self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
                self.margeff_se = margeff_se[effects_idx]
                self.margeff = effects[effects_idx]
| {
"content_hash": "5f16ed5474e9ecb783cf03361b4c683c",
"timestamp": "",
"source": "github",
"line_count": 730,
"max_line_length": 83,
"avg_line_length": 36.38493150684931,
"alnum_prop": 0.559542185911675,
"repo_name": "josef-pkt/statsmodels",
"id": "dc7be4ceae8be410ad7a8b70bdb9978b8c44827d",
"size": "26628",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "statsmodels/discrete/discrete_margins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14428857"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25322"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
"""Python 2/3 compatibility definitions.
These are used by the rest of Elpy to keep compatibility definitions
in one place.
"""
import sys
# Select the Python-2 or Python-3 implementations at import time.
if sys.version_info >= (3, 0):
    PYTHON3 = True
    from io import StringIO
    def ensure_not_unicode(obj):
        # On Python 3 every str is unicode; nothing to convert.
        return obj
else:
    PYTHON3 = False
    from StringIO import StringIO  # noqa
    def ensure_not_unicode(obj):
        """Return obj. If it's a unicode string, convert it to str first.

        Pydoc functions simply don't find anything for unicode
        strings. No idea why.
        """
        if isinstance(obj, unicode):
            return obj.encode("utf-8")
        else:
            return obj
| {
"content_hash": "6203f985e3244f324bf3f38920880432",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 20.393939393939394,
"alnum_prop": 0.6225854383358098,
"repo_name": "zhangyunfeng/.emacs.d",
"id": "ebe91a235ebbfe8f9b61fe68d88f461bc9eb1e5b",
"size": "673",
"binary": false,
"copies": "44",
"ref": "refs/heads/master",
"path": "elpa/elpy-20170401.1152/elpy/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2894"
},
{
"name": "Common Lisp",
"bytes": "1252747"
},
{
"name": "Emacs Lisp",
"bytes": "16710934"
},
{
"name": "HTML",
"bytes": "30070"
},
{
"name": "JavaScript",
"bytes": "5011725"
},
{
"name": "Makefile",
"bytes": "4157"
},
{
"name": "NewLisp",
"bytes": "690"
},
{
"name": "Python",
"bytes": "167891"
},
{
"name": "Roff",
"bytes": "3082"
},
{
"name": "Ruby",
"bytes": "13769"
},
{
"name": "Scheme",
"bytes": "134939"
},
{
"name": "Shell",
"bytes": "4223"
},
{
"name": "Standard ML",
"bytes": "9321"
}
],
"symlink_target": ""
} |
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

# Engine bound to the local SQLite database file.
engine = create_engine('sqlite:///agora.sqlite')
# Thread-local session factory shared by the application; only DBSession
# is exported.
DBSession = scoped_session(sessionmaker())
DBSession.configure(bind=engine)
__all__ = ['DBSession']
| {
"content_hash": "1cacbde468117f8d656f9bc0d5ccf15f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 30.5,
"alnum_prop": 0.7745901639344263,
"repo_name": "cullerton/cullerton.agora",
"id": "2b28b522c1c6f7abca12ae419e6def011b32c6fe",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cullerton/agora/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8434"
},
{
"name": "Python",
"bytes": "24118"
}
],
"symlink_target": ""
} |
import sublime
import os
import sys
from .pathhelper import *
def plugin_loaded():
    """Sublime Text entry point: ensure the user template folder exists."""
    global TEMPLATE_FOLDER
    TEMPLATE_FOLDER = os.path.join(sublime.packages_path(), 'User', '.FileManager')
    if not os.path.exists(TEMPLATE_FOLDER):
        makedirs(TEMPLATE_FOLDER)
def md(*t, **kwargs):
    """Show a message dialog; items joined by `sep` (default newline)."""
    sep = kwargs.get('sep', '\n')
    sublime.message_dialog(sep.join(str(el) for el in t))
def sm(*t, **kwargs):
    """Show a status-bar message; items joined by `sep` (default space)."""
    sep = kwargs.get('sep', ' ')
    sublime.status_message(sep.join(str(el) for el in t))
def em(*t, **kwargs):
    """Show an error dialog; items joined by `sep` (default space)."""
    sep = kwargs.get('sep', ' ')
    sublime.error_message(sep.join(str(el) for el in t))
def isST3():
    """Return True when running under Sublime Text 3+ (build > 3000)."""
    build = int(sublime.version())
    return build > 3000
def get_settings():
    """Load and return the FileManager plugin settings."""
    return sublime.load_settings('FileManager.sublime-settings')
def refresh_sidebar(settings=None, window=None):
    """Refresh the sidebar folder list if the user opted in via settings."""
    if window is None:
        # NOTE(review): `active_window` is not defined in this module; it
        # presumably comes from the `from .pathhelper import *` wildcard
        # import (or should be sublime.active_window()) — confirm.
        window = active_window()
    if settings is None:
        settings = window.active_view().settings()
    if settings.get('explicitly_refresh_sidebar') is True:
        window.run_command('refresh_folder_list')
def makedirs(path, exist_ok=True):
    """Create `path` (and missing parents).

    Parameters
    ----------
    path : str
        Directory path to create.
    exist_ok : bool
        When True (the default), an already-existing directory is not an
        error; any other OSError (e.g. permission denied) still propagates.
    """
    import errno  # local import keeps the module's import surface unchanged
    if exist_ok is False:
        os.makedirs(path)
        return
    try:
        os.makedirs(path)
    except OSError as err:
        # Previously every OSError was silently swallowed; only ignore
        # "directory already exists".
        if err.errno != errno.EEXIST:
            raise
def quote(s):
    """Wrap `s` in double quotes (converting to str if needed)."""
    return '"%s"' % (s,)
def get_window():
    """Return the currently active Sublime window."""
    return sublime.active_window()
def get_view():
    """Return the active view of the active window, or None if no window."""
    window = get_window()
    if not window:
        return None
    return window.active_view()
def copy(el):
    """Put `el` on the system clipboard via the Sublime API."""
    return sublime.set_clipboard(el)
def file_get_content(path):
    """Return the entire text content of the file at `path`."""
    with open(path, 'r') as file_obj:
        return file_obj.read()
def get_template(created_file):
    """Return the right template for the create file.

    Looks in TEMPLATE_FOLDER for a file named ``template<ext>`` matching
    the extension of `created_file`; returns its content, or '' if none.
    """
    # Hoist the loop-invariant extension computation out of the loop
    # (it was recomputed twice per directory entry).
    target_ext = os.path.splitext(created_file)[1]
    for item in os.listdir(TEMPLATE_FOLDER):
        base, ext = os.path.splitext(item)
        if base == 'template' and ext == target_ext:
            return file_get_content(os.path.join(TEMPLATE_FOLDER, item))
    return ''
def isdigit(string):
    """Return True if `string` parses as an int (handles signs, unlike
    str.isdigit)."""
    try:
        int(string)
        return True
    except ValueError:
        return False
def yes_no_cancel_panel(message, yes, no, cancel, yes_text='Yes', no_text='No', cancel_text='Cancel', **kwargs):
    """Show a quick panel with yes/no/cancel rows and invoke the callback
    matching the user's choice.

    `message` may be a string or a list of strings (a multi-line panel row).
    `kwargs['args']`/`kwargs['kwargs']` are forwarded to the chosen callback.
    """
    loc = locals()
    if isinstance(message, list):
        message.append('Do not select this item')
    else:
        message = [message, 'Do not select this item']
    items = [message, yes_text, no_text, cancel_text]
    def get_max(item):
        return len(item)
    maxi = len(max(items, key=get_max))
    for i, item in enumerate(items):
        # NOTE(review): items[1..3] are plain strings with the defaults and
        # str has no .append — this padding loop only works when callers
        # pass list rows for the texts. Confirm before relying on the
        # default 'Yes'/'No'/'Cancel' strings.
        while len(items[i]) < maxi:
            items[i].append('')
    def on_done(index):
        # Index 0 is the message row itself: re-show the panel unchanged.
        if index in [-1, 3] and cancel:
            return cancel(*kwargs.get('args', []), **kwargs.get('kwargs', {}))
        elif index == 1 and yes:
            return yes(*kwargs.get('args', []), **kwargs.get('kwargs', {}))
        elif index == 2 and no:
            return no(*kwargs.get('args', []), **kwargs.get('kwargs', {}))
        elif index == 0:
            return yes_no_cancel_panel(**loc)
    window = get_window()
    window.show_quick_panel(items, on_done, 0, 1)
def close_view(view_to_close):
    """Close the given view (directly on ST3; via window command on ST2)."""
    if not isST3():
        window = view_to_close.window()
        window.focus_view(view_to_close)
        window.run_command('close')
        return
    view_to_close.close()
def to_snake_case(camelCaseString):
    """Convert camelCase to snake_case.

    Each uppercase letter becomes its lowercase form, preceded by an
    underscore unless it is the very first character produced.
    """
    pieces = []
    for ch in camelCaseString:
        if ch.isupper():
            pieces.append(ch.lower() if not pieces else '_' + ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces)
def StdClass(name='Unknown'):
    """Create and return a new empty class with a title-cased name."""
    # str() keeps Python 2 happy when `name` is a unicode string.
    class_name = str(name).title()
    return type(class_name, (), {})
| {
"content_hash": "86dce98f7530166613233db61ff5488d",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 118,
"avg_line_length": 28.875,
"alnum_prop": 0.6011904761904762,
"repo_name": "ameistad/FileManager",
"id": "5771cff15f39ed3ae930f64c8f0190bfb9df82e5",
"size": "3696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sublimefunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62826"
}
],
"symlink_target": ""
} |
# NOTE: Python 2 script (uses print statements).
import config
import urllib
import simpleoauth2
import rs as qboxrs
import rscli
import digestoauth
import uptoken

# Demo credentials/configuration -- replace with real values before running.
config.ACCESS_KEY = '<Please apply your access key>'
config.SECRET_KEY = '<Dont send your secret key to anyone>'
bucket = 'test_photos'
key = 'test.jpg'
customer = 'end_user_id'
demo_domain = 'test_photos.dn.qbox.me'
# Generate a signed upload token valid for one hour (3600 s).
tokenObj = uptoken.UploadToken(bucket, 3600, "", "", customer)
uploadToken = tokenObj.generate_token()
print "Upload Token is: %s" % uploadToken
# Upload the local file named `key` into the bucket under the same name.
resp = rscli.UploadFile(bucket, key, 'image/jpg', key, '', '', uploadToken)
print '\n===> UploadFile %s result:' % key
print resp
# Query the uploaded object's metadata to verify the upload succeeded.
client = digestoauth.Client()
rs = qboxrs.Service(client, bucket)
resp = rs.Stat(key)
print '\n===> Stat %s result:' % key
print resp
| {
"content_hash": "2aa76d15c23137a845a5c729c0b13193",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 24.9,
"alnum_prop": 0.7175368139223561,
"repo_name": "rnoldo/django-avatar",
"id": "bb601b92434c4ca50b9e3e48c49bccf5d9c99d33",
"size": "770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storages/qbox/put_demo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "170464"
}
],
"symlink_target": ""
} |
""" a module to convert between the old (Python script) plugin format,
and the new (JSON) one
"""
from typing import Dict, Tuple # noqa: F401
import ast
import json
def assess_syntax(path):
    """Parse the python module at ``path`` and extract its pieces.

    Returns a ``(docstring, imported, assignments)`` tuple, where
    ``imported`` maps each local (possibly aliased) name to its dotted
    import path, and ``assignments`` maps assigned names to their raw
    ast value nodes.  Any other top-level syntax triggers a warning.
    """
    with open(path) as handle:
        tree = ast.parse(handle.read())
    docstring = ""  # docstring = ast.get_docstring(syntaxTree)
    leftovers = []
    imported = {}
    assignments = {}
    for position, node in enumerate(ast.iter_child_nodes(tree)):
        if position == 0 and isinstance(node, ast.Expr) and isinstance(node.value, ast.Str):
            # a leading string expression is the module docstring
            docstring = node.value.s
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                local_name = alias.name if alias.asname is None else alias.asname
                imported[local_name] = node.module + "." + alias.name
        elif isinstance(node, ast.Assign):
            if len(node.targets) > 1:
                raise IOError(
                    "cannot handle expansion assignments " "(e.g. `a, b = [1, 2]`)"
                )
            assignments[node.targets[0].id] = node.value
        else:
            leftovers.append(node)
    if leftovers:
        print(
            "Warning this script can only handle 'ImportFrom' and 'Assign' "
            "syntax, found additional items: {}".format(leftovers)
        )
    return docstring, imported, assignments
def ast_to_json(item, imported, assignments):
    """Recursively convert an ast node to a json-friendly python value."""
    literal_names = {"True": True, "False": False, "None": None}
    if item in ("True", "False", "None"):  # python 2.7 may pass bare strings
        return literal_names[item]
    if hasattr(ast, "NameConstant") and isinstance(item, ast.NameConstant):
        return item.value
    if isinstance(item, ast.Str):
        return item.s
    if isinstance(item, ast.Num):
        return item.n
    if isinstance(item, ast.Name):
        # a name either refers to an import, an earlier assignment,
        # or (python 2.7) one of the builtin constants
        if item.id in imported:
            return imported[item.id]
        if item.id in assignments:
            return ast_to_json(assignments[item.id], imported, assignments)
        if item.id in ("True", "False", "None"):  # python 2.7
            return literal_names[item.id]
        raise ValueError("could not find assignment '{}' in config".format(item.id))
    if isinstance(item, (ast.List, ast.Tuple, ast.Set)):
        return [ast_to_json(element, imported, assignments) for element in item.elts]
    if isinstance(item, ast.Dict):
        return convert_dict(item, imported, assignments)
    raise ValueError("could not handle ast item: {}".format(item))
def convert_dict(dct, imported, assignments):
    # type: (ast.Dict, Dict[str, str], dict) -> dict
    """Recursively convert an ``ast.Dict`` node into a plain dict.

    Keys must be string literals; values are converted via ast_to_json.
    """
    converted = {}
    for key_node, value_node in zip(dct.keys, dct.values):
        if not isinstance(key_node, ast.Str):
            raise ValueError("expected key to be a Str; {}".format(key_node))
        converted[key_node.s] = ast_to_json(value_node, imported, assignments)
    return converted
def convert_oformat(oformat):
    """Map an output-format name to ``(exporter_class, outline)``.

    ``outline`` is ``None`` for notebooks (no template needed) and an
    outline-schema locator dict for the other formats.
    """
    outline_module = "ipypublish.templates.outline_schemas"
    mapping = {
        "Notebook": (
            "nbconvert.exporters.NotebookExporter",
            None,  # TODO do notebooks need template (they have currently)
        ),
        "Latex": (
            "nbconvert.exporters.LatexExporter",
            {"module": outline_module, "file": "latex_outline.latex.j2"},
        ),
        "HTML": (
            "nbconvert.exporters.HTMLExporter",
            {"module": outline_module, "file": "html_outline.html.j2"},
        ),
        "Slides": (
            "nbconvert.exporters.SlidesExporter",
            {"module": outline_module, "file": "html_outline.html.j2"},
        ),
    }
    if oformat not in mapping:
        raise ValueError(
            "expected oformat to be: " "'Notebook', 'Latex', 'HTML' or 'Slides'"
        )
    return mapping[oformat]
def convert_config(config, exporter_class, allow_other):
    # type: (dict, str, bool) -> dict
    """Convert a legacy plugin ``config`` dict into the exporter format.

    Filters and preprocessor declarations are pulled from the
    Exporter/TemplateExporter keys; remaining ``<Preprocessor>.<option>``
    keys become preprocessor arguments, and everything else lands in
    ``other_args`` (dropped with a warning unless ``allow_other``).
    """
    filter_keys = ("Exporter.filters", "TemplateExporter.filters")
    preproc_keys = ("Exporter.preprocessors", "TemplateExporter.preprocessors")
    filters = {}
    preprocs = {}
    other = {}
    # first parse: gather filters and preprocessor declarations
    for key, val in config.items():
        # TODO Exporter.filters and TemplateExporter.filters always the same?
        if key in filter_keys:
            filters.update(config[key])
        if key in preproc_keys:
            if preprocs:
                raise ValueError(
                    "'config' contains both Exporter.preprocessors and "
                    "TemplateExporter.preprocessors"
                )
            for dotted_path in val:
                pname = dotted_path.split(".")[-1]
                preprocs[pname] = {"class": dotted_path, "args": {}}
                # TODO move these special cases to seperate input/function
                if pname in ("LatexDocLinks", "LatexDocHTML"):
                    preprocs[pname]["args"]["metapath"] = "${meta_path}"
                    preprocs[pname]["args"]["filesfolder"] = "${files_path}"
    # second parse: route remaining keys to preprocessor args or "other"
    for key, val in config.items():
        if key in filter_keys or key in preproc_keys:
            continue
        prefix, _, option = key.partition(".")
        if prefix in preprocs:
            preprocs[prefix]["args"][option] = val
        else:
            other[key] = val
    if other and not allow_other:
        print("Warning: ignoring other args: {}".format(other))
        other = {}
    return {
        "class": exporter_class,
        "filters": filters,
        "preprocessors": list(preprocs.values()),
        "other_args": other,
    }
def replace_template_path(path):
    """Translate an original dotted template path into the new
    ``{"module": ..., "file": ...}`` locator dict."""
    module, _, name = path.rpartition(".")
    known_modules = {
        "ipypublish.html.ipypublish": "ipy-{0}.html-tplx.json",
        "ipypublish.html.standard": "std-{0}.html-tplx.json",
        "ipypublish.latex.standard": "std-{0}.latex-tpl.json",
        "ipypublish.latex.ipypublish": "ipy-{0}.latex-tpl.json",
    }
    if module in known_modules:
        return {
            "module": "ipypublish.templates.segments",
            "file": known_modules[module].format(name),
        }
    print("Warning: unknown template path: {}".format(path))
    return {"module": module, "file": "{0}.json".format(name)}
def create_json(docstring, imported, assignments, allow_other=True):
    # type: (str, Dict[str, str], dict, bool) -> dict
    """Build the new JSON plugin dict from a parsed legacy plugin module.

    Parameters
    ----------
    docstring: str
        the doc string of the module
    imported: dict
        mapping of local name -> dotted import path
    assignments: dict
        mapping of assigned name -> ast value node (i.e. 'a = b')
    allow_other: bool
        whether to allow arguments in config,
        which do not relate to preprocessors

    Returns
    -------
    dict:
        with "description", "exporter" and "template" keys

    Raises
    ------
    ValueError:
        if any of the required 'oformat', 'config' or 'template'
        assignments is missing or has an unexpected node type
    """
    oformat = None
    config = None
    template = None
    for value, expr in assignments.items():
        if value == "oformat":
            if not isinstance(expr, ast.Str):
                raise ValueError("expected 'oformat' to be a Str; {}".format(expr))
            oformat = expr.s
        elif value == "config":
            if not isinstance(expr, ast.Dict):
                raise ValueError("expected 'config' to be a Dict; {}".format(expr))
            config = convert_dict(expr, imported, assignments)
        elif value == "template":
            if not isinstance(expr, ast.Call):
                # bug fix: this message previously (incorrectly) said 'config'
                raise ValueError("expected 'template' to be a call to create_tpl(x)")
            # func = expr.func  # TODO make sure func name is create_tpl/tplx
            args = expr.args
            keywords = expr.keywords
            if len(args) != 1 or len(keywords) > 0:
                raise ValueError("expected create_tpl(x) to have one argument")
            seg_list = args[0]
            if isinstance(seg_list, ast.ListComp):
                # e.g. create_tpl([s for s in segments]): use the iterable
                seg_list = seg_list.generators[0].iter
            if not isinstance(seg_list, ast.List):
                raise ValueError(
                    "expected create_tpl(x) arg to be a List; {}".format(seg_list)
                )
            segments = []
            for seg in seg_list.elts:
                if isinstance(seg, ast.Attribute):
                    seg_name = seg.value.id
                elif isinstance(seg, ast.Name):
                    seg_name = seg.id
                else:
                    # bug fix: "{1}".format(seg) had no matching positional
                    # argument and raised IndexError instead of this message
                    raise ValueError(
                        "expected seg in template to be an Attribute or Name; "
                        "{0}".format(seg)
                    )
                if seg_name not in imported:
                    raise ValueError("segment '{}' not found".format(seg_name))
                segments.append(imported[seg_name])
            template = segments
    if oformat is None:
        raise ValueError("could not find 'oformat' assignment")
    if config is None:
        raise ValueError("could not find 'config' assignment")
    if template is None:
        raise ValueError("could not find 'template' assignment")
    exporter_class, outline = convert_oformat(oformat)
    exporter = convert_config(config, exporter_class, allow_other)
    # natbib bibliography templates rely on the strip_ext filter
    if any("biblio_natbib" in s for s in template):
        exporter["filters"]["strip_ext"] = "ipypublish.filters.filters.strip_ext"
    return {
        "description": docstring.splitlines(),
        "exporter": exporter,
        "template": None
        if outline is None
        else {
            "outline": outline,
            "segments": [replace_template_path(s) for s in template],
        },
    }
def convert_to_json(path, outpath=None, ignore_other=False):
    """Convert a legacy python-module plugin to the JSON plugin format.

    Parameters
    ----------
    path: str
        input module path
    outpath: str or None
        if set, also write the json to this path
    ignore_other: bool
        whether to ignore arguments in config,
        which do not relate to preprocessors

    Returns
    -------
    str:
        the converted plugin serialised as an indented json string
    """
    docstring, imported, assignments = assess_syntax(path)
    output = create_json(docstring, imported, assignments, not ignore_other)
    if outpath:
        with open(outpath, "w") as file_obj:
            json.dump(output, file_obj, indent=2)
    return json.dumps(output, indent=2)
if __name__ == "__main__":
if False:
import glob
import os
for path in glob.glob(
"/Users/cjs14/GitHub/ipypublish" "/ipypublish/export_plugins/*.py"
):
dirname = os.path.dirname(path)
name = os.path.splitext(os.path.basename(path))[0]
try:
convert_to_json(
path, os.path.join(dirname, name + ".json"), ignore_other=True
)
except ValueError as err:
print("{0} failed: {1}".format(path, err))
convert_to_json(
"/Users/cjs14/GitHub/ipypublish" "/ipypublish_plugins/example_new_plugin.py",
"/Users/cjs14/GitHub/ipypublish" "/ipypublish_plugins/example_new_plugin.json",
)
| {
"content_hash": "361a8ebc9a11efec426baf844f938927",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 88,
"avg_line_length": 34.78654970760234,
"alnum_prop": 0.5645120618643356,
"repo_name": "chrisjsewell/ipypublish",
"id": "aa7f362366cfb2ef45f260446012fa04783f0929",
"size": "11897",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ipypublish/port_api/plugin_to_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4549"
},
{
"name": "CSS",
"bytes": "7275"
},
{
"name": "HTML",
"bytes": "7194717"
},
{
"name": "JavaScript",
"bytes": "2124646"
},
{
"name": "Jupyter Notebook",
"bytes": "1319557"
},
{
"name": "Makefile",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "494303"
},
{
"name": "Shell",
"bytes": "552"
},
{
"name": "TeX",
"bytes": "267595"
}
],
"symlink_target": ""
} |
import pytest
from mitmproxy.contentviews import xml_html
from . import full_eval
datadir = "mitmproxy/contentviews/test_xml_html_data/"
def test_simple(tdata):
    """ViewXmlHtml labels well-formed HTML as HTML and everything else
    (including malformed fragments) as XML, passing text through."""
    v = full_eval(xml_html.ViewXmlHtml())
    assert v(b"foo") == ('XML', [[('text', 'foo')]])
    assert v(b"<html></html>") == ('HTML', [[('text', '<html></html>')]])
    # malformed input falls back to the XML view, unchanged
    assert v(b"<>") == ('XML', [[('text', '<>')]])
    assert v(b"<p") == ('XML', [[('text', '<p')]])
    with open(tdata.path(datadir + "simple.html")) as f:
        input = f.read()
    tokens = xml_html.tokenize(input)
    assert str(next(tokens)) == "Tag(<!DOCTYPE html>)"
@pytest.mark.parametrize("filename", [
    "simple.html",
    "cdata.xml",
    "comment.xml",
    "inline.html",
    "test.html"
])
def test_format_xml(filename, tdata):
    """Pretty-printing each fixture must reproduce its golden file exactly."""
    path = tdata.path(datadir + filename)
    with open(path) as f:
        input = f.read()
    # golden file lives next to the input: foo.html -> foo-formatted.html
    with open("-formatted.".join(path.rsplit(".", 1))) as f:
        expected = f.read()
    tokens = xml_html.tokenize(input)
    assert xml_html.format_xml(tokens) == expected
def test_render_priority():
    """The view should accept XML/HTML content types (and XML-looking data)
    and reject everything else and empty bodies."""
    v = xml_html.ViewXmlHtml()
    assert v.render_priority(b"data", content_type="text/xml")
    # bug fix: this line duplicated the text/xml check above; the other XML
    # content type the view matches is application/xml (as in upstream mitmproxy)
    assert v.render_priority(b"data", content_type="application/xml")
    assert v.render_priority(b"data", content_type="text/html")
    assert not v.render_priority(b"data", content_type="text/plain")
    assert not v.render_priority(b"", content_type="text/xml")
    assert v.render_priority(b"<html/>")
| {
"content_hash": "3baf01da681630a7536f0502e540f68d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 32.15217391304348,
"alnum_prop": 0.6118999323867478,
"repo_name": "Kriechi/mitmproxy",
"id": "fc1bac5c8eb81e644efc7d3a4229a0107f52c9e4",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/dns-addon",
"path": "test/mitmproxy/contentviews/test_xml_html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20958"
},
{
"name": "Dockerfile",
"bytes": "1031"
},
{
"name": "HTML",
"bytes": "14827"
},
{
"name": "JavaScript",
"bytes": "277250"
},
{
"name": "PowerShell",
"bytes": "495"
},
{
"name": "Python",
"bytes": "1880661"
},
{
"name": "Shell",
"bytes": "4711"
}
],
"symlink_target": ""
} |
'''
This module retrieves information of all the courses offered by Coursera
through their Catalog APIs
Link to documentation:
https://tech.coursera.org/app-platform/catalog/
'''
import requests
class CourseraAPI(object):
    '''
    This class defines attributes and methods for the Coursera Catalog API.

    NOTE: instantiating the class performs four blocking HTTP GET requests
    (courses, universities, categories, instructors).
    '''
    # Catalog endpoints; the courses call also requests the related
    # instructor/category/university ids via ``includes``.
    COURSERA_CATALOG_API_ENDPOINT_COURSES = 'https://api.coursera.org/api/catalog.v1/courses?fields=name,shortDescription,photo,video,faq,aboutTheCourse,courseSyllabus,recommendedBackground,aboutTheInstructor&includes=instructors,categories,universities'
    COURSERA_CATALOG_API_ENDPOINT_UNIVERSITIES = 'https://api.coursera.org/api/catalog.v1/universities?fields=name'
    COURSERA_CATALOG_API_ENDPOINT_CATEGORIES = 'https://api.coursera.org/api/catalog.v1/categories'
    COURSERA_CATALOG_API_ENDPOINT_INSTRUCTORS = 'https://api.coursera.org/api/catalog.v1/instructors?fields=fullName,bio,photo150'
    def __init__(self):
        # Fetch all four catalog listings up front; each attribute holds the
        # raw ``requests`` response (call ``.json()`` to read the payload).
        self.response_courses = requests.get(CourseraAPI.COURSERA_CATALOG_API_ENDPOINT_COURSES)
        self.response_universities = requests.get(CourseraAPI.COURSERA_CATALOG_API_ENDPOINT_UNIVERSITIES)
        self.response_categories = requests.get(CourseraAPI.COURSERA_CATALOG_API_ENDPOINT_CATEGORIES)
        self.response_instructors = requests.get(CourseraAPI.COURSERA_CATALOG_API_ENDPOINT_INSTRUCTORS)
    def mongofy_courses(self):
        '''
        Convert list of courses to a format that follows the database schema
        which will be inserted into the MongoDB database
        '''
        result = []
        # Join every course with its instructors/categories/universities by
        # id.  The nested loops re-scan the full catalog per linked id
        # (O(courses * links * catalog)); fine for a one-off batch job.
        for item in self.response_courses.json()['elements']:
            course = {}
            course['mooc'] = 'coursera'
            course['key'] = item['shortName']
            course['title'] = item['name']
            course['photo'] = item['photo']
            course['trailer'] = item['video']
            course['short_summary'] = item['shortDescription']
            course['summary'] = item['aboutTheCourse']
            course['recommended_background'] = item['recommendedBackground']
            course['syllabus'] = item['courseSyllabus']
            course['faq'] = item['faq']
            # ``links`` holds id lists for related entities; each may be absent
            links = item['links']
            if 'instructors' in links:
                instructors = []
                for item_x in links['instructors']:
                    for item_y in self.response_instructors.json()['elements']:
                        if item_x == item_y['id']:
                            instructors.append(item_y)
                course['instructors'] = instructors
            else:
                course['instructors'] = []
            if 'categories' in links:
                categories = []
                for item_x in links['categories']:
                    for item_y in self.response_categories.json()['elements']:
                        if item_x == item_y['id']:
                            categories.append(item_y)
                course['categories'] = categories
            else:
                course['categories'] = []
            if 'universities' in links:
                universities = []
                for item_x in links['universities']:
                    for item_y in self.response_universities.json()['elements']:
                        if item_x == item_y['id']:
                            universities.append(item_y)
                course['universities'] = universities
            else:
                course['universities'] = []
            result.append(course)
        return result
if __name__ == '__main__':
    # Smoke test: fetch the catalog and dump the raw courses JSON
    # (Python 2 print statement).
    coursera_object = CourseraAPI()
    print coursera_object.response_courses.json()
| {
"content_hash": "fb0eb5ef3cca2b5675fda539f71a9d07",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 254,
"avg_line_length": 42.63953488372093,
"alnum_prop": 0.5939460049086447,
"repo_name": "ueg1990/mooc_aggregator_restful_api",
"id": "a006b1594bab4b229bc6c72f265094ab35ad40be",
"size": "3667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mooc_aggregator_restful_api/coursera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17229"
}
],
"symlink_target": ""
} |
from random import randint
from flask import Blueprint, Response, flash, redirect, render_template, \
request, url_for
from setman import settings
from setman.frameworks.flask_setman.forms import settings_form_factory
from setman.frameworks.flask_setman.utils import update_form_fields
from setman.utils.auth import auth_permitted
# Blueprint exposing the settings edit/revert views; templates ship with it.
setman_blueprint = Blueprint('setman', __name__, template_folder='templates')
@setman_blueprint.route('/edit', methods=('GET', 'POST'))
def edit():
    """
    Edit all available settings.

    Renders the settings form; on a valid POST saves the values and
    redirects back with a random query string to defeat browser caching.
    Responds with 403 when the request is not authorized.
    """
    if not auth_permitted(request):
        output = render_template('setman/edit.html', auth_forbidden=True)
        return Response(output, status=403)
    settings_form = settings_form_factory()
    if request.method == 'POST':
        form = settings_form(request.form)
        if form.validate():
            form.save()
            flash('Settings have been succesfully updated.', 'success')
            # random suffix busts any cached copy of the edit page
            return redirect('%s?%d' % (url_for('setman.edit'),
                                       randint(1000, 9999)))
    else:
        form = settings_form()
    return render_template('setman/edit.html', form=update_form_fields(form))
@setman_blueprint.route('/revert')
def revert():
    """
    Revert all settings to their default values, then redirect back to the
    edit page with a random cache-busting query string.

    Responds with 403 when the request is not authorized.
    """
    # ``settings`` is imported at module level; the redundant in-function
    # ``from setman import settings`` re-import has been removed.
    if not auth_permitted(request):
        output = render_template('setman/edit.html', auth_forbidden=True)
        return Response(output, status=403)
    settings.revert()
    flash('Settings have been reverted to default values.', 'success')
    return redirect('%s?%d' % (url_for('setman.edit'), randint(1000, 9999)))
| {
"content_hash": "45f1051c41bfa8b3e27717af9ff9a60c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 30.054545454545455,
"alnum_prop": 0.6563823351482153,
"repo_name": "playpauseandstop/setman",
"id": "bc136385702937b350402842fbac4ccf60592d99",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setman/frameworks/flask_setman/blueprint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "175101"
}
],
"symlink_target": ""
} |
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Raised for errors encountered while driving the oc/oadm CLI.'''
    pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the `oc`/`oadm` command line tools.

    Each helper builds an argument list and shells out through
    openshift_cmd(); credentials are passed via the KUBECONFIG env var.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace  # default namespace for -n
        self.verbose = verbose  # echo commands and output to stdout
        self.kubeconfig = kubeconfig  # path exported as KUBECONFIG
        self.all_namespaces = all_namespaces  # query across all namespaces
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            # nothing to replace; bubble up the (failed) get result
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # only call `oc replace` when at least one key actually changed
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''run `oc replace -f <fname>` (optionally --force) in the namespace'''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''create a resource from an in-memory definition via a temp file'''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''run `oc create -f <fname>` in the namespace'''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''delete the named resource, optionally restricted by selector'''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template; optionally create the rendered objects'''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # template supplied on stdin instead of by name
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        # write the processed output to a temp file and `oc create` it
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name (or list the resource type) as json'''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node --evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform `oc import-image`, optionally from an external registry url '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc.

        Runs /usr/bin/oc (or /usr/bin/oadm) with `cmd` appended and returns
        a dict with at least "returncode", "results" and "cmd"; stdout is
        parsed as json or returned raw depending on `output_type`.
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        # credentials are passed solely via the KUBECONFIG env var
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # non-json output (e.g. empty) is reported, not fatal
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in /tmp with the given name and serialized contents '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                # unknown type: write the data verbatim
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit: remove each listed file or directory tree.'''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by metadata name (None if absent).'''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' read and deserialize a resource file (yaml or json) '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Returns True when every non-skipped key/value in result_def matches
        user_def (recursing into nested dicts and lists of dicts).
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False
                # element-wise compare; only dict pairs are deep-compared
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False
        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for a named resource's CLI configuration options.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options
    @property
    def config_options(self):
        '''Return the raw options dict.'''
        return self._options
    def to_option_list(self):
        '''Alias for stringify().'''
        return self.stringify()
    def stringify(self):
        '''Render the included options as a list of --key=value parameters.

        An option is emitted when marked "include" and its value is truthy
        or an int (so 0/False are still emitted); underscores in the option
        name become dashes.
        '''
        params = []
        for option_name, option in self.config_options.items():
            has_value = option['value'] or isinstance(option['value'], int)
            if option['include'] and has_value:
                params.append('--%s=%s' % (option_name.replace('_', '-'),
                                           option['value']))
        return params
class YeditException(Exception):
    '''Raised by Yedit when a yaml document cannot be traversed or edited.'''
    pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class GroupConfig(object):
    ''' Container for the options needed to build an OpenShift Group resource. '''

    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig):
        ''' Store the group options and build the resource definition. '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.namespace = namespace
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' Populate self.data with a v1 Group resource definition. '''
        self.data.update({
            'apiVersion': 'v1',
            'kind': 'Group',
            'metadata': {'name': self.name},
            'users': None,
        })
# pylint: disable=too-many-instance-attributes
class Group(Yedit):
    ''' Thin Yedit wrapper representing an OpenShift group resource. '''
    kind = 'group'

    def __init__(self, content):
        '''Group constructor: delegate parsing and storage to Yedit.'''
        super(Group, self).__init__(content=content)
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCGroup(OpenShiftCLI):
    ''' Manage OpenShift group resources through the oc command line tools. '''
    kind = 'group'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCGroup '''
        super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.namespace = config.namespace
        self._group = None

    @property
    def group(self):
        ''' Lazily fetch and cache the group from the cluster on first access. '''
        if self._group is None:
            self.get()
        return self._group

    @group.setter
    def group(self, data):
        ''' Cache the fetched group object. '''
        self._group = data

    def exists(self):
        ''' Return True when the group is present on the cluster. '''
        return bool(self.group)

    def get(self):
        '''Fetch group information; a "not found" error is rewritten into an
        empty, successful result.'''
        result = self._get(self.kind, self.config.name)

        if result['returncode'] == 0:
            self.group = Group(content=result['results'][0])
        elif 'groups \"%s\" not found' % self.config.name in result['stderr']:
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''Delete the group from the cluster.'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''Create the group from the desired definition.'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''Replace the cluster definition with the desired one.'''
        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        '''Return True when the desired definition differs from the one on
        the cluster.'''
        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict,
                                         skip_keys=[], debug=True)
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    '''
    ansible oc module for group

    Supported states:
      - list:    return the current group, change nothing
      - absent:  delete the group if it exists
      - present: create the group if missing, otherwise update it when the
                 desired definition differs from the cluster's

    NOTE(review): the flow below relies on module.exit_json/fail_json
    terminating execution (they raise in Ansible) — confirm.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, type='str'),
            namespace=dict(default='default', type='str'),
            # adding users to a group is handled through the oc_users module
            #users=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )

    gconfig = GroupConfig(module.params['name'],
                          module.params['namespace'],
                          module.params['kubeconfig'],
                         )
    oc_group = OCGroup(gconfig,
                       verbose=module.params['debug'])

    state = module.params['state']

    api_rval = oc_group.get()

    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Delete
    ########
    if state == 'absent':
        if oc_group.exists():

            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')

            api_rval = oc_group.delete()

            module.exit_json(changed=True, results=api_rval, state="absent")

        module.exit_json(changed=False, state="absent")

    if state == 'present':
        ########
        # Create
        ########
        if not oc_group.exists():

            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')

            # Create it here
            api_rval = oc_group.create()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            # return the created object
            api_rval = oc_group.get()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present")

        ########
        # Update
        ########
        if oc_group.needs_update():
            api_rval = oc_group.update()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            # return the created object
            api_rval = oc_group.get()

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present")

        module.exit_json(changed=False, results=api_rval, state="present")

    # reached only with an unrecognized state value
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required for Ansible's module boilerplate.
from ansible.module_utils.basic import *

main()
| {
"content_hash": "84848c4d5334f9452fc25a4f1dfdbd61",
"timestamp": "",
"source": "github",
"line_count": 1098,
"max_line_length": 118,
"avg_line_length": 32.55373406193078,
"alnum_prop": 0.5254028648164727,
"repo_name": "rhdedgar/openshift-tools",
"id": "c0f9ec0dda19471048663395392cbf50e2e81f71",
"size": "36155",
"binary": false,
"copies": "3",
"ref": "refs/heads/stg",
"path": "ansible/roles/lib_openshift_3.2/library/oc_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "73250"
},
{
"name": "JavaScript",
"bytes": "960"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "20646861"
},
{
"name": "Shell",
"bytes": "903453"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetCensusIDByTypeAndName(Choreography):
    """
    Temboo Choreo wrapper for /Library/DataGov/GetCensusIDByTypeAndName.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetCensusIDByTypeAndName Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetCensusIDByTypeAndName, self).__init__(temboo_session, '/Library/DataGov/GetCensusIDByTypeAndName')

    def new_input_set(self):
        # Return an empty input set for this Choreo.
        return GetCensusIDByTypeAndNameInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in this Choreo's result set.
        return GetCensusIDByTypeAndNameResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Build an execution handle for a running instance of this Choreo.
        return GetCensusIDByTypeAndNameChoreographyExecution(session, exec_id, path)
class GetCensusIDByTypeAndNameInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetCensusIDByTypeAndName
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_GeographyName(self, value):
        """
        Set the value of the GeographyName input for this Choreo. ((required, string) Specify the geography name for the corresponding type, with at least three leading characters. For example, for the geography type "state" you could enter "ore" for Oregon.)
        """
        super(GetCensusIDByTypeAndNameInputSet, self)._set_input('GeographyName', value)

    def set_GeographyType(self, value):
        """
        Set the value of the GeographyType input for this Choreo. ((required, string) Specify one of the following geography type values: "state", "county", "tract", "block", "congdistrict", "statehouse", "statesenate", "censusplace", or "msa" (metropolitan statistical area).)
        """
        super(GetCensusIDByTypeAndNameInputSet, self)._set_input('GeographyType', value)

    def set_MaxResults(self, value):
        """
        Set the value of the MaxResults input for this Choreo. ((required, integer) Specify the maximum number of results to return. Defaults to 50.)
        """
        super(GetCensusIDByTypeAndNameInputSet, self)._set_input('MaxResults', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
        """
        super(GetCensusIDByTypeAndNameInputSet, self)._set_input('ResponseFormat', value)
class GetCensusIDByTypeAndNameResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetCensusIDByTypeAndName Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin `str`; kept as-is
        # for compatibility with the generated-SDK interface.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response returned from the API.)
        """
        return self._output.get('Response', None)

    def get_CensusID(self):
        """
        Retrieve the value for the "CensusID" output from this Choreo execution. ((integer) The ID retrieved from the API call.)
        """
        return self._output.get('CensusID', None)
class GetCensusIDByTypeAndNameChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronous run of this Choreo.

    def _make_result_set(self, response, path):
        # Wrap the raw response in this Choreo's result set.
        return GetCensusIDByTypeAndNameResultSet(response, path)
| {
"content_hash": "66a0c7addf2443721bd731b4424e61dc",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 277,
"avg_line_length": 47.88157894736842,
"alnum_prop": 0.7136575982412751,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "c1511c1301b0f3f6290820226fb19077dc9673b2",
"size": "4537",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/DataGov/GetCensusIDByTypeAndName.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
} |
from edmunds.http.requestmiddleware import RequestMiddleware
from flask_security import auth_token_required
class TokenAuthMiddleware(RequestMiddleware):
    """
    Request middleware that enforces Flask-Security token authentication.
    """

    def before(self):
        """
        Run before the request is handled.

        Wraps a no-op callable with Flask-Security's token check: when the
        token is missing or invalid the decorator produces an error
        response, otherwise the no-op's result (None) is returned and the
        request proceeds.
        """
        return auth_token_required(lambda: None)()
| {
"content_hash": "0f9db61d44b341262fb177333c53204b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 60,
"avg_line_length": 22,
"alnum_prop": 0.6641414141414141,
"repo_name": "LowieHuyghe/edmunds",
"id": "e37ef4cc20489e3192404485005cb395ee542640",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edmunds/auth/middleware/tokenauthmiddleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "570304"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0')

# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
    raise pyxb.PyXBVersionError(_PyXBVersion)

# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct

# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse XML text and build a Python binding instance from its
    document element.

    @param xml_text An XML document. This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # Fall back to the DOM path when the SAX style is not selected.
    if pyxb._XMLStyle != pyxb.XMLStyle_saxer:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    parser = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = parser.getContentHandler()
    # The SAX parser wants bytes; encode text input first.
    encoded = xml_text
    if isinstance(encoded, _six.text_type):
        encoded = encoded.encode(pyxb._InputEncoding)
    parser.parse(io.BytesIO(encoded))
    return handler.rootObject()
def CreateFromDOM (node, default_namespace=None):
    """Build a Python binding instance from a DOM node whose tag matches
    an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    ns = default_namespace
    if ns is None:
        ns = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, ns)
# Atomic simple type: {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertAudienceType
class AlertAudienceType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Alert Audience Data Type"""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AlertAudienceType')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 15, 1)
    _Documentation = 'Alert Audience Data Type'
# Enumeration members (Customer, Staff, Operations) are registered after
# the class body, as PyXB-generated code does.
AlertAudienceType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=AlertAudienceType, enum_prefix=None)
AlertAudienceType.Customer = AlertAudienceType._CF_enumeration.addEnumeration(unicode_value='Customer', tag='Customer')
AlertAudienceType.Staff = AlertAudienceType._CF_enumeration.addEnumeration(unicode_value='Staff', tag='Staff')
AlertAudienceType.Operations = AlertAudienceType._CF_enumeration.addEnumeration(unicode_value='Operations', tag='Operations')
AlertAudienceType._InitializeFacetMap(AlertAudienceType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'AlertAudienceType', AlertAudienceType)
# Atomic simple type: {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertType
class AlertType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Alert Type Data Type"""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AlertType')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 25, 1)
    _Documentation = 'Alert Type Data Type'
# Enumeration members (Normal, Forced) are registered after the class body,
# as PyXB-generated code does.
AlertType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=AlertType, enum_prefix=None)
AlertType.Normal = AlertType._CF_enumeration.addEnumeration(unicode_value='Normal', tag='Normal')
AlertType.Forced = AlertType._CF_enumeration.addEnumeration(unicode_value='Forced', tag='Forced')
AlertType._InitializeFacetMap(AlertType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'AlertType', AlertType)
# Complex type {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}TrainAlert with content type ELEMENT_ONLY
class TrainAlert (pyxb.binding.basis.complexTypeDefinition):

    """Train Alert"""

    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'TrainAlert')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 35, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertID uses Python identifier AlertID
    __AlertID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'AlertID'), 'AlertID', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1AlertID', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 40, 3), )
    AlertID = property(__AlertID.value, __AlertID.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertServices uses Python identifier AlertServices
    __AlertServices = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'AlertServices'), 'AlertServices', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1AlertServices', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 45, 3), )
    AlertServices = property(__AlertServices.value, __AlertServices.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}SendAlertBySMS uses Python identifier SendAlertBySMS
    __SendAlertBySMS = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SendAlertBySMS'), 'SendAlertBySMS', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1SendAlertBySMS', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 50, 3), )
    SendAlertBySMS = property(__SendAlertBySMS.value, __SendAlertBySMS.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}SendAlertByEmail uses Python identifier SendAlertByEmail
    __SendAlertByEmail = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SendAlertByEmail'), 'SendAlertByEmail', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1SendAlertByEmail', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 55, 3), )
    SendAlertByEmail = property(__SendAlertByEmail.value, __SendAlertByEmail.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}SendAlertByTwitter uses Python identifier SendAlertByTwitter
    __SendAlertByTwitter = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SendAlertByTwitter'), 'SendAlertByTwitter', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1SendAlertByTwitter', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 60, 3), )
    SendAlertByTwitter = property(__SendAlertByTwitter.value, __SendAlertByTwitter.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}Source uses Python identifier Source
    __Source = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Source'), 'Source', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1Source', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 65, 3), )
    Source = property(__Source.value, __Source.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertText uses Python identifier AlertText
    __AlertText = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'AlertText'), 'AlertText', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1AlertText', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 70, 3), )
    AlertText = property(__AlertText.value, __AlertText.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}Audience uses Python identifier Audience
    __Audience = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Audience'), 'Audience', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1Audience', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 75, 3), )
    Audience = property(__Audience.value, __Audience.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertType uses Python identifier AlertType
    __AlertType = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'AlertType'), 'AlertType', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1AlertType', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 80, 3), )
    AlertType = property(__AlertType.value, __AlertType.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}CopiedFromAlertID uses Python identifier CopiedFromAlertID
    __CopiedFromAlertID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'CopiedFromAlertID'), 'CopiedFromAlertID', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1CopiedFromAlertID', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 85, 3), )
    CopiedFromAlertID = property(__CopiedFromAlertID.value, __CopiedFromAlertID.set, None, 'TODO')

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}CopiedFromSource uses Python identifier CopiedFromSource
    __CopiedFromSource = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'CopiedFromSource'), 'CopiedFromSource', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_TrainAlert_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1CopiedFromSource', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 90, 3), )
    CopiedFromSource = property(__CopiedFromSource.value, __CopiedFromSource.set, None, 'TODO')

    # Register the element declarations with the binding's lookup maps.
    _ElementMap.update({
        __AlertID.name() : __AlertID,
        __AlertServices.name() : __AlertServices,
        __SendAlertBySMS.name() : __SendAlertBySMS,
        __SendAlertByEmail.name() : __SendAlertByEmail,
        __SendAlertByTwitter.name() : __SendAlertByTwitter,
        __Source.name() : __Source,
        __AlertText.name() : __AlertText,
        __Audience.name() : __Audience,
        __AlertType.name() : __AlertType,
        __CopiedFromAlertID.name() : __CopiedFromAlertID,
        __CopiedFromSource.name() : __CopiedFromSource
    })
    _AttributeMap.update({

    })
Namespace.addCategoryObject('typeBinding', 'TrainAlert', TrainAlert)
# Complex type {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertServices with content type ELEMENT_ONLY
class AlertServices (pyxb.binding.basis.complexTypeDefinition):

    """A list of services to which the alert applies"""

    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AlertServices')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 124, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertService uses Python identifier AlertService
    __AlertService = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'AlertService'), 'AlertService', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_AlertServices_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1AlertService', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 129, 3), )
    AlertService = property(__AlertService.value, __AlertService.set, None, 'TODO')

    # Register the element declarations with the binding's lookup maps.
    _ElementMap.update({
        __AlertService.name() : __AlertService
    })
    _AttributeMap.update({

    })
Namespace.addCategoryObject('typeBinding', 'AlertServices', AlertServices)
# Complex type {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}AlertService with content type ELEMENT_ONLY
class AlertService (pyxb.binding.basis.complexTypeDefinition):
    """TODO"""
    # NOTE: PyXB-generated binding class (from rttiPPTTrainAlerts_v1.xsd).
    # Carries one repeatable Location element plus RID/UID/SSD attributes.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AlertService')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 97, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/TrainAlerts/v1}Location uses Python identifier Location
    # (True flag: repeatable element)
    __Location = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Location'), 'Location', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_AlertService_httpwww_thalesgroup_comrttiPushPortTrainAlertsv1Location', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 102, 3), )

    Location = property(__Location.value, __Location.set, None, 'TODO')

    # Attribute RID uses Python identifier RID
    # (typed via the imported common-types binding)
    __RID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'RID'), 'RID', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_AlertService_RID', _ImportedBinding_darwinpush_xb_ct.RIDType)
    __RID._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 108, 2)
    __RID._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 108, 2)

    RID = property(__RID.value, __RID.set, None, 'TODO')

    # Attribute UID uses Python identifier UID
    __UID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'UID'), 'UID', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_AlertService_UID', _ImportedBinding_darwinpush_xb_ct.UIDType)
    __UID._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 113, 2)
    __UID._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 113, 2)

    UID = property(__UID.value, __UID.set, None, 'TODO')

    # Attribute SSD uses Python identifier SSD (an xs:date value)
    __SSD = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'SSD'), 'SSD', '__httpwww_thalesgroup_comrttiPushPortTrainAlertsv1_AlertService_SSD', pyxb.binding.datatypes.date)
    __SSD._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 118, 2)
    __SSD._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 118, 2)

    SSD = property(__SSD.value, __SSD.set, None, 'TODO')

    _ElementMap.update({
        __Location.name() : __Location
    })
    _AttributeMap.update({
        __RID.name() : __RID,
        __UID.name() : __UID,
        __SSD.name() : __SSD
    })
# Register the binding so the namespace can resolve 'AlertService'.
Namespace.addCategoryObject('typeBinding', 'AlertService', AlertService)
# Bind the TrainAlert child elements (declared in the XSD at the locations
# noted below) into TrainAlert's element map. This must run after all
# referenced types (AlertServices, AlertAudienceType, AlertType) exist.
TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'AlertID'), pyxb.binding.datatypes.string, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 40, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'AlertServices'), AlertServices, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 45, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SendAlertBySMS'), pyxb.binding.datatypes.boolean, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 50, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SendAlertByEmail'), pyxb.binding.datatypes.boolean, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 55, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SendAlertByTwitter'), pyxb.binding.datatypes.boolean, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 60, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Source'), pyxb.binding.datatypes.string, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 65, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'AlertText'), pyxb.binding.datatypes.string, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 70, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Audience'), AlertAudienceType, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 75, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'AlertType'), AlertType, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 80, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CopiedFromAlertID'), pyxb.binding.datatypes.string, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 85, 3)))

TrainAlert._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'CopiedFromSource'), pyxb.binding.datatypes.string, scope=TrainAlert, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 90, 3)))
def _BuildAutomaton ():
    """Build the FAC content-model automaton for TrainAlert.

    PyXB-generated: one state per child element, in schema order. The two
    counters correspond to the optional (min=0, max=1) trailing elements
    CopiedFromAlertID (xsd line 85) and CopiedFromSource (xsd line 90).
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    counters = set()
    # cc_0: occurrence counter for the optional CopiedFromAlertID element
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 85, 3))
    counters.add(cc_0)
    # cc_1: occurrence counter for the optional CopiedFromSource element
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 90, 3))
    counters.add(cc_1)
    states = []

    # States st_0..st_7 are required elements in sequence: a None
    # final_update means the automaton may not stop in that state.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'AlertID')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 40, 3))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'AlertServices')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 45, 3))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SendAlertBySMS')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 50, 3))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SendAlertByEmail')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 55, 3))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SendAlertByTwitter')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 60, 3))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Source')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 65, 3))
    st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'AlertText')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 70, 3))
    st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_6)
    final_update = None
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Audience')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 75, 3))
    st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_7)

    # st_8 (AlertType) is the first state where the content may legally end.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'AlertType')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 80, 3))
    st_8 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_8)

    # st_9/st_10: the optional trailing elements, finalizable with their
    # counters cleared.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'CopiedFromAlertID')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 85, 3))
    st_9 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_9)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(TrainAlert._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'CopiedFromSource')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 90, 3))
    st_10 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_10)

    # Linear transitions through the required sequence st_0 -> ... -> st_8.
    transitions = []
    transitions.append(fac.Transition(st_1, [
         ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
         ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
         ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
         ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_5, [
         ]))
    st_4._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_6, [
         ]))
    st_5._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_7, [
         ]))
    st_6._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_8, [
         ]))
    st_7._set_transitionSet(transitions)

    # After AlertType, either optional trailing element may follow.
    transitions = []
    transitions.append(fac.Transition(st_9, [
         ]))
    transitions.append(fac.Transition(st_10, [
         ]))
    st_8._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_9, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_10, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_9._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_10, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_10._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the content-model automaton; _BuildAutomaton deletes itself on call.
TrainAlert._Automaton = _BuildAutomaton()

# Bind the repeatable AlertService child element into AlertServices.
AlertServices._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'AlertService'), AlertService, scope=AlertServices, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 129, 3)))
def _BuildAutomaton_ ():
    """Build the FAC content-model automaton for AlertServices.

    A single self-looping state accepting any number of AlertService
    elements (counter min=0, max=None, i.e. unbounded). The True flag in
    the Automaton call presumably marks the empty content as acceptable,
    matching minOccurs=0 — this is PyXB-generated, do not hand-edit.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 129, 3))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(AlertServices._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'AlertService')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 129, 3))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # Self-loop: each repetition increments the occurrence counter.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
# Attach the content-model automaton; _BuildAutomaton_ deletes itself on call.
AlertServices._Automaton = _BuildAutomaton_()

# Bind the repeatable Location child element into AlertService.
AlertService._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Location'), pyxb.binding.datatypes.string, scope=AlertService, documentation='TODO', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 102, 3)))
def _BuildAutomaton_2 ():
    """Build the FAC content-model automaton for AlertService.

    A single state for the Location element with an uncounted self-loop
    (i.e. one or more Location children). PyXB-generated; do not hand-edit.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac

    counters = set()
    states = []
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(AlertService._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Location')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainAlerts_v1.xsd', 102, 3))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
         ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the content-model automaton; _BuildAutomaton_2 deletes itself on call.
AlertService._Automaton = _BuildAutomaton_2()
| {
"content_hash": "cb8f271313c970bd439cf497e160e914",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 397,
"avg_line_length": 63.21063829787234,
"alnum_prop": 0.7554613080211384,
"repo_name": "HackTrain/darwinpush",
"id": "e8147fbb671a32d1e1e9abe5eb3738dee968c496",
"size": "29993",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "darwinpush/xb/raw/ta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "573058"
}
],
"symlink_target": ""
} |
import click
import os
from textx import GeneratorDesc
def codegen_flow_pu(metamodel, model, output_path, overwrite, debug=False,
                    **custom_args):
    """
    This command transforms *.flow-files to *.pu files (plantuml).

    Emits one `component` line per algorithm and one connection line per
    flow, then appends the custom generator arguments (used by the tests).
    The output file is written next to the model file unless `output_path`
    is given, and is only overwritten when `overwrite` is set.
    """
    # Assemble the PlantUML document as a list of lines, then join once.
    pu_lines = ["@startuml"]
    pu_lines.extend("component {}".format(algo.name) for algo in model.algos)
    pu_lines.extend('{} "{}" #--# {}'.format(flow.algo1.name,
                                             flow.algo1.outp.name,
                                             flow.algo2.name)
                    for flow in model.flows)
    pu_lines.append("@enduml")
    txt = "\n".join(pu_lines) + "\n"

    # Dump custom args for testing
    txt += '\n'.join("{}={}".format(arg_name, arg_value)
                     for arg_name, arg_value in custom_args.items())

    # Derive the target path: same base name as the model, '.pu' suffix.
    src_file = model._tx_filename
    target_dir = output_path if output_path else os.path.dirname(src_file)
    stem, _ = os.path.splitext(os.path.basename(src_file))
    target_file = os.path.abspath(
        os.path.join(target_dir, "{}.{}".format(stem, 'pu')))

    if not overwrite and os.path.exists(target_file):
        # Respect an existing file unless --overwrite was requested.
        click.echo('-- Skipping: {}'.format(target_file))
    else:
        click.echo('-> {}'.format(target_file))
        with open(target_file, "w") as out:
            out.write(txt)
# Generator registration object picked up by textX's registration API
# (exposes this generator for the flow-dsl language targeting PlantUML).
flow_pu = GeneratorDesc(
    language='flow-dsl',
    target='PlantUML',
    description='Generating PlantUML visualization from flow-dsl',
    generator=codegen_flow_pu)
| {
"content_hash": "a9829f0a113f0598a8b0b7fc7a3b0336",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 33.68292682926829,
"alnum_prop": 0.5843591600289645,
"repo_name": "igordejanovic/textX",
"id": "ac6c87d55c17895ebfe99ae0473879836506f1cf",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/registration/projects/flow_codegen/flow_codegen/generators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "93"
},
{
"name": "Brainfuck",
"bytes": "87"
},
{
"name": "C",
"bytes": "96"
},
{
"name": "Limbo",
"bytes": "634"
},
{
"name": "Python",
"bytes": "391212"
},
{
"name": "Shell",
"bytes": "1094"
}
],
"symlink_target": ""
} |
import unittest
from nosedep import depends
class TestSimpleDecorated(unittest.TestCase):
    """Specifically tests when inheriting from unittest.TestCase"""
    # Fixture for nosedep: a three-test dependency chain. The expected
    # outcome is ok -> pass, fail -> fail, skip -> skipped by nosedep
    # because its dependency failed (names encode the expectation).

    # Ordered after test_simple_decorated_ok; deliberately fails.
    @depends(after='test_simple_decorated_ok')
    def test_simple_decorated_fail(self):
        assert 1 == 0

    # No dependencies; always passes.
    def test_simple_decorated_ok(self):
        assert 1 == 1

    # Depends on the failing test above, so nosedep should skip it even
    # though its assertion would pass.
    @depends(after='test_simple_decorated_fail')
    def test_simple_decorated_skip(self):
        assert 1 == 1
| {
"content_hash": "516901ac91b60d88cee69b3db7f22753",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 67,
"avg_line_length": 26.41176470588235,
"alnum_prop": 0.688195991091314,
"repo_name": "Zitrax/nose-dep",
"id": "9676393c8e3fd500698312264f6f4e2482051de5",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_scripts/simple_decorated.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41635"
}
],
"symlink_target": ""
} |
from easy_print import *
from working_init import *
def _display_results(results):
    """Print a request/response summary for a ThreatConnect API call.

    Prints the request status, request URIs, response count and the raw
    API response. When the request status is "Success", additionally
    prints every "<item>_list" view exposed by the result data structure,
    plus its count/json/csv/keyval renderings.

    Args:
        results: A results object returned by the tc.get_adversar*
            methods (provides status(), uris(), count(), api_response()
            and data()).
    """
    # Request Status (string)
    easy_print('Request Status', results.status())
    # Request URIs (list)
    easy_print('Request URIs', results.uris())
    # Response Count (int)
    easy_print('Response Count', results.count())
    # API Response (dict)
    easy_print('API Response', results.api_response())

    if results.status() == "Success":
        # get indicator keys for data type
        data_methods = ["%s_list" % item for item in results.data().data_structure]

        # get data object
        results_data = results.data()

        # loop through all data methods
        for meth in data_methods:
            easy_print(meth, getattr(results_data, meth)())

        # count (int)
        easy_print('count', results_data.count())
        # json (string)
        easy_print('json', results_data.json())
        # csv (string)
        easy_print('csv', results_data.csv())
        # keyval (string)
        easy_print('keyval', results_data.keyval())


def main():
    """
    Working With Adversaries

    Each sample below is gated behind "if False"; change a gate to "True"
    to run that sample against the configured ThreatConnect instance.
    """
    # optionally set max results
    tc.set_max_results("500")

    """
    Get Adversary by ID

    Method:
        get_adversary_by_id(id)
        id -> adversary id #

    Use this method to return a single adversary result by passing
    an adversary id.

    To run sample code change the "False" value to "True"
    """
    if False:
        # get adversary by id
        adversary_id = 76308
        results = tc.get_adversary_by_id(adversary_id)
        _display_results(results)

    """
    Get Adversaries

    Method:
        get_adversaries(owners=<list of owners>)
        owners -> (optional) list of owners

    Use this method to return adversary results. A list of owners can be
    optionally provided. If no owners are provided the default owner
    organization is used.

    The "tc.get_owners()" function can be used to get a list of owners.

    To run sample code change the "False" value to "True"
    """
    if False:
        # get all adversaries for default owner
        tc.set_max_results("75")  # optionally override default max results
        results = tc.get_adversaries()
        _display_results(results)

    if False:
        # get all adversaries for all owners
        tc.set_max_results("10")  # optionally override default max results
        requested_owners = tc.get_owners().data().name_list()
        results = tc.get_adversaries(requested_owners)
        _display_results(results)

    """
    Get Adversaries by Indicator

    Method:
        get_adversaries_by_indicator(indicator, indicator_type=None, owners=[]):
        indicator -> any indicator
        indicator_type -> (optional) indicator type
        owners -> (optional) list of owners

    Use this method to return adversaries by a user provided indicator.
    Optionally provide the indicator type. If no indicator type is provided
    the indicator type will be automatically determined. A list of owners can
    be optionally provided. If no owners are provided the default owner
    organization is used.

    The "tc.get_owners()" function can be used to get a list of owners.

    To run sample code change the "False" value to "True"
    """
    if False:
        # get adversaries by indicator for default owner
        indicator = "1.2.3.4"
        results = tc.get_adversaries_by_indicator(indicator)
        _display_results(results)

    if False:
        # get adversaries by indicator for all owners
        # indicator = "218.65.4.171"
        indicator = "1.2.3.4"
        requested_owners = tc.get_owners().data().name_list()
        results = tc.get_adversaries_by_indicator(indicator, owners=requested_owners)
        _display_results(results)

    """
    Get Adversaries by Tag

    Method:
        get_adversaries_by_tag(tag_name, owners=[]):
        tag_name -> a tag name
        owners -> (optional) list of owners

    Use this method to return adversaries by a user provided tag name. A
    list of owners can be optionally provided. If no owners are provided
    the default owner organization is used.

    The "tc.get_owners()" function can be used to get a list of owners.

    To run sample code change the "False" value to "True"
    """
    if False:
        # get adversaries by tag for default owner
        tag = "adam"
        results = tc.get_adversaries_by_tag(tag)
        _display_results(results)

    if False:
        # get adversaries by tag for all owners
        tag = "Advanced Persistent Threat"
        requested_owners = tc.get_owners().data().name_list()
        results = tc.get_adversaries_by_tag(tag, requested_owners)
        _display_results(results)
# Script entry point: run the adversary samples when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "3c179ef645565556a2459a505c156aa1",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 87,
"avg_line_length": 29.36,
"alnum_prop": 0.5707538601271571,
"repo_name": "andybarilla/TCPy_V2",
"id": "e4c542c6c035d326c2dc82296d80af86f84a6284",
"size": "11010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "working_with_adversaries.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "302498"
}
],
"symlink_target": ""
} |
import sys
import pysam
import numpy
global CACHE_CHROM
global CACHE_SAMFILE
CACHE_CHROM = None
CACHE_SAMFILE = None
def check_pysam_chrom(samFile, chrom=None):
    """Resolve a sam/bam/cram input to a pysam object and a valid chromosome.

    If `samFile` is a path string it is opened with the mode matching its
    suffix (.bam -> "rb", .cram -> "rc", .sam -> "r"); otherwise it is
    assumed to already be a pysam.AlignmentFile. If `chrom` is not among
    the file's references, the "chr" prefix is toggled once and retried.

    Args:
        samFile: file path (str / numpy.str_) or an opened AlignmentFile.
        chrom: chromosome name to validate, or None to skip validation.

    Returns:
        (samFile, chrom): the opened file and the resolved chromosome
        name; chrom is None when it cannot be found in the references.

    Side effects:
        Updates the module-level CACHE_SAMFILE/CACHE_CHROM cache; exits
        the process (sys.exit(1)) on an unrecognized file suffix.
    """
    global CACHE_CHROM
    global CACHE_SAMFILE

    # Fast path: reuse the previously resolved (file, chrom) pair.
    if CACHE_CHROM is not None:
        if (samFile == CACHE_SAMFILE) and (chrom == CACHE_CHROM):
            return CACHE_SAMFILE, CACHE_CHROM

    # isinstance instead of `type(x) == str`; numpy.str_ subclasses str.
    if isinstance(samFile, (str, numpy.str_)):
        ftype = samFile.split(".")[-1]
        if ftype not in ("bam", "sam", "cram"):
            print("Error: file type need suffix of bam, sam or cram.")
            sys.exit(1)
        # Map the suffix to the pysam open mode (sam falls back to "r").
        mode = {"cram": "rc", "bam": "rb"}.get(ftype, "r")
        samFile = pysam.AlignmentFile(samFile, mode)

    if chrom is not None:
        if chrom not in samFile.references:
            # Toggle the "chr" prefix once and retry the lookup.
            if chrom.startswith("chr"):
                chrom = chrom.split("chr")[1]
            else:
                chrom = "chr" + chrom
        if chrom not in samFile.references:
            print("Can't find references %s in samFile" %chrom)
            return samFile, None

    CACHE_CHROM = chrom
    CACHE_SAMFILE = samFile
    return samFile, chrom
def load_samfile(samFile, chrom=None):
    """Check if samFile is a file name or pysam object, and fix chrom format.

    Deprecated in favour of check_pysam_chrom(); retained for backward
    compatibility. Opens a .sam/.bam/.cram path as a pysam AlignmentFile
    and validates `chrom` against the file's references (toggling the
    "chr" prefix once if needed).

    WARNING: inconsistent return arity — returns a (samFile, chrom) tuple
    when `chrom` is given, but only `samFile` when `chrom` is None.
    """
    print('Warning: The load_samfile() function is recommended to be ' +
          'replaced by check_pysam_chrom().')

    global CACHE_CHROM
    global CACHE_SAMFILE

    # get from cache
    if CACHE_CHROM is not None:
        if (samFile == CACHE_SAMFILE) and (chrom == CACHE_CHROM):
            return CACHE_SAMFILE, CACHE_CHROM

    # open file
    if type(samFile) == str or type(samFile) == numpy.str_:
        ftype = samFile.split(".")[-1]
        if ftype != "bam" and ftype != "sam" and ftype != "cram" :
            print("Error: file type need suffix of bam, sam or cram.")
            sys.exit(1)
        # choose the pysam open mode from the file suffix
        if ftype == "cram":
            samFile = pysam.AlignmentFile(samFile, "rc")
        elif ftype == "bam":
            samFile = pysam.AlignmentFile(samFile, "rb")
        else:
            samFile = pysam.AlignmentFile(samFile, "r")
    else:
        # unlike check_pysam_chrom, non-string input is rejected outright
        print("[BRIE2] Error: unknown data type: %s" %samFile)
        print(type(samFile), type(samFile) == numpy.str_)
        sys.exit(1)

    if chrom is not None:
        if chrom not in samFile.references:
            # toggle the "chr" prefix once and retry the lookup
            if chrom.startswith("chr"):
                chrom = chrom.split("chr")[1]
            else:
                chrom = "chr" + chrom
        if chrom not in samFile.references:
            print("Can't find references %s in samFile" %chrom)
            return samFile, None
        CACHE_CHROM = chrom
        CACHE_SAMFILE = samFile
        return samFile, chrom
    else:
        # NOTE: single-value return here (no chrom in the result)
        CACHE_SAMFILE = samFile
        return samFile
def fetch_reads(samfile, chrom, start, end, rm_duplicate=True, inner_only=True,
                mapq_min=0, trimLen_max=1e6, rlen_min=1, is_mated=True):
    """To fetch the reads in a given region from a pysam AlignmentFile.

    Args:
        samfile: A Samfile object in pysam.
        chrom: A string of chromosome, e.g., "IV", "chr10".
        start: An integer of the start position for mapped reads.
        end: An integer of the end position for mapped reads.
        rm_duplicate: A bool for only keeping the first one of duplicates.
        inner_only: A bool for only keeping fully region matched reads.
        mapq_min: An integer of the minimum of map quality.
        trimLen_max: An integer of the maximum length of trimmed bases.
        rlen_min: An integer of the minimum of read length.
        is_mated: A bool for mating paired-end reads.

    Returns:
        A dict containing lists of mated reads1 and reads2, and unmated reads1u
        and reads2u, i.e.,
        {'reads1': [r11, r21, ...]
         'reads2': [r12, r22, ...]
         'reads1u': [r*1, r*1, ...]
         'reads2u': [r*2, r*2, ...]}
        reads1 is the 5-end of the fragment, and reads2 is the 3-end of the
        fragment.

    Raises:
        ValueError: An error occurred when fetching reads.
        AssertionError: An error occurred when fetching reads.
    """
    # part 1. check the input and fetch the reads
    chrom = str(chrom)
    if chrom not in samfile.references:
        # Toggle the "chr" prefix and retry with that name, consistent with
        # the chromosome handling used when opening the sam/bam file.
        # (The original only ever stripped "chr", never added it.)
        chrom = chrom[3:] if chrom.startswith("chr") else "chr" + chrom
    try:
        reads = samfile.fetch(chrom, start, end)
    except ValueError:
        reads = []
        print("Cannot fetch reads in region: %s:%d-%d" %(chrom, start, end))
    except AssertionError:
        reads = []
        print("AssertionError in region: %s:%d-%d" %(chrom, start, end))
        print(" - Check that your BAM file is indexed!")

    # part 2. get reads and filter some of them
    qname1, qname2 = [], []
    reads1, reads2 = [], []
    r_prev = None
    for r in reads:
        # filter 4: only keep the first one of duplicates (same query name
        # mapped to the same positions as the previous read)
        if (rm_duplicate and r_prev is not None and r_prev.qname == r.qname
                and r_prev.positions == r.positions):
            r_prev = r
            continue
        r_prev = r
        # filter 1: only partially mapped to the region
        if inner_only and (r.pos is None or r.pos < start or
                           r.aend is None or r.aend > end):
            continue
        # filter 2: too low map quality
        if r.mapq < mapq_min:
            continue
        # filter 3: too long trimmed bases
        if r.rlen - len(r.positions) > trimLen_max:
            continue
        # filter 5: too short mapped length
        if len(r.positions) < rlen_min:
            continue
        if r.is_read2:
            reads2.append(r)
            qname2.append(r.qname)
        else:
            reads1.append(r)
            qname1.append(r.qname)

    # part 2.1 check whether the query names carry a constant trailing mate
    # suffix character (e.g. ".../1" and ".../2"); if BOTH lists do, strip
    # the last character so mates share the same query name before matching.
    FLAG = True
    for i in range(len(qname1) - 1):
        if qname1[i][-1] != qname1[i + 1][-1]:
            FLAG = False
            break
    if FLAG:
        # BUG FIX: the original re-scanned qname1 here instead of qname2,
        # so mixed suffixes among read2 names wrongly triggered stripping.
        for i in range(len(qname2) - 1):
            if qname2[i][-1] != qname2[i + 1][-1]:
                FLAG = False
                break
    if FLAG:
        qname1 = [name[:-1] for name in qname1]
        qname2 = [name[:-1] for name in qname2]

    # part 3. mate the reads: merge the two lists in query-name order
    rv_reads1, rv_reads2 = [], []
    rv_reads1u, rv_reads2u = [], []
    if is_mated:
        idx1 = sorted(range(len(qname1)), key=qname1.__getitem__)
        idx2 = sorted(range(len(qname2)), key=qname2.__getitem__)
        i1, i2 = 0, 0
        while i1 < len(idx1) and i2 < len(idx2):
            if qname1[idx1[i1]] == qname2[idx2[i2]]:
                rv_reads1.append(reads1[idx1[i1]])
                rv_reads2.append(reads2[idx2[i2]])
                i1, i2 = i1 + 1, i2 + 1
            elif qname1[idx1[i1]] < qname2[idx2[i2]]:
                rv_reads1u.append(reads1[idx1[i1]])
                i1 += 1
            else:
                rv_reads2u.append(reads2[idx2[i2]])
                i2 += 1
        # whatever remains on either side is unmated
        for i in range(i1, len(idx1)):
            rv_reads1u.append(reads1[idx1[i]])
        for i in range(i2, len(idx2)):
            rv_reads2u.append(reads2[idx2[i]])
    else:
        rv_reads1u, rv_reads2u = reads1, reads2

    # part 4. return reads
    RV = {}
    RV["reads1"] = rv_reads1
    RV["reads2"] = rv_reads2
    RV["reads1u"] = rv_reads1u
    RV["reads2u"] = rv_reads2u
    return RV
| {
"content_hash": "187e6f4b0a8bae25f16964356d3cd2e3",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 80,
"avg_line_length": 34.54867256637168,
"alnum_prop": 0.5581454918032787,
"repo_name": "huangyh09/brie",
"id": "ba77600ef8fff51308f4f07c67eaab6168f5a70b",
"size": "7928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brie/utils/sam_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "233075"
},
{
"name": "Shell",
"bytes": "1944"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class Status(Model):
    """The status of an Azure resource at the time the operation was called.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar display_status: The short label for the status.
    :vartype display_status: str
    :ivar message: The detailed message for the status, including alerts and
     error messages.
    :vartype message: str
    :ivar timestamp: The timestamp when the status was changed to the current
     value.
    :vartype timestamp: datetime
    """

    # All attributes are server-populated and therefore read-only.
    _validation = {
        'display_status': {'readonly': True},
        'message': {'readonly': True},
        'timestamp': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'display_status': {'key': 'displayStatus', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
    }

    def __init__(self):
        # Fix: initialize the msrest Model base class before defaulting the
        # read-only attributes; the original skipped super().__init__().
        super(Status, self).__init__()
        self.display_status = None
        self.message = None
        self.timestamp = None
| {
"content_hash": "7e8ebb12e6badedda64392617fcc6f84",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 30.514285714285716,
"alnum_prop": 0.6207865168539326,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "af9d5066c871f765eadc707f6ac10a15672bcee1",
"size": "1542",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/models/status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the SelfRegistrationInvitation model (auto-generated schema migration)."""

    dependencies = [
        ('sms', '0001_initial'),
    ]

    operations = [
        # One row per invited phone number within a domain; the unique,
        # indexed token identifies the invitation link/code.
        migrations.CreateModel(
            name='SelfRegistrationInvitation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('domain', models.CharField(max_length=126, db_index=True)),
                ('phone_number', models.CharField(max_length=30, db_index=True)),
                ('token', models.CharField(unique=True, max_length=126, db_index=True)),
                ('app_id', models.CharField(max_length=126, null=True)),
                ('expiration_date', models.DateField()),
                ('created_date', models.DateTimeField()),
                ('odk_url', models.CharField(max_length=126, null=True)),
                ('phone_type', models.CharField(max_length=20, null=True, choices=[('android', 'Android'), ('other', 'Other')])),
                # Null until the invitee completes registration.
                ('registered_date', models.DateTimeField(null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "863d50e06c8b10091fbaeab829afda33",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 129,
"avg_line_length": 41.10344827586207,
"alnum_prop": 0.5503355704697986,
"repo_name": "dimagi/commcare-hq",
"id": "0348bfc3aede7a83ba1e69a06410cf061126d983",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/sms/migrations/0002_add_selfregistrationinvitation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
"""
twython.streaming.types
~~~~~~~~~~~~~~~~~~~~~~~
This module contains classes and methods for :class:`TwythonStreamer` to use.
"""
class TwythonStreamerTypes(object):
    """Dispatch object grouping the available stream endpoints.

    Not every streaming endpoint has nested endpoints: User Streams and
    Site Streams are single streams, while Status Streams expose the
    nested filter, sample and firehose endpoints via ``self.statuses``.
    """

    def __init__(self, streamer):
        self.streamer = streamer
        self.statuses = TwythonStreamerTypesStatuses(streamer)

    def user(self, **params):
        """Open the user stream.

        Accepted params found at:
        https://dev.twitter.com/docs/api/1.1/get/user
        """
        version = self.streamer.api_version
        self.streamer._request(
            'https://userstream.twitter.com/%s/user.json' % version,
            params=params)

    def site(self, **params):
        """Open the site stream.

        Accepted params found at:
        https://dev.twitter.com/docs/api/1.1/get/site
        """
        version = self.streamer.api_version
        self.streamer._request(
            'https://sitestream.twitter.com/%s/site.json' % version,
            params=params)
class TwythonStreamerTypesStatuses(object):
    """Class for different statuses endpoints

    Available so :meth:`TwythonStreamer.statuses.filter()` is available.
    Just a bit cleaner than :meth:`TwythonStreamer.statuses_filter()`,
    :meth:`statuses_sample()`, etc. all being single methods in
    :class:`TwythonStreamer`.
    """
    def __init__(self, streamer):
        self.streamer = streamer
        # Parameters stored by set_dynamic_filter() for dynamic_filter().
        self.params = None

    # NOTE: the docstrings below are raw strings (r"""...""") because they
    # contain ``\*`` Sphinx escapes; in a non-raw string ``\*`` is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    def filter(self, **params):
        r"""Stream statuses/filter

        :param \*\*params: Parameters to send with your stream request

        Accepted params found at:
        https://developer.twitter.com/en/docs/tweets/filter-realtime/api-reference/post-statuses-filter
        """
        url = 'https://stream.twitter.com/%s/statuses/filter.json' \
            % self.streamer.api_version
        self.streamer._request(url, 'POST', params=params)

    def sample(self, **params):
        r"""Stream statuses/sample

        :param \*\*params: Parameters to send with your stream request

        Accepted params found at:
        https://developer.twitter.com/en/docs/tweets/sample-realtime/api-reference/get-statuses-sample
        """
        url = 'https://stream.twitter.com/%s/statuses/sample.json' \
            % self.streamer.api_version
        self.streamer._request(url, params=params)

    def firehose(self, **params):
        r"""Stream statuses/firehose

        :param \*\*params: Parameters to send with your stream request

        Accepted params found at:
        https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
        """
        url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
            % self.streamer.api_version
        self.streamer._request(url, params=params)

    def set_dynamic_filter(self, **params):
        r"""Set/update statuses/filter

        :param \*\*params: Parameters to send with your stream request

        Accepted params found at:
        https://developer.twitter.com/en/docs/tweets/filter-realtime/api-reference/post-statuses-filter
        """
        self.params = params

    def dynamic_filter(self):
        """Stream statuses/filter with dynamic parameters"""
        url = 'https://stream.twitter.com/%s/statuses/filter.json' \
            % self.streamer.api_version
        self.streamer._request(url, 'POST', params=self.params)
| {
"content_hash": "3c8a820968860f183ee981fb26ab398b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 103,
"avg_line_length": 33.443396226415096,
"alnum_prop": 0.6372355430183357,
"repo_name": "Oire/twython",
"id": "81c5c07fadcc062c1d812c7a4508c2fc36c17853",
"size": "3570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "twython/streaming/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139700"
}
],
"symlink_target": ""
} |
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
from boids.flock import Flock
from boids.flight import Flight
from argparse import ArgumentParser
import yaml
import os
def process():
    """Load the flock configuration, build a flock of boids, and animate it."""
    _ROOT = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(_ROOT, 'config/config.yaml')) as config_file:
        # Fix: safe_load never executes arbitrary YAML tags from the config
        # file (and avoids the PyYAML >= 5.1 warning for yaml.load with no
        # explicit Loader).
        config = yaml.safe_load(config_file)

    num_boids = config["number_of_boids"]
    initial_params = np.array([config["x_bounds"], config["y_bounds"]])
    interaction_params = np.array([config["centre_attraction"],
                                   config["retreat_distance"],
                                   config["attraction_distance"],
                                   config["drag_strength"]])

    # make a flock of boids and make them fly
    flock_of_boids = Flock(num_boids, initial_params).init_cond_matrix()
    flying_flock = Flight(flock_of_boids, interaction_params)

    # display the boids: frame bounds are the configured bounds plus margins
    frame_x_min = config["x_bounds"][0] - config["x_margin"]
    frame_x_max = config["x_bounds"][1] + config["x_margin"]
    frame_y_min = config["y_bounds"][0] - config["y_margin"]
    frame_y_max = config["y_bounds"][1] + config["y_margin"]

    figure = plt.figure()
    axes = plt.axes(xlim=(frame_x_min, frame_x_max),
                    ylim=(frame_y_min, frame_y_max))
    scatter = axes.scatter(flying_flock.get_x(), flying_flock.get_y())

    def animate(frame):
        # Fix: the original called update_boids() twice per frame, stepping
        # the simulation twice and pairing x with y from two different
        # states; advance exactly once and reuse the result.
        updated = flying_flock.update_boids()
        # Fix: set_offsets needs a sequence, not a bare zip iterator
        # (which fails on Python 3 / modern matplotlib).
        scatter.set_offsets(list(zip(updated.get_x(), updated.get_y())))

    # Keep a reference to the animation so it is not garbage-collected
    # before plt.show() runs it.
    anim = animation.FuncAnimation(figure, animate,
                                   frames=config["frame_number"],
                                   interval=config["frame_interval"])
    plt.show()


if __name__ == "__main__":
    process()
| {
"content_hash": "4583da67f7deed0b484c6b0825005442",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 145,
"avg_line_length": 27.5,
"alnum_prop": 0.7032467532467532,
"repo_name": "cpmck/boids",
"id": "b7e8bcb7d52cb8bf553e75640f3065885ccc4c13",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boids/betterboids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9546"
}
],
"symlink_target": ""
} |
def getLatestTimestamp(projectPath):
    """Return the latest revision timestamp for the SVN project at *projectPath*.

    Not implemented yet. The original body was the bare undefined name
    ``NYI``, which raised an incidental ``NameError``; raise
    ``NotImplementedError`` explicitly so callers get a clear, intentional
    signal instead.
    """
    raise NotImplementedError("svn.getLatestTimestamp is not implemented")
| {
"content_hash": "e4f5ddda595437dbd8687b1691e6a4c2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 36,
"avg_line_length": 22.5,
"alnum_prop": 0.7777777777777778,
"repo_name": "joehewitt/devon",
"id": "ac83c0732b03e662aff9ef4e9b000e5818f07927",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devon/rcs/svn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "48209"
},
{
"name": "C++",
"bytes": "35018"
},
{
"name": "JavaScript",
"bytes": "27618"
},
{
"name": "Python",
"bytes": "331315"
}
],
"symlink_target": ""
} |
import webapp2
import urllib
from google.appengine.api import mail
import sys
class MainPage(webapp2.RequestHandler):
    """Root handler: answers every GET with a plain-text greeting."""

    def get(self):
        response = self.response
        response.headers['Content-Type'] = 'text/plain'
        response.write('Hello!')
class CheckServer(webapp2.RequestHandler):
    """Probe http://mydomain.com and email the admin when it is unhealthy.

    NOTE(review): this uses the Python 2 ``urllib.urlopen`` API and Google
    App Engine's ``mail`` service -- presumably GAE Python 2.7 runtime code;
    confirm before porting.
    """
    def sendmail(self, errormsg):
        # Hard-coded placeholder addresses ("mydomain.com"); note the
        # apparent "sensder" typo in the sender -- verify before deploying.
        sender_address = "Sender <sensder@mydomain.com>"
        user_address = "Admin <admin@mydomain.com>"
        subject = "Web Server Alarm!"
        body = errormsg
        # Echo the alarm into the HTTP response as well as emailing it.
        self.response.write(body)
        mail.send_mail(sender_address, user_address, subject, body)

    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        try:
            # Fetch the page and read the HTTP status code of the response;
            # any non-200 status triggers an alarm email.
            code = urllib.urlopen("http://mydomain.com").getcode()
            if(code!=200):
                self.sendmail('HTTP Code: '+str(code))
        except IOError as e:
            # Network-level failure (DNS, refused connection, timeout).
            self.sendmail("I/O error({0}): {1}".format(e.errno, e.strerror))
        except Exception as x:
            # Catch-all so the health check itself never 500s silently.
            self.sendmail(str(x))
# WSGI routing table: /check runs the uptime probe, / serves the greeting.
application = webapp2.WSGIApplication([
    ('/check', CheckServer),
    ('/', MainPage),
], debug=True)
"content_hash": "221d0d64ff5abed102906e0b60c3007b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 27.5,
"alnum_prop": 0.6564593301435406,
"repo_name": "indianazhao/checkserver",
"id": "529edc01862ca4e7ba54383b1e70037aca28ec4b",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkserver.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Module providing views for the folderish content page type"""
import json
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from plone import api
from zope.component import getMultiAdapter
# Base64 GIF data URI (a 1x1 transparent pixel) used as the fallback image
# when no scaled image can be produced.
IMG = 'data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACwAAAAAAQABAAACAkQBADs='
class ProjectView(BrowserView):
    """Default view for a project: exposes its image assets for rendering."""

    def __call__(self):
        self.has_assets = len(self.assets()) > 0
        return self.render()

    def render(self):
        return self.index()

    def can_edit(self):
        # Editing controls are shown to any authenticated user.
        return not api.user.is_anonymous()

    def assets(self):
        # The raw 'assets' attribute on the context; an unset value is
        # normalized to an empty dict.
        stored = getattr(aq_inner(self.context), 'assets')
        return dict() if stored is None else stored

    def stored_data(self):
        # assets() is expected to hold a JSON document -- decode it.
        return json.loads(self.assets())

    def image_list(self):
        """Collect the UUIDs of all images contained in the stored items."""
        catalog = api.portal.get_tool(name='portal_catalog')
        images = list()
        for item in self.stored_data()['items']:
            brains = catalog.unrestrictedSearchResults(UID=str(item))
            container = brains[0].getObject()
            for entry in container.unrestrictedTraverse('@@folderListing')():
                images.append(api.content.get_uuid(obj=entry.getObject()))
        return images

    def image_tag(self, uuid):
        """Return url/width/height for a 1170x720 scale of the given image,
        falling back to a transparent placeholder when no scale exists."""
        context = api.content.get(UID=uuid)
        scales = getMultiAdapter((context, self.request), name='images')
        scale = scales.scale('image', width=1170, height=720)
        if scale is None:
            return {'url': IMG, 'width': '1px', 'height': '1px'}
        return {'url': scale.url, 'width': scale.width, 'height': scale.height}
| {
"content_hash": "e1a35c74dad70dc34a16321104d85eec",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 30.76923076923077,
"alnum_prop": 0.581,
"repo_name": "a25kk/newe",
"id": "bb407ec4bc0e852acb6267c76632b96484f53083",
"size": "2024",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/newe.sitecontent/newe/sitecontent/browser/project.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56680"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "105462"
},
{
"name": "JavaScript",
"bytes": "58909"
},
{
"name": "Makefile",
"bytes": "1092"
},
{
"name": "Python",
"bytes": "73174"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
import modelcluster.tags
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0002_initial_data'),
('wagtaildocs', '0002_initial_data'),
('wagtailcore', '0002_initial_data'),
('taggit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Advert',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(null=True, blank=True)),
('text', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AdvertPlacement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('advert', models.ForeignKey(related_name=b'+', to='ethagaval.Advert')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.RichTextField()),
('date', models.DateField(verbose_name=b'Post date')),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(related_name=b'tagged_items', to='ethagaval.BlogPage')),
('tag', models.ForeignKey(related_name='ethagaval_blogpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContactPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('telephone', models.CharField(max_length=20, blank=True)),
('email', models.EmailField(max_length=75, blank=True)),
('address_1', models.CharField(max_length=255, blank=True)),
('address_2', models.CharField(max_length=255, blank=True)),
('city', models.CharField(max_length=255, blank=True)),
('country', models.CharField(max_length=255, blank=True)),
('post_code', models.CharField(max_length=10, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', models.Model),
),
migrations.CreateModel(
name='EventIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date_from', models.DateField(verbose_name=b'Start date')),
('date_to', models.DateField(help_text=b'Not required if event is on a single day', null=True, verbose_name=b'End date', blank=True)),
('time_from', models.TimeField(null=True, verbose_name=b'Start time', blank=True)),
('time_to', models.TimeField(null=True, verbose_name=b'End time', blank=True)),
('audience', models.CharField(max_length=255, choices=[(b'public', b'Public'), (b'private', b'Private')])),
('location', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('cost', models.CharField(max_length=255)),
('signup_link', models.URLField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageSpeaker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('first_name', models.CharField(max_length=255, verbose_name=b'Name', blank=True)),
('last_name', models.CharField(max_length=255, verbose_name=b'Surname', blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255)),
('field_type', models.CharField(max_length=16, choices=[(b'singleline', 'Single line text'), (b'multiline', 'Multi-line text'), (b'email', 'Email'), (b'number', 'Number'), (b'url', 'URL'), (b'checkbox', 'Checkbox'), (b'checkboxes', 'Checkboxes'), (b'dropdown', 'Drop down'), (b'radio', 'Radio buttons'), (b'date', 'Date'), (b'datetime', 'Date/time')])),
('required', models.BooleanField(default=True)),
('choices', models.CharField(help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', max_length=512, blank=True)),
('default_value', models.CharField(help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, blank=True)),
('help_text', models.CharField(max_length=255, blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('to_address', models.CharField(help_text='Optional - form submissions will be emailed to this address', max_length=255, blank=True)),
('from_address', models.CharField(max_length=255, blank=True)),
('subject', models.CharField(max_length=255, blank=True)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'verbose_name': 'Homepage',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='HomePageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HomePageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PersonPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('telephone', models.CharField(max_length=20, blank=True)),
('email', models.EmailField(max_length=75, blank=True)),
('address_1', models.CharField(max_length=255, blank=True)),
('address_2', models.CharField(max_length=255, blank=True)),
('city', models.CharField(max_length=255, blank=True)),
('country', models.CharField(max_length=255, blank=True)),
('post_code', models.CharField(max_length=10, blank=True)),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('biography', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', models.Model),
),
migrations.CreateModel(
name='PersonPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True)),
('page', modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.StandardPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True)),
('page', modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.StandardPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='standardindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='standardindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.StandardIndexPage'),
preserve_default=True,
),
migrations.AddField(
model_name='personpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='personpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.PersonPage'),
preserve_default=True,
),
migrations.AddField(
model_name='homepagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='homepagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.HomePage'),
preserve_default=True,
),
migrations.AddField(
model_name='homepagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='homepagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.HomePage'),
preserve_default=True,
),
migrations.AddField(
model_name='formfield',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'form_fields', to='ethagaval.FormPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'speakers', to='ethagaval.EventPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.EventPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.EventPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.EventIndexPage'),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.BlogPage'),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.BlogPage'),
preserve_default=True,
),
migrations.AddField(
model_name='blogpage',
name='tags',
field=modelcluster.tags.ClusterTaggableManager(to='taggit.Tag', through='ethagaval.BlogPageTag', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
migrations.AddField(
model_name='blogindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.BlogIndexPage'),
preserve_default=True,
),
migrations.AddField(
model_name='advertplacement',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'advert_placements', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AddField(
model_name='advert',
name='page',
field=models.ForeignKey(related_name=b'adverts', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
]
| {
"content_hash": "e5adc1a0f84c95b3a2f1bef5111b61b9",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 369,
"avg_line_length": 51.476027397260275,
"alnum_prop": 0.563701683188078,
"repo_name": "arvindram03/e-thagaval",
"id": "63a8b492aa1ebee7a1240475174f8d4e20ecbc48",
"size": "30086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ethagaval/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1987"
},
{
"name": "HTML",
"bytes": "20087"
},
{
"name": "JavaScript",
"bytes": "218"
},
{
"name": "Python",
"bytes": "84149"
},
{
"name": "Shell",
"bytes": "6308"
}
],
"symlink_target": ""
} |
"""Main function to train Attribute-Mask RCNN models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pprint
from absl import flags
from absl import logging
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from dataloader import mode_keys as ModeKeys
from executor import tpu_executor
from projects.fashionpedia.configs import factory
from projects.fashionpedia.dataloader import input_reader
from projects.fashionpedia.modeling import model_builder
from utils import config_utils
import sys
sys.path.insert(0, 'tpu/models')
from hyperparameters import common_hparams_flags
from hyperparameters import common_tpu_flags
from hyperparameters import params_dict
# Register the TPU- and hyperparameter-related flags shared by all models
# (e.g. --tpu, --use_tpu, --model_dir, --config_file, --params_override).
common_tpu_flags.define_common_tpu_flags()
common_hparams_flags.define_common_hparams_flags()

# Flags specific to this trainer binary.
flags.DEFINE_string(
    'mode', default='train',
    help='Mode to run: `train`, `eval` or `train_and_eval`.')
flags.DEFINE_string(
    'model', default='attribute_mask_rcnn',
    help='Support `attribute_mask_rcnn`.')
flags.DEFINE_integer(
    'num_cores', default=8, help='Number of TPU cores for training.')
flags.DEFINE_string(
    'tpu_job_name', None,
    'Name of TPU worker binary. Only necessary if job name is changed from'
    ' default tpu_worker.')

FLAGS = flags.FLAGS
def main(argv):
  """Builds the Attribute-Mask RCNN model from flags and runs it on TPU.

  Supported modes (--mode):
    * `train`: trains for `params.train.total_steps`, optionally running a
      single evaluation afterwards when --eval_after_training is set.
    * `eval`: polls `params.model_dir` for new checkpoints and evaluates
      each one until the final training step is seen or the wait times out.
    * `train_and_eval`: alternates training and evaluation every
      `params.eval.num_steps_per_eval` steps.

  Args:
    argv: Unused positional command-line arguments (absl passes them in).
  """
  del argv  # Unused.

  # Build the base config for the requested model, then layer on overrides:
  # --config_file, then --params_override, then individual flags.
  params = factory.config_generator(FLAGS.model)
  if FLAGS.config_file:
    params = params_dict.override_params_dict(
        params, FLAGS.config_file, is_strict=True)
  params = params_dict.override_params_dict(
      params, FLAGS.params_override, is_strict=True)
  params.override({
      'use_tpu': FLAGS.use_tpu,
      'model_dir': FLAGS.model_dir,
  }, is_strict=True)
  if not FLAGS.use_tpu:
    # bfloat16 and cross-replica (sync) batch norm are TPU-only features.
    params.override({
        'architecture': {
            'use_bfloat16': False,
        },
        'batch_norm_activation': {
            'use_sync_bn': False,
        },
    }, is_strict=True)

  # Only run spatial partitioning in training mode.
  if FLAGS.mode != 'train':
    params.train.input_partition_dims = None
    params.train.num_cores_per_replica = None

  # Snapshot the params before the platform-specific overrides below so the
  # saved config remains portable across TPU setups.
  params_to_save = params_dict.ParamsDict(params)
  params.override({
      'platform': {
          'eval_master': FLAGS.eval_master,
          'tpu': FLAGS.tpu,
          'tpu_zone': FLAGS.tpu_zone,
          'gcp_project': FLAGS.gcp_project,
      },
      'tpu_job_name': FLAGS.tpu_job_name,
      'train': {
          'num_shards': FLAGS.num_cores,
      },
  }, is_strict=False)
  params.validate()
  params.lock()
  pp = pprint.PrettyPrinter()
  params_str = pp.pformat(params.as_dict())
  logging.info('Model Parameters: %s', params_str)

  # Builds model.
  model_fn = model_builder.ModelFn(params)
  executor = tpu_executor.TpuExecutor(model_fn, params)

  # Prepares input functions for train and eval.
  train_input_fn = input_reader.InputFn(
      params.train.train_file_pattern, params, mode=ModeKeys.TRAIN,
      dataset_type=params.train.train_dataset_type)
  if params.eval.type == 'customized':
    eval_input_fn = input_reader.InputFn(
        params.eval.eval_file_pattern, params, mode=ModeKeys.EVAL,
        dataset_type=params.eval.eval_dataset_type)
  else:
    eval_input_fn = input_reader.InputFn(
        params.eval.eval_file_pattern, params, mode=ModeKeys.PREDICT_WITH_GT,
        dataset_type=params.eval.eval_dataset_type)
  if params.eval.eval_samples:
    eval_times = params.eval.eval_samples // params.eval.eval_batch_size
  else:
    eval_times = None

  # Runs the model.
  if FLAGS.mode == 'train':
    config_utils.save_config(params_to_save, params.model_dir)
    executor.train(train_input_fn, params.train.total_steps)
    if FLAGS.eval_after_training:
      executor.evaluate(eval_input_fn, eval_times)

  elif FLAGS.mode == 'eval':

    def terminate_eval():
      # timeout_fn for checkpoints_iterator: returning True stops waiting.
      logging.info('Terminating eval after %d seconds of no checkpoints',
                   params.eval.eval_timeout)
      return True

    # Runs evaluation when there's a new checkpoint.
    for ckpt in tf.train.checkpoints_iterator(
        params.model_dir,
        min_interval_secs=params.eval.min_eval_interval,
        timeout=params.eval.eval_timeout,
        timeout_fn=terminate_eval):
      # Terminates eval job when final checkpoint is reached.
      # Checkpoint paths look like ".../model.ckpt-<step>".
      current_step = int(six.ensure_str(os.path.basename(ckpt)).split('-')[1])
      logging.info('Starting to evaluate.')
      try:
        executor.evaluate(eval_input_fn, eval_times, ckpt)
        if current_step >= params.train.total_steps:
          logging.info('Evaluation finished after training step %d',
                       current_step)
          break
      except tf.errors.NotFoundError as e:
        # Checkpoint files can be garbage-collected between listing and
        # loading; log and wait for the next checkpoint instead of crashing.
        # (Fixed typo: 'Erorr' -> 'Error'.)
        logging.info('Error occurred during evaluation: NotFoundError: %s', e)

  elif FLAGS.mode == 'train_and_eval':
    config_utils.save_config(params_to_save, params.model_dir)
    num_cycles = int(params.train.total_steps / params.eval.num_steps_per_eval)
    for cycle in range(num_cycles):
      logging.info('Start training cycle %d.', cycle)
      current_cycle_last_train_step = ((cycle + 1)
                                       * params.eval.num_steps_per_eval)
      executor.train(train_input_fn, current_cycle_last_train_step)
      executor.evaluate(eval_input_fn, eval_times)

  else:
    logging.info('Mode not found.')
if __name__ == '__main__':
  logging.set_verbosity(logging.INFO)
  # tf.app.run parses the absl/TF flags and then invokes main(argv).
  tf.app.run(main)
| {
"content_hash": "4e3d8de26d4b976c1a36ebd823d7b57f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 32.35502958579882,
"alnum_prop": 0.6744696415508412,
"repo_name": "tensorflow/tpu",
"id": "a8b797f65643aa5035f76b16ef15b40f55fa3143",
"size": "6157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/official/detection/projects/fashionpedia/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "754301"
},
{
"name": "Dockerfile",
"bytes": "2734"
},
{
"name": "Go",
"bytes": "226317"
},
{
"name": "Jupyter Notebook",
"bytes": "56231509"
},
{
"name": "Makefile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "3444271"
},
{
"name": "Shell",
"bytes": "21032"
},
{
"name": "Starlark",
"bytes": "164"
}
],
"symlink_target": ""
} |
import unittest
import azure.mgmt.compute
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
AZURE_LOCATION = 'eastus'
class TestMgmtCompute(AzureMgmtRecordedTestCase):
    """Recorded integration tests for Compute dedicated hosts / host groups.

    NOTE(review): these tests run against stored recordings via
    `recorded_by_proxy`; the request sequence and payloads must stay in sync
    with the recordings, so edit with care.
    """

    def setup_method(self, method):
        # A fresh ComputeManagementClient is created for each test method.
        self.mgmt_client = self.create_mgmt_client(
            azure.mgmt.compute.ComputeManagementClient
        )

    @unittest.skip('hard to test')
    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_dedicated_hosts(self, resource_group):
        """Exercises the dedicated host (group) lifecycle:
        create -> get -> list -> update -> delete.
        """
        HOST_GROUP_NAME = self.get_resource_name("hostgroup")
        HOST_NAME = self.get_resource_name("hostname")
        # Create or update a dedicated host group.[put]
        BODY = {
            "location": "eastus",
            "tags": {
                "department": "finance"
            },
            "zones": [
                "1"
            ],
            # Number of fault domains the host group can span.
            "platform_fault_domain_count": "3"
        }
        result = self.mgmt_client.dedicated_host_groups.create_or_update(resource_group.name, HOST_GROUP_NAME, BODY)
        # Create or update a dedicated host .[put]
        BODY = {
            "location": "eastus",
            "tags": {
                "department": "HR"
            },
            "platform_fault_domain": "1",
            "sku": {
                "name": "DSv3-Type1"
            }
        }
        # begin_* returns a poller; .result() blocks until the LRO completes.
        result = self.mgmt_client.dedicated_hosts.begin_create_or_update(resource_group.name, HOST_GROUP_NAME, HOST_NAME, BODY)
        result = result.result()
        # Get a dedicated host group.[get]
        result = self.mgmt_client.dedicated_host_groups.get(resource_group.name, HOST_GROUP_NAME)
        # Get a dedicated host.[get]
        result = self.mgmt_client.dedicated_hosts.get(resource_group.name, HOST_GROUP_NAME, HOST_NAME)
        # List dedicated host groups in a resource group (TODO: need swagger file)
        result = self.mgmt_client.dedicated_host_groups.list_by_resource_group(resource_group.name)
        # List dedicated hosts in host group (TODO: need swagger file)
        result = self.mgmt_client.dedicated_hosts.list_by_host_group(resource_group.name, HOST_GROUP_NAME)
        # List dedicated host groups in a subscription (TODO: need swagger file)
        result = self.mgmt_client.dedicated_host_groups.list_by_subscription()
        # Update a dedicated host group.[put]
        BODY = {
            "tags": {
                "department": "finance"
            },
            "platform_fault_domain_count": "3"
        }
        result = self.mgmt_client.dedicated_host_groups.update(resource_group.name, HOST_GROUP_NAME, BODY)
        # Update a dedicated host (TODO: need swagger file )
        BODY = {
            "tags": {
                "department": "HR"
            },
        }
        result = self.mgmt_client.dedicated_hosts.begin_update(resource_group.name, HOST_GROUP_NAME, HOST_NAME, BODY)
        result = result.result()
        # Delete a dedicated host (TODO: need swagger file)
        result = self.mgmt_client.dedicated_hosts.begin_delete(resource_group.name, HOST_GROUP_NAME, HOST_NAME)
        result = result.result()
        # Delete a dedicated host group (TODO: need swagger file)
        result = self.mgmt_client.dedicated_host_groups.delete(resource_group.name, HOST_GROUP_NAME)
| {
"content_hash": "6168d2d74da7c777e28bf3e13bf4a0be",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 127,
"avg_line_length": 37.91954022988506,
"alnum_prop": 0.6217035465292513,
"repo_name": "Azure/azure-sdk-for-python",
"id": "35e4bb6212c61be77d296ffd6ab15fccd12f4848",
"size": "3697",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/tests/test_mgmt_compute_dedicated_hosts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from datetime import datetime, timezone
from platform import python_implementation, python_version
from xml.etree import ElementTree

import requests

PACKAGE_VERSION = '0.1.4'
def timestamp():
    """Return the current UTC time as a naive ISO-8601 string.

    The output matches the historical ``datetime.utcnow().isoformat()``
    format (no timezone offset), e.g. ``'2024-01-01T12:00:00.123456'``.
    """
    # datetime.utcnow() is deprecated since Python 3.12; take an aware UTC
    # timestamp and drop the tzinfo to keep the original naive format.
    # The previous str(...) wrapper was redundant: isoformat() returns str.
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat()
class Api:
    """
    Thin wrapper around the Internode customer API.

    Stores the account credentials once and sends them with every request,
    together with a descriptive User-Agent header identifying this client.
    """
    spec_version = '1.5'
    host = "https://customer-webtools-api.internode.on.net/api/v1.5"
    headers = {
        'User-Agent': 'internode.py/%s (%s %s, api/%s)' % (PACKAGE_VERSION, python_implementation(), python_version(), spec_version),
    }

    def __init__(self, username, password):
        """
        Initializes Api with your Internode account username and password.

        Args:
            username: Username for your Internode account. (do not include
                @internode.on.net)
            password: Password for your Internode account.
        """
        self.auth = (username, password)

    def get(self, url="", **kwargs):
        """
        Sends a GET request to the Internode API and returns the parsed XML
        document root.

        Args:
            url: Additional URL part. Appended to the Internode API URL.

        Returns:
            API response. See:
            http://docs.python-requests.org/en/latest/user/quickstart/#response-content
        """
        endpoint = "%s/%s" % (self.host, url)
        response = requests.get(endpoint, auth=self.auth, headers=self.headers, **kwargs)

        # Handle missing or invalid authentication
        assert response.status_code != 401, "Request failed. Authentication was missing or invalid."

        # The server may answer with a 500 status code yet still return a
        # parseable body, so look for an embedded error element rather than
        # trusting the status code alone.
        document = ElementTree.fromstring(response.content)
        error_message = document.find('error/msg')
        assert error_message is None, "Request failed. Server responded with an error: %s" % error_message.text

        return document
class Account:
    """
    Represents an Internode account. An account owns one or more Services
    (e.g. Home ADSL, Business ADSL, etc.).
    """
    services = {}

    def __init__(self, username, password):
        """
        Initializes Account with your Internode account username and password.

        Args:
            username: Username for your Internode account. (do not include
                @internode.on.net)
            password: Password for your Internode account.
        """
        self.api = Api(username, password)

    def get_services(self):
        """
        Retrieves all of the Services associated with this account.
        """
        document = self.api.get()
        listing = document.find('api/services')
        assert listing is not None, "XML was not as expected"
        assert int(listing.get('count')) > 0, "There are no services for this account"

        # Only Personal_ADSL services are exposed; keyed by service ID text.
        self.services = {
            node.text: Service(int(node.text), self.api)
            for node in listing
            if node.get('type') == 'Personal_ADSL'
        }
        return self.services
class Service:
    """A single service on an Internode Account (see Account.get_services)."""

    def __init__(self, id, api):
        """
        Initializes Service.

        Args:
            id: Service ID
            api: Instance of the Api class. The service ID must belong to an
                Account with the username and password provided to the Api instance.
        """
        self.id = id
        self.api = api

    def get_service(self):
        """
        Retrieves information about this service.

        Returns:
            Dict of service attributes keyed by XML tag name. The yes/no
            flags ``excess-charged``, ``excess-restrict-access`` and
            ``excess-shaped`` are converted to bool; ``id`` and ``quota``
            are converted to int. All other values stay as strings.
        """
        tree = self.api.get('/%s/service' % self.id)
        service_tree = tree.find('api/service')
        assert service_tree is not None, "XML was not as expected"

        # Copy every child element verbatim as tag -> text.
        self.service = {}
        for i in service_tree:
            self.service[i.tag] = i.text

        # Convert to bool where appropriate
        if "excess-charged" in self.service:
            self.service["excess-charged"] = self.service["excess-charged"] == 'yes'
        if "excess-restrict-access" in self.service:
            self.service["excess-restrict-access"] = self.service["excess-restrict-access"] == 'yes'
        if "excess-shaped" in self.service:
            self.service["excess-shaped"] = self.service["excess-shaped"] == 'yes'

        # Convert to int where appropriate
        if "id" in self.service:
            self.service["id"] = int(self.service["id"])
        if "quota" in self.service:
            self.service["quota"] = int(self.service["quota"])

        return self.service

    def get_history(self, days=None, verbose=False):
        """
        Retrieves usage history for this service.

        Args:
            verbose: When true, output will include a breakdown of the usage,
                amount uploaded and downloaded, to both metered and non-metered
                sources.
            days: Number of days of history to include.

        Returns:
            OrderedDict mapping day string -> usage dict. Each usage dict may
            contain a ``total`` int and, when the server provides them,
            ``metered``/``unmetered`` sub-dicts with ``up``/``down`` ints.
        """
        params = {
            "verbose": int(verbose)
        }
        if days is not None:
            # +1 because the server's count includes the current (partial) day.
            # NOTE(review): presumed reason for the off-by-one — confirm
            # against the Internode API docs.
            days = int(days) + 1
            params["count"] = days

        tree = self.api.get('/%s/history' % self.id, params=params)
        history_tree = tree.find('api/usagelist')
        assert history_tree is not None, "Response was not as expected and can not be processed further."

        # Preserve the server's day ordering.
        self.history = OrderedDict()
        for element in history_tree:
            total = element.find('traffic[@name="total"]')
            unmetered_up = element.find('traffic[@direction="up"][@name="unmetered"]')
            unmetered_down = element.find('traffic[@direction="down"][@name="unmetered"]')
            metered_up = element.find('traffic[@direction="up"][@name="metered"]')
            metered_down = element.find('traffic[@direction="down"][@name="metered"]')

            output = {}
            if total is not None:
                output['total'] = int(total.text)
            # Create the sub-dicts only when at least one direction exists,
            # so absent breakdowns do not produce empty dicts.
            if unmetered_up is not None or unmetered_down is not None:
                output['unmetered'] = {}
            if metered_up is not None or metered_down is not None:
                output['metered'] = {}
            if unmetered_up is not None:
                output['unmetered']['up'] = int(unmetered_up.text)
            if unmetered_down is not None:
                output['unmetered']['down'] = int(unmetered_down.text)
            if metered_up is not None:
                output['metered']['up'] = int(metered_up.text)
            if metered_down is not None:
                output['metered']['down'] = int(metered_down.text)
            self.history[element.get('day')] = output

        return self.history

    def get_usage(self):
        """
        Retrieves current usage information for this service.

        Returns:
            Dict with ``name``, ``plan-interval``, ``rollover`` and ``unit``
            copied from the traffic element's attributes (strings), plus
            ``quota`` and ``usage`` as ints.
        """
        tree = self.api.get('/%s/usage' % self.id)
        traffic_tree = tree.find('api/traffic')
        assert traffic_tree is not None, "Response was not as expected and can not be processed further."

        self.usage = {}
        for i in ['name', 'plan-interval', 'rollover', 'unit']:
            self.usage[i] = traffic_tree.get(i)
        self.usage['quota'] = int(traffic_tree.get('quota'))
        self.usage['usage'] = int(traffic_tree.text)
        return self.usage
| {
"content_hash": "fe091517dcd2366a1870241c46d44526",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 133,
"avg_line_length": 36.25247524752475,
"alnum_prop": 0.5922436160043698,
"repo_name": "lachlanmcdonald/internode-py",
"id": "06e18cf4daa23b4e8dc53629df9be2bbfb4cd7d8",
"size": "7323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "internode/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9999"
}
],
"symlink_target": ""
} |
"""
StackState.
Kubernetes topology extraction
Collects topology from k8s API.
"""
from collections import defaultdict
# 3rd party
import requests
# project
from checks import AgentCheck
from utils.kubernetes import KubeUtil
class KubernetesTopology(AgentCheck):
    """Agent check that extracts Kubernetes topology (components and
    relations) from the Kubernetes API via KubeUtil and emits it as a
    StackState snapshot.

    NOTE(review): this module uses iteritems()/itervalues(), i.e. it
    targets Python 2.
    """

    INSTANCE_TYPE = "kubernetes"
    SERVICE_CHECK_NAME = "kubernetes.topology_information"

    def __init__(self, name, init_config, agentConfig, instances=None):
        # Only a single configured instance is supported per check.
        if instances is not None and len(instances) > 1:
            raise Exception('Kubernetes check only supports one configured instance.')
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        inst = instances[0] if instances is not None else None
        # use_kubelet=False: topology is read from the API server, not the kubelet.
        self.kubeutil = KubeUtil(init_config=init_config, instance=inst, use_kubelet=False)
        if not self.kubeutil.init_success:
            if self.kubeutil.left_init_retries > 0:
                # Retries remain: pause this run instead of failing permanently.
                self.log.warning("Kubelet client failed to initialized for now, pausing the Kubernetes check.")
            else:
                raise Exception('Unable to initialize Kubelet client. Try setting the host parameter. The Kubernetes check failed permanently.')

    def check(self, instance):
        """Runs one topology collection cycle wrapped in a snapshot.

        Emits a CRITICAL service check on timeout or any other extraction
        failure; the snapshot is always closed in the finally block.
        """
        instance_key = {'type': self.INSTANCE_TYPE, 'url': self.kubeutil.kubernetes_api_root_url}
        msg = None
        status = None
        url = self.kubeutil.kubernetes_api_url
        if not url:
            raise Exception('Unable to reach kubernetes. Try setting the master_name and master_port parameter.')
        self.start_snapshot(instance_key)
        try:
            self._extract_topology(instance_key)
        except requests.exceptions.Timeout as e:
            # If there's a timeout
            msg = "%s seconds timeout when hitting %s" % (self.kubeutil.timeoutSeconds, url)
            status = AgentCheck.CRITICAL
        except Exception as e:
            self.log.warning('kubernetes topology check %s failed: %s' % (url, str(e)))
            msg = str(e)
            status = AgentCheck.CRITICAL
        finally:
            # NOTE(review): `is` identity check relies on CRITICAL being a
            # shared constant; `==` would be safer — confirm.
            if status is AgentCheck.CRITICAL:
                self.service_check(self.SERVICE_CHECK_NAME, status, message=msg)
            self.stop_snapshot(instance_key)

    def _extract_topology(self, instance_key):
        # Order matters only for readability; each extractor emits its own
        # components/relations independently.
        self._extract_services(instance_key)
        self._extract_nodes(instance_key)
        self._extract_pods(instance_key)
        self._link_pods_to_services(instance_key)
        self._extract_deployments(instance_key)

    def _extract_services(self, instance_key):
        """Emits one KUBERNETES_SERVICE component per service."""
        for service in self.kubeutil.retrieve_services_list()['items']:
            data = dict()
            data['type'] = service['spec']['type']
            data['namespace'] = service['metadata']['namespace']
            data['ports'] = service['spec'].get('ports', [])
            data['labels'] = self._make_labels(service['metadata'])
            # Headless services have no clusterIP.
            if 'clusterIP' in service['spec'].keys():
                data['cluster_ip'] = service['spec']['clusterIP']
            self.component(instance_key, service['metadata']['name'], {'name': 'KUBERNETES_SERVICE'}, data)

    def _extract_nodes(self, instance_key):
        """Emits one KUBERNETES_NODE component per cluster node."""
        for node in self.kubeutil.retrieve_nodes_list()['items']:
            status_addresses = node['status'].get("addresses", [])
            # Index the address list by its 'type' discriminator.
            addresses = {item['type']: item['address'] for item in status_addresses}
            data = dict()
            data['labels'] = self._make_labels(node['metadata'])
            data['internal_ip'] = addresses.get('InternalIP', None)
            data['legacy_host_ip'] = addresses.get('LegacyHostIP', None)
            data['hostname'] = addresses.get('Hostname', None)
            data['external_ip'] = addresses.get('ExternalIP', None)
            self.component(instance_key, node['metadata']['name'], {'name': 'KUBERNETES_NODE'}, data)

    def _extract_deployments(self, instance_key):
        """Emits KUBERNETES_DEPLOYMENT components and CREATED relations to
        the replicasets matched by the deployment's pod-template labels."""
        for deployment in self.kubeutil.retrieve_deployments_list()['items']:
            data = dict()
            # External id is prefixed to avoid clashing with pod/service names.
            externalId = "deployment: %s" % deployment['metadata']['name']
            data['namespace'] = deployment['metadata']['namespace']
            data['name'] = deployment['metadata']['name']
            data['labels'] = self._make_labels(deployment['metadata'])
            deployment_template = deployment['spec']['template']
            if deployment_template and deployment_template['metadata']['labels'] and len(deployment_template['metadata']['labels']) > 0:
                data['template_labels'] = self._make_labels(deployment_template['metadata'])
                # Replicasets are matched by the template's label selector.
                replicasets = self.kubeutil.retrieve_replicaset_filtered_list(deployment['metadata']['namespace'], deployment_template['metadata']['labels'])
                if replicasets['items']:
                    for replicaset in replicasets['items']:
                        self.relation(instance_key, externalId, replicaset['metadata']['name'], {'name': 'CREATED'}, dict())
            self.component(instance_key, externalId, {'name': 'KUBERNETES_DEPLOYMENT'}, data)

    def _extract_pods(self, instance_key):
        """Emits KUBERNETES_POD components, PLACED_ON relations to nodes,
        container components, and synthesizes KUBERNETES_REPLICASET
        components from pod ownerReferences (with CONTROLS relations)."""
        # replicaset name -> list of {'name': pod_name} dicts
        replicasets_to_pods = defaultdict(list)
        # replicaset name -> component data (taken from the first owning pod)
        replicaset_to_data = dict()
        for pod in self.kubeutil.retrieve_master_pods_list()['items']:
            data = dict()
            pod_name = pod['metadata']['name']
            data['uid'] = pod['metadata']['uid']
            data['namespace'] = pod['metadata']['namespace']
            data['labels'] = self._make_labels(pod['metadata'])
            self.component(instance_key, pod_name, {'name': 'KUBERNETES_POD'}, data)

            relation_data = dict()
            # Unscheduled pods have no nodeName yet.
            if 'nodeName' in pod['spec']:
                self.relation(instance_key, pod_name, pod['spec']['nodeName'], {'name': 'PLACED_ON'}, relation_data)

            # Containers are only extracted for scheduled pods that report
            # container statuses.
            if 'containerStatuses' in pod['status'].keys():
                if 'nodeName' in pod['spec']:
                    pod_node_name = pod['spec']['nodeName']
                    if 'podIP' in pod['status']:
                        pod_ip = pod['status']['podIP']
                    else:
                        pod_ip = None
                    if 'hostIP' in pod['status']:
                        host_ip = pod['status']['hostIP']
                    else:
                        host_ip = None
                    self._extract_containers(instance_key, pod_name, pod_ip, host_ip, pod_node_name, pod['metadata']['namespace'], pod['status']['containerStatuses'])

            # Group pods under their owning ReplicaSet; the replicaset
            # component data is copied from the first pod encountered.
            if 'ownerReferences' in pod['metadata'].keys():
                for reference in pod['metadata']['ownerReferences']:
                    if reference['kind'] == 'ReplicaSet':
                        data = dict()
                        data['name'] = pod_name
                        replicasets_to_pods[reference['name']].append(data)
                        if reference['name'] not in replicaset_to_data:
                            replicaset_data = dict()
                            replicaset_data['labels'] = self._make_labels(pod['metadata'])
                            replicaset_data['namespace'] = pod['metadata']['namespace']
                            replicaset_to_data[reference['name']] = replicaset_data

        # Emit the synthesized replicasets and their CONTROLS relations.
        for replicaset_name in replicasets_to_pods:
            self.component(instance_key, replicaset_name, {'name': 'KUBERNETES_REPLICASET'}, replicaset_to_data[replicaset_name])
            for pod in replicasets_to_pods[replicaset_name]:
                self.relation(instance_key, replicaset_name, pod['name'], {'name': 'CONTROLS'}, dict())

    def _extract_containers(self, instance_key, pod_name, pod_ip, host_ip, host_name, namespace, statuses):
        """Emits a KUBERNETES_CONTAINER component per container status plus
        CONSISTS_OF (pod->container) and HOSTED_ON (container->node)
        relations."""
        for containerStatus in statuses:
            # Container id (runtime-prefixed) doubles as the component id.
            container_id = containerStatus['containerID']
            data = dict()
            data['pod_ip'] = pod_ip
            data['host_ip'] = host_ip
            data['namespace'] = namespace
            data['labels'] = ["namespace:%s" % namespace]
            data['docker'] = {
                'image': containerStatus['image'],
                'container_id': container_id
            }
            self.component(instance_key, container_id, {'name': 'KUBERNETES_CONTAINER'}, data)
            relation_data = dict()
            self.relation(instance_key, pod_name, container_id, {'name': 'CONSISTS_OF'}, relation_data)
            self.relation(instance_key, container_id, host_name, {'name': 'HOSTED_ON'}, relation_data)

    def _link_pods_to_services(self, instance_key):
        """Emits EXPOSES relations (service -> pod) derived from endpoint
        subset addresses whose targetRef is a Pod."""
        for endpoint in self.kubeutil.retrieve_endpoints_list()['items']:
            # Endpoint objects share their name with the service they back.
            service_name = endpoint['metadata']['name']
            if 'subsets' in endpoint:
                for subset in endpoint['subsets']:
                    if 'addresses' in subset:
                        for address in subset['addresses']:
                            if 'targetRef' in address.keys() and address['targetRef']['kind'] == 'Pod':
                                data = dict()
                                pod_name = address['targetRef']['name']
                                self.relation(instance_key, service_name, pod_name, {'name': 'EXPOSES'}, data)

    @staticmethod
    def extract_metadata_labels(metadata):
        """
        Extract labels from metadata section coming from the kubelet API.

        Returns a defaultdict mapping "<namespace>/<name>" (or just "<name>"
        when no namespace is present) to a list of u"key:value" strings.
        """
        kube_labels = defaultdict(list)
        name = metadata.get("name")
        namespace = metadata.get("namespace")
        labels = metadata.get("labels")
        if name and labels:
            if namespace:
                key = "%s/%s" % (namespace, name)
            else:
                key = name
            for k, v in labels.iteritems():
                kube_labels[key].append(u"%s:%s" % (k, v))

        return kube_labels

    def _make_labels(self, metadata):
        """Returns a flat, sorted list of "key:value" label strings, always
        including a synthetic "namespace:<ns>" entry when available."""
        original_labels = self._flatten_dict(KubernetesTopology.extract_metadata_labels(metadata=metadata))
        if 'namespace' in metadata:
            original_labels.append("namespace:%s" % metadata['namespace'])
        return original_labels

    def _flatten_dict(self, dict_of_list):
        # Flatten {key: [values]} into a sorted, de-duplicated list.
        from itertools import chain
        return sorted(set(chain.from_iterable(dict_of_list.itervalues())))
| {
"content_hash": "1af0c91919f033e91d438a9e0255c7d5",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 166,
"avg_line_length": 46.58447488584475,
"alnum_prop": 0.577827876886885,
"repo_name": "StackVista/sts-agent-integrations-core",
"id": "9792033741a77286d13595cc6abd7d92c2070a1d",
"size": "10202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes_topology/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "127"
},
{
"name": "Erlang",
"bytes": "15429"
},
{
"name": "Go",
"bytes": "1471"
},
{
"name": "Perl",
"bytes": "5845"
},
{
"name": "Python",
"bytes": "2138291"
},
{
"name": "Ruby",
"bytes": "169389"
},
{
"name": "Shell",
"bytes": "15492"
},
{
"name": "TSQL",
"bytes": "1239"
}
],
"symlink_target": ""
} |
"""
This is the SimKit :mod:`~simkit.core.models` module that contains
definitions for the :class:`~simkit.core.models.Model` class.
The SimKit model contains five layers:
:class:`~simkit.core.layers.Data`,
:class:`~simkit.core.layers.Formulas`,
:class:`~simkit.core.layers.Calculations`,
:class:`~simkit.core.layers.Outputs` and
:class:`~simkit.core.layers.Simulations`. The
:class:`~simkit.core.layers.Data` layer organizes
:ref:`data-sources` by providing methods to add and load data for SimKit.
The :class:`~simkit.core.layers.Formulas` layer loads
:ref:`formulas` used by :class:`~simkit.core.layers.Calculations`
calculations. The :class:`~simkit.core.layers.Outputs` layer
organizes the calculated outputs for use in other calculations. Finally the
:class:`~simkit.core.layers.Simulations` layer organizes
options such as how long the simulation should run and takes care of actually
running the simulation.
"""
from past.builtins import basestring
import importlib
import json
import os
import copy
from simkit.core import logging, _listify, CommonBase, Parameter
LOGGER = logging.getLogger(__name__)
LAYERS_MOD = '.layers'  # relative module that defines the layer classes
LAYERS_PKG = 'simkit.core'  # package anchoring the relative import above
# default mapping of layer names to class names looked up in LAYERS_MOD
LAYER_CLS_NAMES = {'data': 'Data', 'calculations': 'Calculations',
                   'formulas': 'Formulas', 'outputs': 'Outputs',
                   'simulations': 'Simulations'}
class ModelParameter(Parameter):
    """Parameter describing one model layer; declares its recognized attributes."""
    _attrs = ['layer', 'module', 'package', 'path', 'sources']
class ModelBase(CommonBase):
    """
    Base model meta class. If model has class attributes "modelpath" and
    "modelfile" then layer class names and model configuration will be read from
    the file on that path. Otherwise layer class names will be read from the
    class attributes.
    """
    # names of the class attributes CommonBase reads for path/file discovery
    _path_attr = 'modelpath'
    _file_attr = 'modelfile'
    _param_cls = ModelParameter
    # names of the Meta attributes that configure layer loading
    _layers_cls_attr = 'layer_cls_names'
    _layers_mod_attr = 'layers_mod'
    _layers_pkg_attr = 'layers_pkg'
    _cmd_layer_attr = 'cmd_layer_name'
    # defaults applied to the Meta object when a subclass does not set them
    _attr_default = {
        _layers_cls_attr: LAYER_CLS_NAMES, _layers_mod_attr: LAYERS_MOD,
        _layers_pkg_attr: LAYERS_PKG, _cmd_layer_attr: 'simulations'
    }
    def __new__(mcs, name, bases, attr):
        # use only with Model subclasses
        if not CommonBase.get_parents(bases, ModelBase):
            return super(ModelBase, mcs).__new__(mcs, name, bases, attr)
        attr = mcs.set_meta(bases, attr)
        # set param file full path if data source path and file specified or
        # try to set parameters from class attributes except private/magic
        attr = mcs.set_param_file_or_parameters(attr)
        # set default meta attributes
        meta = attr[mcs._meta_attr]
        for ma, dflt in mcs._attr_default.items():
            a = getattr(meta, ma, None)
            if a is None:
                setattr(meta, ma, dflt)
        return super(ModelBase, mcs).__new__(mcs, name, bases, attr)
class Model(metaclass=ModelBase):
    """
    A class for models. SimKit is a subclass of the :class:`Model` class.

    :param modelfile: The name of the JSON file with model data.
    :type modelfile: str
    """
    def __init__(self, modelfile=None):
        meta = getattr(self, ModelBase._meta_attr)
        parameters = getattr(self, ModelBase._param_attr)
        # load modelfile if it's an argument
        if modelfile is not None:
            #: model file
            self.param_file = os.path.abspath(modelfile)
            LOGGER.debug('modelfile: %s', modelfile)
        else:
            # fall back on the model file declared on the class (if any)
            modelfile = self.param_file
        # check meta class for model if declared inline
        if parameters:
            # TODO: separate model and parameters according to comments in #78
            #: dictionary of the model
            self.model = model = copy.deepcopy(parameters)
        else:
            #: dictionary of the model
            self.model = model = None
        # layer attributes are initialized in the meta class or _initialize()
        #: dictionary of model layer classes
        self.layers = {}
        #: state of model, initialized or uninitialized
        self._state = 'uninitialized'
        # need either model file or model and layer class names to initialize
        ready_to_initialize = ((modelfile is not None or model is not None) and
                               meta.layer_cls_names is not None)
        if ready_to_initialize:
            self._initialize()  # initialize using modelfile or model

    @property
    def state(self):
        """
        current state of the model
        """
        return self._state

    def _load(self, layer=None):
        """
        Load or update all or part of :attr:`model`.

        :param layer: Optionally load only specified layer.
        :type layer: str
        """
        # open model file for reading and convert JSON object to dictionary
        with open(self.param_file, 'r') as param_file:
            file_params = json.load(param_file)
        # NOTE: use a distinct loop variable -- the original code reused
        # ``layer`` here, clobbering the argument so the check below always
        # saw the last key read from the file instead of the caller's value
        for file_layer, params in file_params.items():
            # update parameters from file
            self.parameters[file_layer] = ModelParameter(**params)
        # if layer argument spec'd then only update/load spec'd layer
        if not layer or not self.model:
            # update/load model if layer not spec'd or if no model exists yet
            # TODO: separate model and parameters according to comments in #78
            self.model = copy.deepcopy(self.parameters)
        else:
            # convert non-sequence to tuple
            layers = _listify(layer)
            # update/load layers
            for layer in layers:
                self.model[layer] = copy.deepcopy(self.parameters[layer])

    def _update(self, layer=None):
        """
        Update layers in model.

        :param layer: Optionally update only the specified layer(s).
        :type layer: str
        """
        meta = getattr(self, ModelBase._meta_attr)
        if not layer:
            layers = self.layers
        else:
            # convert non-sequence to tuple
            layers = _listify(layer)
        for layer in layers:
            # relative path to layer files from model file
            path = os.path.abspath(os.path.join(meta.modelpath, layer))
            getattr(self, layer).load(path)

    def _initialize(self):
        """
        Initialize model and layers.
        """
        meta = getattr(self, ModelBase._meta_attr)
        # read modelfile, convert JSON and load/update model
        if self.param_file is not None:
            self._load()
        LOGGER.debug('model:\n%r', self.model)
        # initialize layers
        # FIXME: move import inside loop for custom layers in different modules
        mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
        src_model = {}
        for layer, value in self.model.items():
            # from layers module get the layer's class definition
            layer_cls = getattr(mod, meta.layer_cls_names[layer])  # class def
            self.layers[layer] = layer_cls  # add layer class def to model
            # check if model layers are classes
            src_value = {}  # layer value generated from source classes
            for src in value['sources']:
                # check if source has keyword arguments
                try:
                    src, kwargs = src
                except (TypeError, ValueError):
                    kwargs = {}  # no keyword arguments
                # skip if not a source class
                if isinstance(src, basestring):
                    continue
                # generate layer value from source class
                src_value[src.__name__] = {'module': src.__module__,
                                           'package': None}
                # update layer keyword arguments
                src_value[src.__name__].update(kwargs)
            # use layer values generated from source class
            if src_value:
                value = src_model[layer] = src_value
            else:
                srcmod, srcpkg = value.get('module'), value.get('package')
                try:
                    value = dict(value['sources'])
                except ValueError:
                    value = dict.fromkeys(value['sources'], {})
                for src in value.keys():
                    if srcmod is not None:
                        value[src]['module'] = srcmod
                    if srcpkg is not None:
                        value[src]['package'] = srcpkg
            # set layer attribute with model data
            setattr(self, layer, layer_cls(value))
        # update model with layer values generated from source classes
        if src_model:
            self.model.update(src_model)
        self._update()
        self._state = 'initialized'

    def load(self, modelfile, layer=None):
        """
        Load or update a model or layers in a model.

        :param modelfile: The name of the json file to load.
        :type modelfile: str
        :param layer: Optionally load only specified layer.
        :type layer: str
        """
        # read modelfile, convert JSON and load/update model
        self.param_file = modelfile
        self._load(layer)
        self._update(layer)

    def edit(self, layer, item, delete=False):
        """
        Edit model.

        :param layer: Layer of model to edit
        :type layer: str
        :param item: Items to edit.
        :type item: dict
        :param delete: Flag to return
            :class:`~simkit.core.layers.Layer` to delete item.
        :type delete: bool
        :raises AttributeError: if the layer or an item is missing
        """
        # get layer attribute with model data
        if hasattr(self, layer):
            layer_obj = getattr(self, layer)
        else:
            # %-interpolate: AttributeError does not format extra args itself
            raise AttributeError('missing layer: %s' % layer)
        if delete:
            return layer_obj
        # iterate over items and edit layer
        for k, v in item.items():
            if k in layer_obj.layer:
                layer_obj.edit(k, v)  # edit layer
            else:
                raise AttributeError('missing layer item: %s' % k)
            # update model data
            if k in self.model[layer]:
                self.model[layer][k].update(v)
            else:
                raise AttributeError('missing model layer item: %s' % k)

    def add(self, layer, items):
        """
        Add items in model.
        """
        for k in items.keys():
            if k in self.model[layer]:
                raise Exception('item %s is already in layer %s' % (k, layer))
        self.model[layer].update(items)
        # this should also update Layer.layer, the layer data
        # same as calling layer constructor
        # so now just need to add items to the layer
        for k, v in items.items():
            getattr(self, layer).add(k, v['module'], v.get('package'))

    def delete(self, layer, items):
        """
        Delete items in model.
        """
        # Use edit to get the layer obj containing item
        items = _listify(items)  # make items a list if it's not
        layer_obj = self.edit(layer, dict.fromkeys(items), delete=True)
        for k in items:
            if k in layer_obj.layer:
                layer_obj.delete(k)
            else:
                raise AttributeError('item %s missing from layer %s' %
                                     (k, layer))
        # don't need to pop items from self.model, because self.layer
        # points to the same object as the item in model

    def save(self, modelfile, layer=None):
        """
        Save a model file.

        :param modelfile: The name of the json file to save.
        :type modelfile: str
        :param layer: Optionally save only specified layer.
        :type layer: str
        """
        if layer:
            obj = {layer: self.model[layer]}
        else:
            obj = self.model
        with open(modelfile, 'w') as fp:
            json.dump(obj, fp, indent=2, sort_keys=True)

    @property
    def registries(self):
        # registry of every initialized layer, keyed by layer name
        return {layer: getattr(self, layer).reg
                for layer in self.layers}

    @property
    def cmd_layer(self):
        # the layer that hosts commands (Meta.cmd_layer_name, default
        # 'simulations'); NotImplemented when that layer does not exist
        meta = getattr(self, ModelBase._meta_attr)
        return getattr(self, meta.cmd_layer_name, NotImplemented)

    @property
    def commands(self):
        # names of the commands exposed by the command layer's registry
        return self.cmd_layer.reg.commands

    def command(self, cmd, progress_hook=None, *args, **kwargs):
        """
        Execute a model command.

        :param cmd: Name of the command, optionally followed by the names of
            the simulations to run it on, separated by whitespace. With no
            simulation names the command runs on every registered simulation.
        :param progress_hook: A function to which progress updates are passed.
        """
        # split the command name from any simulation names; the original
        # ``split(None, 1)`` left multiple names fused in a single string and
        # then looked up ``getattr(..., cmd)`` with the *whole* input
        cmds = cmd.split()
        cmd_name = cmds[0]  # the command itself
        sim_names = cmds[1:]  # simulations to run it on
        if not sim_names:
            sim_names = self.cmd_layer.reg.keys()
        for sim_name in sim_names:
            sim_cmd = getattr(self.cmd_layer.reg[sim_name], cmd_name)
            sim_cmd(self, progress_hook=progress_hook, *args, **kwargs)
| {
"content_hash": "0f202160c08fb2154911ea225fc159ee",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 80,
"avg_line_length": 38.74635568513119,
"alnum_prop": 0.581339352896915,
"repo_name": "SunPower/Carousel",
"id": "276708349ee4d1f2f9eb0efb4776cb4d89c49820",
"size": "13314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simkit/core/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6990"
},
{
"name": "Makefile",
"bytes": "7664"
},
{
"name": "Python",
"bytes": "228051"
}
],
"symlink_target": ""
} |
# Run the doctest module's own self-test suite under the regression-test
# harness. One import per line per PEP 8.
import doctest
import test_support

test_support.run_doctest(doctest)
| {
"content_hash": "7888e6db11fa78a252fd548362f56c1b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 33,
"avg_line_length": 31.5,
"alnum_prop": 0.8253968253968254,
"repo_name": "mariaantoanelam/Licenta",
"id": "f8ad6fdb6e11523c074bf7672411e09259bb68e7",
"size": "63",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Lib/test/test_doctest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31035"
},
{
"name": "HTML",
"bytes": "134311"
},
{
"name": "Java",
"bytes": "161404"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Python",
"bytes": "4053763"
}
],
"symlink_target": ""
} |
class Token:
    """
    A single lexical token produced by a tokenizer.

    Holds a reference back to its tokenizer (for access to the source text),
    a token type id, and a (start, end) character span into the source.
    """
    # Token types
    (UNKOWN,
     ID,
     PUNCTUATION,
     INT_LIT,
     STRING_LIT,
     KEYWORD,
     COMMENT,
     BLOCK_COMMENT) = range(8)
    # Correctly spelled, backward-compatible alias for the historical
    # ``UNKOWN`` constant above (kept so existing callers keep working).
    UNKNOWN = UNKOWN

    def __init__(self, tokenizer, tokenId, span):
        """
        :param tokenizer: owning tokenizer; must expose a ``source`` string
        :param tokenId: one of the token type constants above
        :param span: (start, end) character offsets into the source
        """
        self._tokenizer = tokenizer
        self._id = tokenId
        self._span = span

    @property
    def tokenizer(self):
        """Tokenizer which produced this token."""
        return self._tokenizer

    @property
    def span(self):
        """(start, end) character offsets of this token in the source."""
        return self._span

    @property
    def id(self):
        """Token type id (one of the class constants)."""
        return self._id

    @property
    def body(self):
        """The token's text, sliced out of the tokenizer source."""
        return self.tokenizer.source[self.span[0]:self.span[1]]

    @property
    def location(self):
        '''
        Returns a tuple containing line and column of this token
        (both zero-based).
        '''
        currIndex = 0
        # walk the source line by line until the span falls inside one
        for lineNumber, line in enumerate(self._tokenizer.source.splitlines(True)):
            lineSpan = (currIndex, currIndex + len(line))
            if self._span[0] >= lineSpan[0] and self._span[1] <= lineSpan[1]:
                return (lineNumber, self._span[0] - currIndex)
            currIndex += len(line)
        # Sanity check (should never happen: every span lies on some line)
        assert(0)

    @property
    def locationStr(self):
        '''
        Returns a string pointing to the line in the original source where the token is located.
        '''
        # Split the source into lines
        lines = self.tokenizer.source.splitlines()
        # Construct an error message showing the offending token in line
        locationStr = '%r\n %s ^' % (
            lines[self.location[0]],
            (' ' * self.location[1]))
        return locationStr

    def __str__(self):
        return '%d %r' % (self.id, self.body)
| {
"content_hash": "ffcc1c0ae786418badcb1e7d99b6cc06",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 96,
"avg_line_length": 25.585714285714285,
"alnum_prop": 0.5203796761585706,
"repo_name": "spiricn/libIDL",
"id": "18c1bcab0e2c640920711560dc4b7831fb68a92a",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idl/lexer/Token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "128959"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.forms import model_to_dict
from django.shortcuts import get_object_or_404, render, redirect
from django.template.context import RequestContext
from .utils import slug2id
from .models import Notification
from django import get_version
from distutils.version import StrictVersion
# Django >= 1.7 ships JsonResponse; provide a minimal fallback for 1.6.
if StrictVersion(get_version()) >= StrictVersion('1.7.0'):
    from django.http import JsonResponse
else:
    # Django 1.6 doesn't have a proper JsonResponse
    import json
    from django.http import HttpResponse
    def date_handler(obj):
        # serialize date/datetime objects as ISO 8601; pass others through
        return obj.isoformat() if hasattr(obj, 'isoformat') else obj
    def JsonResponse(data):
        # mimic django.http.JsonResponse for Django < 1.7
        return HttpResponse(json.dumps(data, default=date_handler), content_type="application/json")
@login_required
def all(request):
    """
    Index page for authenticated user: render the full notification list
    (only non-deleted ones when NOTIFICATIONS_SOFT_DELETE is enabled).

    Note: the view keeps its historical name ``all`` (shadowing the builtin)
    because URL configs reference it. An unreachable pagination block that
    followed the return and referenced the undefined name
    ``render_to_response`` has been removed as dead code.
    """
    if getattr(settings, 'NOTIFICATIONS_SOFT_DELETE', False):
        qs = request.user.notifications.active()
    else:
        qs = request.user.notifications.all()
    return render(request, 'notifications/list.html', {
        'notifications': qs
    })
@login_required
def unread(request):
    """Render the list template with only the user's unread notifications."""
    context = {'notifications': request.user.notifications.unread()}
    return render(request, 'notifications/list.html', context)
@login_required
def mark_all_as_read(request):
    """Mark every notification of the current user as read, then redirect."""
    request.user.notifications.mark_all_as_read()
    destination = request.GET.get('next')
    if destination:
        return redirect(destination)
    return redirect('notifications:all')
@login_required
def mark_as_read(request, slug=None):
    """
    Mark the notification identified by ``slug`` as read, then redirect to
    ``next`` if given, otherwise to the notification list.
    """
    _id = slug2id(slug)  # ``_id`` avoids shadowing the builtin ``id``
    notification = get_object_or_404(Notification, recipient=request.user, id=_id)
    notification.mark_as_read()
    _next = request.GET.get('next')
    if _next:
        return redirect(_next)
    return redirect('notifications:all')
@login_required
def mark_as_unread(request, slug=None):
    """
    Mark the notification identified by ``slug`` as unread, then redirect to
    ``next`` if given, otherwise to the notification list.
    """
    _id = slug2id(slug)  # ``_id`` avoids shadowing the builtin ``id``
    notification = get_object_or_404(Notification, recipient=request.user, id=_id)
    notification.mark_as_unread()
    _next = request.GET.get('next')
    if _next:
        return redirect(_next)
    return redirect('notifications:all')
@login_required
def delete(request, slug=None):
    """
    Delete (or soft-delete when NOTIFICATIONS_SOFT_DELETE is set) the
    notification identified by ``slug``, then redirect.
    """
    notification = get_object_or_404(
        Notification, recipient=request.user, id=slug2id(slug))
    if getattr(settings, 'NOTIFICATIONS_SOFT_DELETE', False):
        # soft delete: flag the row instead of removing it
        notification.deleted = True
        notification.save()
    else:
        notification.delete()
    destination = request.GET.get('next')
    if destination:
        return redirect(destination)
    return redirect('notifications:all')
def live_unread_notification_count(request):
    """
    Return the user's unread notification count as JSON.

    Removed an unused ``from random import randint`` left in the body.
    NOTE(review): unlike the views above, this one is not ``login_required``;
    an anonymous request will fail on ``request.user.notifications`` —
    confirm whether that is intended.
    """
    data = {
        'unread_count': request.user.notifications.unread().count(),
    }
    return JsonResponse(data)
def live_unread_notification_list(request):
    """
    Return the unread count plus up to ``max`` (default 5, clamped to the
    range 1..100) serialized unread notifications as JSON.
    """
    try:
        limit = int(request.GET.get('max', 5))  # If they don't specify, make it 5.
    except ValueError:
        limit = 5  # If casting to an int fails, just make it 5.
    else:
        # force at least one and put a sane ceiling on the number retrievable
        limit = min(max(limit, 1), 100)
    unread = request.user.notifications.unread()
    data = {
        'unread_count': unread.count(),
        'unread_list': [model_to_dict(n) for n in unread[0:limit]],
    }
    return JsonResponse(data)
| {
"content_hash": "c224478fac23a8ea33096391106115ff",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 114,
"avg_line_length": 30.375886524822697,
"alnum_prop": 0.6833994863413495,
"repo_name": "alazaro/django-notifications",
"id": "fa2d7402dac952d75d80102cd216c82573b01697",
"size": "4309",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "notifications/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1015"
},
{
"name": "JavaScript",
"bytes": "1850"
},
{
"name": "Python",
"bytes": "88333"
}
],
"symlink_target": ""
} |
import os, logging
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack, send_from_directory
from werkzeug import check_password_hash, generate_password_hash, secure_filename
# configuration
DATABASE = 'data.db'  # SQLite database file
DEBUG = True  # NOTE(review): debug mode enabled — disable for production
SECRET_KEY = 'development key'  # NOTE(review): placeholder secret — replace for production
USERNAME = 'admin'
PASSWORD = 'default'
UPLOAD_FOLDER = 'uploads'  # where uploaded files are stored
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)  # pull the upper-case names above into app.config
app.config.from_envvar('FLASKR_SETTINGS', silent=True)  # optional external overrides
# make sure the upload directory exists before the first upload
if not os.path.exists(app.config['UPLOAD_FOLDER']):
    os.makedirs(app.config['UPLOAD_FOLDER'])
def allowed_file(filename):
    """
    Return True if ``filename`` has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive so e.g. "photo.JPG" is accepted
    (the original check rejected upper-case extensions).
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def init_db():
    """Creates the database tables."""
    with app.app_context():
        db = get_db()
        # run the bundled schema.sql against the fresh connection
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    ctx = _app_ctx_stack.top
    if not hasattr(ctx, 'sqlite_db'):
        connection = sqlite3.connect(app.config['DATABASE'])
        connection.row_factory = sqlite3.Row  # rows support name-based access
        ctx.sqlite_db = connection
    return ctx.sqlite_db
def query_db(query, args=(), one=False):
    """Query the database and return the matching rows (sqlite3.Row
    objects); with ``one=True`` return just the first row or None."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    if one:
        return rows[0] if rows else None
    return rows
def get_user_id(username):
    """Convenience method to look up the id for a username."""
    # NOTE(review): queries table "user" while the other views query
    # "users" — confirm which table name schema.sql actually defines.
    rv = query_db('select user_id from user where username = ?',
                  [username], one=True)
    return rv[0] if rv else None
@app.teardown_appcontext
def close_db_connection(exception):
    """Closes the database again at the end of the request."""
    ctx = _app_ctx_stack.top
    if hasattr(ctx, 'sqlite_db'):
        ctx.sqlite_db.close()
@app.before_request
def before_request():
    # Resolve the logged-in user (if any) from the session before each
    # request and expose it as ``g.user`` for the views below.
    g.user = None
    if 'user_id' in session:
        g.user = query_db('select * from users where user_id = ?',
                          [session['user_id']], one=True)
@app.route('/', methods=['GET', 'POST'])
def login():
    """Log the user in; on success redirect to the upload page."""
    error = None
    if request.method == 'POST':
        user = query_db('''select * from users where
            username = ?''', [request.form['username']], one=True)
        if user is None:
            error = 'No such user'
        # NOTE(review): passwords are compared in plain text although
        # check_password_hash/generate_password_hash are imported — confirm
        # whether hashed storage was intended.
        elif user['password'] != request.form['password']:
            error = 'Invalid username or password'
        else:
            session['user_id'] = user['user_id']
            flash('You were logged in and can upload now')
            return redirect(url_for('upload'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the user id from the session and return to the login page."""
    session.pop('user_id', None)
    flash('You were logged out')
    return redirect(url_for('login'))
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """
    Show the upload form (GET) or store an uploaded file (POST).

    Requires a logged-in user; on success redirects to the file's URL.
    """
    error = None
    if not g.user:
        flash('You are not logged in')
        return redirect(url_for('login'))
    if request.method == 'POST':
        # .get avoids a 400 BadRequestKeyError when the form field is
        # missing, so a missing file reaches the normal error branch;
        # renamed from ``file`` to avoid shadowing the builtin
        upload_file = request.files.get('file')
        if upload_file is None:
            error = 'Invalid file'
        elif allowed_file(upload_file.filename):
            # sanitize the client-supplied name before touching the disk
            filename = secure_filename(upload_file.filename)
            upload_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('uploads', filename=filename))
        else:
            error = 'The server encountered an internal error'
    return render_template('upload.html', error=error)
@app.route('/uploads/<filename>')
def uploads(filename):
    """Serve a previously uploaded file from the upload folder."""
    # pass as_attachment=True to send_from_directory to force a download
    folder = app.config['UPLOAD_FOLDER']
    return send_from_directory(folder, filename)
if __name__ == '__main__':
    # (re)create the schema, then start the development server
    init_db()
    app.run()
| {
"content_hash": "778284af01721038e406e0b42e6e37cf",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 121,
"avg_line_length": 30.91044776119403,
"alnum_prop": 0.6122646064703042,
"repo_name": "howenx/py_flask_login_upload",
"id": "d5adda20da4f0a7bf75bc635a485a242d5f81c0f",
"size": "4166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "919"
},
{
"name": "HTML",
"bytes": "1294"
},
{
"name": "Python",
"bytes": "4166"
}
],
"symlink_target": ""
} |
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F
from django.db.models.fields import AutoField
from django.db.models.functions import Trunc
from django.db.models.query_utils import InvalidQuery, Q
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable(object):
    """Base class for the row iterables below; just holds the queryset."""
    def __init__(self, queryset):
        self.queryset = queryset
class ModelIterable(BaseIterable):
    """
    Iterable that yields a model instance for each row.
    """
    def __iter__(self):
        queryset = self.queryset
        db = queryset.db
        compiler = queryset.query.get_compiler(using=db)
        # Execute the query. This will also fill compiler.select, klass_info,
        # and annotations.
        results = compiler.execute_sql()
        select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                                  compiler.annotation_col_map)
        model_cls = klass_info['model']
        select_fields = klass_info['select_fields']
        # columns [start, end) hold the model's own concrete fields; the
        # rest belong to annotations and select_related rows
        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
        init_list = [f[0].target.attname
                     for f in select[model_fields_start:model_fields_end]]
        related_populators = get_related_populators(klass_info, select, db)
        for row in compiler.results_iter(results):
            obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
            if related_populators:
                # attach select_related objects parsed from the same row
                for rel_populator in related_populators:
                    rel_populator.populate(row, obj)
            if annotation_col_map:
                # set annotation values as plain attributes on the instance
                for attr_name, col_pos in annotation_col_map.items():
                    setattr(obj, attr_name, row[col_pos])
            # Add the known related objects to the model, if there are any
            if queryset._known_related_objects:
                for field, rel_objs in queryset._known_related_objects.items():
                    # Avoid overwriting objects loaded e.g. by select_related
                    if hasattr(obj, field.get_cache_name()):
                        continue
                    pk = getattr(obj, field.get_attname())
                    try:
                        rel_obj = rel_objs[pk]
                    except KeyError:
                        pass  # may happen in qs1 | qs2 scenarios
                    else:
                        setattr(obj, field.name, rel_obj)
            yield obj
class ValuesIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values() that yields a dict
    for each row.
    """
    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)
        field_names = list(query.values_select)
        extra_names = list(query.extra_select)
        annotation_names = list(query.annotation_select)
        # extra(select=...) cols are always at the start of the row.
        names = extra_names + field_names + annotation_names
        for row in compiler.results_iter():
            # pair column names with row values to build each dict
            yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=False)
    that yields a tuple for each row.
    """
    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)
        if not query.extra_select and not query.annotation_select:
            # fast path: rows already come back in the requested order
            for row in compiler.results_iter():
                yield tuple(row)
        else:
            field_names = list(query.values_select)
            extra_names = list(query.extra_select)
            annotation_names = list(query.annotation_select)
            # extra(select=...) cols are always at the start of the row.
            names = extra_names + field_names + annotation_names
            if queryset._fields:
                # Reorder according to fields.
                fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
            else:
                fields = names
            for row in compiler.results_iter():
                # re-project each row into the caller's field order
                data = dict(zip(names, row))
                yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=True) that
    yields single values.
    """
    def __iter__(self):
        queryset = self.queryset
        compiler = queryset.query.get_compiler(queryset.db)
        for row in compiler.results_iter():
            # flat=True guarantees a single selected column per row
            yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
    def __init__(self, model=None, query=None, using=None, hints=None):
        # Lazy holder of a SQL query plus the machinery to execute it and
        # cache its results.
        self.model = model
        self._db = using  # database alias, or None to let the router pick
        self._hints = hints or {}  # routing hints passed to the db router
        self.query = query or sql.Query(self.model)
        self._result_cache = None  # filled by _fetch_all() on evaluation
        self._sticky_filter = False
        self._for_write = False
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
        self._iterable_class = ModelIterable  # row -> object strategy
        self._fields = None  # set by values()/values_list()
    def as_manager(cls):
        """Return a Manager instance whose queryset class is this class."""
        # Address the circular dependency between `Queryset` and `Manager`.
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager
    as_manager.queryset_only = True  # excluded when copying methods to managers
    as_manager = classmethod(as_manager)  # applied manually so the attribute above sticks
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
    def __getstate__(self):
        # Force the cache to be fully populated.
        self._fetch_all()
        obj_dict = self.__dict__.copy()
        # stamp the Django version so __setstate__ can warn on a mismatch
        obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
        return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
    def __len__(self):
        # len() evaluates the whole queryset and fills the result cache
        self._fetch_all()
        return len(self._result_cache)
    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler:execute_sql()
               - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql/compiler.results_iter()
               - Returns one row at time. At this point the rows are still just
                 tuples. In some cases the return values are converted to
                 Python values at this location.
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        # iteration evaluates the queryset once and replays the cache
        self._fetch_all()
        return iter(self._result_cache)
    def __bool__(self):
        # truth-testing evaluates the whole queryset
        self._fetch_all()
        return bool(self._result_cache)
    def __nonzero__(self):      # Python 2 compatibility
        # delegate to __bool__ so subclass overrides are honored
        return type(self).__bool__(self)
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice,) + six.integer_types):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        if self._result_cache is not None:
            # already evaluated: answer from the cache without hitting the db
            return self._result_cache[k]
        if isinstance(k, slice):
            # translate the slice into LIMIT/OFFSET on a clone of the query
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            # a step requires materializing the rows; otherwise stay lazy
            return list(qs)[::k.step] if k.step else qs
        # single index: constrain the query to exactly one row
        qs = self._clone()
        qs.query.set_limits(k, k + 1)
        return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        # bypasses the result cache: rows are converted and yielded one at
        # a time by the configured iterable class
        return iter(self._iterable_class(self))
def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset
    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    # Fold positional expressions into kwargs keyed by their default alias.
    for arg in args:
        # The default_alias property may raise a TypeError, so we use
        # a try/except construct rather than hasattr in order to remain
        # consistent between PY2 and PY3 (hasattr would swallow
        # the TypeError on PY2).
        try:
            arg.default_alias
        except (AttributeError, TypeError):
            raise TypeError("Complex aggregates require an alias")
        kwargs[arg.default_alias] = arg
    # Work on a clone so the summary annotations never touch self.query.
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_annotation(aggregate_expr, alias, is_summary=True)
        if not query.annotations[alias].contains_aggregate:
            raise TypeError("%s is not an aggregate expression" % alias)
    return query.get_aggregation(self.db, kwargs.keys())
def count(self):
    """
    Return the number of matching rows as an integer.

    A fully-populated result cache is reused so repeated calls don't issue
    redundant SELECT COUNT(*) queries.
    """
    cache = self._result_cache
    if cache is None:
        return self.query.get_count(using=self.db)
    return len(cache)
def get(self, *args, **kwargs):
    """
    Perform the query and return the single object matching the given
    keyword arguments.

    Raises the model's DoesNotExist when nothing matches and
    MultipleObjectsReturned when more than one row does.
    """
    clone = self.filter(*args, **kwargs)
    # Ordering is irrelevant for a single-row fetch; clear it when legal
    # (not on sliced querysets or distinct-on-fields queries).
    if self.query.can_filter() and not self.query.distinct_fields:
        clone = clone.order_by()
    num = len(clone)
    if num == 1:
        return clone._result_cache[0]
    if num == 0:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name
        )
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, num)
    )
def create(self, **kwargs):
    """Instantiate self.model with kwargs, INSERT it, and return it."""
    instance = self.model(**kwargs)
    # Route the save to the write database.
    self._for_write = True
    instance.save(force_insert=True, using=self.db)
    return instance
def _populate_pk_values(self, objs):
    # Let the pk field compute a value ahead of the INSERT where it can
    # (e.g. client-side generated keys); leave existing pks untouched.
    for obj in objs:
        if obj.pk is not None:
            continue
        obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
    Multi-table models are not supported.
    """
    # When you bulk insert you don't get the primary keys back (if it's an
    # autoincrement, except if can_return_ids_from_bulk_insert=True), so
    # you can't insert into the child tables which references this. There
    # are two workarounds:
    # 1) This could be implemented if you didn't have an autoincrement pk
    # 2) You could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back and then doing a single bulk
    # insert into the childmost table.
    # We currently set the primary keys on the objects when using
    # PostgreSQL via the RETURNING ID clause. It should be possible for
    # Oracle as well, but the semantics for extracting the primary keys is
    # trickier so it's not done yet.
    assert batch_size is None or batch_size > 0
    # Check that the parents share the same concrete model with the our
    # model to detect the inheritance pattern ConcreteGrandParent ->
    # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
    # would not identify that case as involving multiple tables.
    for parent in self.model._meta.get_parent_list():
        if parent._meta.concrete_model is not self.model._meta.concrete_model:
            raise ValueError("Can't bulk create a multi-table inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.concrete_fields
    objs = list(objs)
    self._populate_pk_values(objs)
    # Insert atomically: objects that already carry a pk keep it; objects
    # without one are inserted without the AutoField so the backend assigns it.
    with transaction.atomic(using=self.db, savepoint=False):
        objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
        if objs_with_pk:
            self._batched_insert(objs_with_pk, fields, batch_size)
        if objs_without_pk:
            fields = [f for f in fields if not isinstance(f, AutoField)]
            ids = self._batched_insert(objs_without_pk, fields, batch_size)
            if connection.features.can_return_ids_from_bulk_insert:
                assert len(ids) == len(objs_without_pk)
            # Backends supporting RETURNING hand the new pks back in order.
            for obj_without_pk, pk in zip(objs_without_pk, ids):
                obj_without_pk.pk = pk
                obj_without_pk._state.adding = False
                obj_without_pk._state.db = self.db
    return objs
def get_or_create(self, defaults=None, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.
    """
    lookup, params = self._extract_model_params(defaults, **kwargs)
    # The get() needs to be targeted at the write database in order
    # to avoid potential transaction consistency problems.
    self._for_write = True
    try:
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        # Race-aware creation path; handles a concurrent insert internally.
        return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
    """
    Looks up an object with the given kwargs, updating one with defaults
    if it exists, otherwise creates a new one.
    Returns a tuple (object, created), where created is a boolean
    specifying whether an object was created.
    """
    defaults = defaults or {}
    lookup, params = self._extract_model_params(defaults, **kwargs)
    self._for_write = True
    with transaction.atomic(using=self.db):
        try:
            # Lock the row so the subsequent update can't race another writer.
            obj = self.select_for_update().get(**lookup)
        except self.model.DoesNotExist:
            obj, created = self._create_object_from_params(lookup, params)
            if created:
                return obj, created
        # Apply defaults; callables are resolved at assignment time.
        for k, v in six.iteritems(defaults):
            setattr(obj, k, v() if callable(v) else v)
        obj.save(using=self.db)
    return obj, False
def _create_object_from_params(self, lookup, params):
    """
    Tries to create an object using passed params.
    Used by get_or_create and update_or_create
    """
    try:
        with transaction.atomic(using=self.db):
            # Callable param values are resolved just before creation.
            params = {k: v() if callable(v) else v for k, v in params.items()}
            obj = self.create(**params)
        return obj, True
    except IntegrityError:
        # Likely lost a creation race; preserve the original traceback in
        # case the re-fetch below also fails.
        exc_info = sys.exc_info()
        try:
            return self.get(**lookup), False
        except self.model.DoesNotExist:
            pass
        six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
    """
    Prepares `lookup` (kwargs that are valid model attributes), `params`
    (for creating a model instance) based on given kwargs; for use by
    get_or_create and update_or_create.
    """
    defaults = defaults or {}
    lookup = kwargs.copy()
    # Normalize attnames (e.g. "author_id") to field names ("author").
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    # Creation params exclude lookups that traverse relations ("__").
    params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
    params.update(defaults)
    invalid_params = []
    for param in params:
        try:
            self.model._meta.get_field(param)
        except exceptions.FieldDoesNotExist:
            if param != 'pk':  # It's okay to use a model's pk property.
                invalid_params.append(param)
    if invalid_params:
        raise exceptions.FieldError(
            "Invalid field name(s) for model %s: '%s'." % (
                self.model._meta.object_name,
                "', '".join(sorted(invalid_params)),
            ))
    return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
    """
    Returns the latest object, according to the model's
    'get_latest_by' option or optional given field_name.

    `direction` is "" for ascending (earliest) or "-" for descending
    (latest).
    """
    order_by = field_name or getattr(self.model._meta, 'get_latest_by')
    assert bool(order_by), "earliest() and latest() require either a "\
        "field_name parameter or 'get_latest_by' in the model"
    assert self.query.can_filter(), \
        "Cannot change a query once a slice has been taken."
    # Replace any existing ordering, fetch a single row, and delegate the
    # zero-result DoesNotExist behavior to get().
    obj = self._clone()
    obj.query.set_limits(high=1)
    obj.query.clear_ordering(force_empty=True)
    obj.query.add_ordering('%s%s' % (direction, order_by))
    return obj.get()
def earliest(self, field_name=None):
    # Ascending order: empty direction prefix.
    return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
    # Descending order: "-" direction prefix.
    return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
    """Return the first matched object, or None when there are no results."""
    # Fall back to pk ordering so "first" is deterministic.
    qs = self if self.ordered else self.order_by('pk')
    for obj in qs[:1]:
        return obj
    return None
def last(self):
    """Return the last matched object, or None when there are no results."""
    # Flip the ordering (or order by -pk) and take the first row of that.
    qs = self.reverse() if self.ordered else self.order_by('-pk')
    for obj in qs[:1]:
        return obj
    return None
def in_bulk(self, id_list=None):
    """
    Return a dict mapping primary key to object for the given IDs; with no
    `id_list`, map the entire queryset.
    """
    assert self.query.can_filter(), \
        "Cannot use 'limit' or 'offset' with in_bulk"
    if id_list is None:
        qs = self._clone()
    elif not id_list:
        # An explicitly empty id list short-circuits without a query.
        return {}
    else:
        qs = self.filter(pk__in=id_list).order_by()
    return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
    """
    Deletes the records in the current QuerySet.

    Returns the two values produced by the Collector's delete().
    """
    assert self.query.can_filter(), \
        "Cannot use 'limit' or 'offset' with delete."
    if self._fields is not None:
        raise TypeError("Cannot call delete() after .values() or .values_list()")
    del_query = self._clone()
    # The delete is actually 2 queries - one to find related objects,
    # and one to delete. Make sure that the discovery of related
    # objects is performed on the same database as the deletion.
    del_query._for_write = True
    # Disable non-supported fields.
    del_query.query.select_for_update = False
    del_query.query.select_related = False
    del_query.query.clear_ordering(force_empty=True)
    collector = Collector(using=del_query.db)
    collector.collect(del_query)
    deleted, _rows_count = collector.delete()
    # Clear the result cache, in case this QuerySet gets reused.
    self._result_cache = None
    return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
    """
    Deletes objects found from the given queryset in single direct SQL
    query. No signals are sent, and there is no protection for cascades.
    """
    return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.

    Returns the result of executing the compiled UPDATE (row count).
    """
    assert self.query.can_filter(), \
        "Cannot update a query once a slice has been taken."
    self._for_write = True
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_values(kwargs)
    # Clear any annotations so that they won't be present in subqueries.
    query._annotations = None
    with transaction.atomic(using=self.db, savepoint=False):
        rows = query.get_compiler(self.db).execute_sql(CURSOR)
    # The cached results no longer reflect the database; drop them.
    self._result_cache = None
    return rows
update.alters_data = True
def _update(self, values):
    """
    A version of update that accepts field objects instead of field names.
    Used primarily for model saving and not intended for use by general
    code (it requires too much poking around at model internals to be
    useful at that level).
    """
    assert self.query.can_filter(), \
        "Cannot update a query once a slice has been taken."
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_fields(values)
    # Invalidate the cache before executing; the rows are about to change.
    self._result_cache = None
    return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
    """Return True when the query matches at least one row."""
    # A populated cache answers without touching the database.
    if self._result_cache is not None:
        return bool(self._result_cache)
    return self.query.has_results(using=self.db)
def _prefetch_related_objects(self):
    # This method can only be called once the result cache has been filled.
    # Runs the queued prefetch lookups against the cached instances, then
    # marks prefetching done so it isn't repeated.
    prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
    self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
    """Wrap a raw SQL string in a RawQuerySet bound to `using` (default: self.db)."""
    return RawQuerySet(
        raw_query,
        model=self.model,
        params=params,
        translations=translations,
        using=self.db if using is None else using,
    )
def _values(self, *fields, **expressions):
    # Shared plumbing for values()/values_list(): record the selected
    # fields on the clone and narrow the query's SELECT accordingly.
    clone = self._clone()
    if expressions:
        # Keyword expressions become annotations before field selection.
        clone = clone.annotate(**expressions)
    clone._fields = fields
    clone.query.set_values(fields)
    return clone
def values(self, *fields, **expressions):
    """Return a queryset that yields dicts of the given fields/expressions."""
    selected = fields + tuple(expressions)
    clone = self._values(*selected, **expressions)
    clone._iterable_class = ValuesIterable
    return clone
def values_list(self, *fields, **kwargs):
    """
    Returns a queryset that yields tuples of the given fields (or single
    values when flat=True and one field is requested).
    """
    flat = kwargs.pop('flat', False)
    if kwargs:
        raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),))
    if flat and len(fields) > 1:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    _fields = []
    expressions = {}
    for field in fields:
        if hasattr(field, 'resolve_expression'):
            # Expression objects get a synthetic identity-based alias so
            # they can ride through _values() as annotations.
            field_id = str(id(field))
            expressions[field_id] = field
            _fields.append(field_id)
        else:
            _fields.append(field)
    clone = self._values(*_fields, **expressions)
    clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
    return clone
def dates(self, field_name, kind, order='ASC'):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day"), \
        "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), \
        "'order' must be either 'ASC' or 'DESC'."
    # Truncate to the requested granularity; NULLs are excluded via the
    # untruncated field, and the distinct truncated values are ordered.
    return self.annotate(
        datefield=Trunc(field_name, kind, output_field=DateField()),
        plain_field=F(field_name)
    ).values_list(
        'datefield', flat=True
    ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
        "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
        "'order' must be either 'ASC' or 'DESC'."
    # Only apply a timezone when USE_TZ is on; default to the current one.
    if settings.USE_TZ:
        if tzinfo is None:
            tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return self.annotate(
        datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
        plain_field=F(field_name)
    ).values_list(
        'datetimefield', flat=True
    ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
    """Return a queryset that matches nothing and never hits the database."""
    empty = self._clone()
    empty.query.set_empty()
    return empty
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
    """
    Returns a new QuerySet that is a copy of the current one. This allows a
    QuerySet to proxy for a model manager in some cases.
    """
    return self._clone()
def filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the args ANDed to the existing
    set.
    """
    # negate=False: keep rows that match the lookups.
    return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with NOT (args) ANDed to the existing
    set.
    """
    # negate=True: keep rows that do NOT match the lookups.
    return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
    """Shared implementation behind filter() and exclude()."""
    if args or kwargs:
        assert self.query.can_filter(), \
            "Cannot filter a query once a slice has been taken."
    clone = self._clone()
    condition = Q(*args, **kwargs)
    clone.query.add_q(~condition if negate else condition)
    return clone
def complex_filter(self, filter_obj):
    """
    Return a new queryset with `filter_obj` added to the filters.

    `filter_obj` may be a Q object (or anything exposing add_to_query())
    or a dict of keyword lookups. Exists mainly for framework features
    such as 'limit_choices_to'; prefer filter()/exclude() in normal code.
    """
    if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
        clone = self._clone()
        clone.query.add_q(filter_obj)
        return clone
    return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False, skip_locked=False):
    """
    Return a queryset that selects rows with a FOR UPDATE lock.

    `nowait` and `skip_locked` are mutually exclusive lock-contention
    behaviors.
    """
    if nowait and skip_locked:
        raise ValueError('The nowait option cannot be used with skip_locked.')
    clone = self._clone()
    clone._for_write = True
    clone.query.select_for_update = True
    clone.query.select_for_update_nowait = nowait
    clone.query.select_for_update_skip_locked = skip_locked
    return clone
def select_related(self, *fields):
    """
    Return a queryset that follows foreign-key relations in the same query.

    With field names, only those relations are followed; with no arguments
    all eligible relations are; select_related(None) clears the list.
    """
    if self._fields is not None:
        raise TypeError("Cannot call select_related() after .values() or .values_list()")
    clone = self._clone()
    if fields == (None,):
        clone.query.select_related = False
    elif fields:
        clone.query.add_select_related(fields)
    else:
        clone.query.select_related = True
    return clone
def prefetch_related(self, *lookups):
    """
    Return a queryset that prefetches the given related objects when
    evaluated. Repeated calls append lookups; prefetch_related(None)
    clears them.
    """
    qs = self._clone()
    if lookups == (None,):
        qs._prefetch_related_lookups = ()
    else:
        # Tuples are immutable, so += rebinds on the clone only.
        qs._prefetch_related_lookups += lookups
    return qs
def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with extra data or aggregations.
    """
    annotations = OrderedDict()  # To preserve ordering of args
    for arg in args:
        # The default_alias property may raise a TypeError, so we use
        # a try/except construct rather than hasattr in order to remain
        # consistent between PY2 and PY3 (hasattr would swallow
        # the TypeError on PY2).
        try:
            if arg.default_alias in kwargs:
                raise ValueError("The named annotation '%s' conflicts with the "
                                 "default name for another annotation."
                                 % arg.default_alias)
        except (AttributeError, TypeError):
            raise TypeError("Complex annotations require an alias")
        annotations[arg.default_alias] = arg
    annotations.update(kwargs)
    clone = self._clone()
    names = self._fields
    if names is None:
        names = {f.name for f in self.model._meta.get_fields()}
    # Reject aliases that would shadow a concrete model field.
    for alias, annotation in annotations.items():
        if alias in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                             "the model." % alias)
        clone.query.add_annotation(annotation, alias, is_summary=False)
    # Any aggregate among the new annotations forces grouping.
    for alias, annotation in clone.query.annotations.items():
        if alias in annotations and annotation.contains_aggregate:
            if clone._fields is None:
                clone.query.group_by = True
            else:
                clone.query.set_group_by()
            break
    return clone
def order_by(self, *field_names):
    """Return a queryset ordered by the given field names."""
    assert self.query.can_filter(), \
        "Cannot reorder a query once a slice has been taken."
    clone = self._clone()
    # Replace, rather than extend, any existing ordering.
    clone.query.clear_ordering(force_empty=False)
    clone.query.add_ordering(*field_names)
    return clone
def distinct(self, *field_names):
    """Return a queryset that selects only distinct results (optionally ON fields)."""
    assert self.query.can_filter(), \
        "Cannot create distinct fields once a slice has been taken."
    clone = self._clone()
    clone.query.add_distinct_fields(*field_names)
    return clone
def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """Graft raw SQL fragments (SELECT, WHERE, tables, ORDER BY) onto the query."""
    assert self.query.can_filter(), \
        "Cannot change a query once a slice has been taken"
    qs = self._clone()
    qs.query.add_extra(select, select_params, where, params, tables, order_by)
    return qs
def reverse(self):
    """Return a queryset with its ordering flipped."""
    qs = self._clone()
    qs.query.standard_ordering = not qs.query.standard_ordering
    return qs
def defer(self, *fields):
    """
    Defer loading of the named fields until first access.

    Fields accumulate across calls; defer(None) removes all deferrals.
    """
    if self._fields is not None:
        raise TypeError("Cannot call defer() after .values() or .values_list()")
    qs = self._clone()
    if fields == (None,):
        # defer(None) acts as a full reset.
        qs.query.clear_deferred_loading()
    else:
        qs.query.add_deferred_loading(fields)
    return qs
def only(self, *fields):
    """
    Load only the named (not-already-deferred) fields immediately —
    the inverse of defer().
    """
    if self._fields is not None:
        raise TypeError("Cannot call only() after .values() or .values_list()")
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    qs = self._clone()
    qs.query.add_immediate_loading(fields)
    return qs
def using(self, alias):
    """Bind this queryset to the database named by `alias`."""
    qs = self._clone()
    qs._db = alias
    return qs
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
    """
    True when the queryset carries an ordering — either an explicit
    order_by()/extra ordering or the model's default Meta.ordering.
    """
    if self.query.extra_order_by or self.query.order_by:
        return True
    return bool(self.query.default_ordering and self.query.get_meta().ordering)
@property
def db(self):
    """The database alias this query would use if executed right now."""
    choose = router.db_for_write if self._for_write else router.db_for_read
    return self._db or choose(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented.
    """
    self._for_write = True
    if using is None:
        using = self.db
    query = sql.InsertQuery(self.model)
    query.insert_values(fields, objs, raw=raw)
    # return_id asks the compiler to hand back the inserted key(s).
    return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
    """
    A little helper method for bulk_insert to insert the bulk one batch
    at a time. Inserts recursively a batch from the front of the bulk and
    then _batched_insert() the remaining objects again.

    Returns the collected inserted ids on backends that can report them
    (empty list otherwise); returns None when objs is empty.
    """
    if not objs:
        return
    ops = connections[self.db].ops
    # Fall back to the backend's maximum sensible batch size, floor 1.
    batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
    inserted_ids = []
    for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
        if connections[self.db].features.can_return_ids_from_bulk_insert:
            inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True)
            # A single-row batch yields a scalar id rather than a list.
            if isinstance(inserted_id, list):
                inserted_ids.extend(inserted_id)
            else:
                inserted_ids.append(inserted_id)
        else:
            self._insert(item, fields=fields, using=self.db)
    return inserted_ids
def _clone(self, **kwargs):
    """
    Return a copy of this queryset with a cloned query; extra kwargs are
    written straight onto the copy's __dict__.
    """
    query = self.query.clone()
    if self._sticky_filter:
        query.filter_is_sticky = True
    clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
    # Carry over per-queryset state that the constructor doesn't take.
    clone._for_write = self._for_write
    clone._prefetch_related_lookups = self._prefetch_related_lookups
    clone._known_related_objects = self._known_related_objects
    clone._iterable_class = self._iterable_class
    clone._fields = self._fields
    clone.__dict__.update(kwargs)
    return clone
def _fetch_all(self):
    # Populate the result cache (once) and run any pending prefetches.
    if self._result_cache is None:
        self._result_cache = list(self.iterator())
    if self._prefetch_related_lookups and not self._prefetch_done:
        self._prefetch_related_objects()
def _next_is_sticky(self):
    """
    Indicates that the next filter call and the one following that should
    be treated as a single filter. This is only important when it comes to
    determining when to reuse tables for many-to-many filters. Required so
    that we can filter naturally on the results of related managers.
    This doesn't return a clone of the current QuerySet (it returns
    "self"). The method is only used internally and should be immediately
    followed by a filter() that does create a clone.
    """
    self._sticky_filter = True
    return self
def _merge_sanity_check(self, other):
    """
    Checks that we are merging two comparable QuerySet classes.

    Only applies to values()/values_list() querysets (where _fields is
    set): both sides must select the same values, extras, and annotations.
    """
    if self._fields is not None and (
            set(self.query.values_select) != set(other.query.values_select) or
            set(self.query.extra_select) != set(other.query.extra_select) or
            set(self.query.annotation_select) != set(other.query.annotation_select)):
        raise TypeError(
            "Merging '%s' classes must involve the same values in each case."
            % self.__class__.__name__
        )
def _merge_known_related_objects(self, other):
    """Fold the other queryset's known related objects into this one's."""
    for field, instances in other._known_related_objects.items():
        self._known_related_objects.setdefault(field, {}).update(instances)
def _prepare_as_filter_value(self):
    # Turn this queryset into a subquery usable as a filter value.
    if self._fields is None:
        # Plain querysets are forced to select only the pk column.
        queryset = self.values('pk')
        queryset.query._forced_pk = True
    else:
        # values() queryset can only be used as nested queries
        # if they are set up to select only a single field.
        if len(self._fields) > 1:
            raise TypeError('Cannot use multi-field values as a filter value.')
        queryset = self._clone()
    return queryset.query.as_subquery_filter(queryset._db)
def _add_hints(self, **hints):
    """
    Update hinting information for later use by Routers
    """
    # If there is any hinting information, add it to what we already know.
    # If we have a new hint for an existing key, overwrite with the new value.
    self._hints.update(hints)
def _has_filters(self):
    """
    Checks if this QuerySet has any filtering going on. Note that this
    isn't equivalent for checking if all objects are present in results,
    for example qs[1:]._has_filters() -> False.
    """
    return self.query.has_filters()
class InstanceCheckMeta(type):
    def __instancecheck__(self, instance):
        # A queryset counts as "empty" when its query can never match rows.
        return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
    """
    Marker class usable for checking if a queryset is empty by .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """
    def __init__(self, *args, **kwargs):
        # Purely a marker for isinstance() checks; never instantiated.
        raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
    """
    Provides an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
                 translations=None, using=None, hints=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        # Maps query column names to model field names (see `columns`).
        self.translations = translations or {}

    def resolve_model_init_order(self):
        """
        Resolve the init field names and value positions
        """
        # Columns that correspond to concrete model fields feed from_db();
        # everything else is set as a plain instance attribute (annotation).
        model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
        annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
                             if column not in self.model_fields]
        model_init_order = [self.columns.index(f.column) for f in model_init_fields]
        model_init_names = [f.attname for f in model_init_fields]
        return model_init_names, model_init_order, annotation_fields

    def __iter__(self):
        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        query = iter(self.query)
        try:
            model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
            # Find out which model's fields are not present in the query.
            skip = set()
            for field in self.model._meta.fields:
                if field.attname not in model_init_names:
                    skip.add(field.attname)
            if skip:
                if self.model._meta.pk.attname in skip:
                    raise InvalidQuery('Raw query must include the primary key')
            model_cls = self.model
            fields = [self.model_fields.get(c) for c in self.columns]
            converters = compiler.get_converters([
                f.get_col(f.model._meta.db_table) if f else None for f in fields
            ])
            for values in query:
                if converters:
                    values = compiler.apply_converters(values, converters)
                # Associate fields to values
                model_init_values = [values[pos] for pos in model_init_pos]
                instance = model_cls.from_db(db, model_init_names, model_init_values)
                if annotation_fields:
                    # Extra SELECT columns become plain attributes.
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])
                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(self.query, 'cursor') and self.query.cursor:
                self.query.cursor.close()

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.query)

    def __getitem__(self, k):
        # Raw querysets have no lazy slicing; evaluate fully then index.
        return list(self)[k]

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model, **self._hints)

    def using(self, alias):
        """
        Selects which database this Raw QuerySet should execute its query against.
        """
        return RawQuerySet(
            self.raw_query, model=self.model,
            query=self.query.clone(using=alias),
            params=self.params, translations=self.translations,
            using=alias,
        )

    @cached_property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        columns = self.query.get_columns()
        # Adjust any column names which don't match field names
        for (query_name, model_name) in self.translations.items():
            try:
                index = columns.index(query_name)
                columns[index] = model_name
            except ValueError:
                # Ignore translations for non-existent column names
                pass
        return columns

    @cached_property
    def model_fields(self):
        """
        A dict mapping column names to model field names.
        """
        # Column names are normalized through the backend's converter.
        converter = connections[self.db].introspection.table_name_converter
        model_fields = {}
        for field in self.model._meta.fields:
            name, column = field.get_attname_column()
            model_fields[converter(column)] = field
        return model_fields
class Prefetch(object):
    """
    Describes one prefetch_related() lookup: the relation path to traverse,
    an optional custom queryset, and an optional attribute to store results on.
    """
    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result.
        self.prefetch_to = lookup
        if queryset is not None and queryset._iterable_class is not ModelIterable:
            raise ValueError('Prefetch querysets cannot use values().')
        if to_attr:
            # Redirect the final path segment to the custom attribute name.
            self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
        self.queryset = queryset
        self.to_attr = to_attr

    def __getstate__(self):
        obj_dict = self.__dict__.copy()
        if self.queryset is not None:
            # Prevent the QuerySet from being evaluated
            obj_dict['queryset'] = self.queryset._clone(
                _result_cache=[],
                _prefetch_done=True,
            )
        return obj_dict

    def add_prefix(self, prefix):
        # Re-root both paths under the given relation prefix.
        self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
        self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])

    def get_current_prefetch_to(self, level):
        # The storage path truncated to `level + 1` segments.
        return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])

    def get_current_to_attr(self, level):
        # Returns (attribute name at this level, whether it is the custom
        # to_attr leaf rather than a default related manager attribute).
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr

    def get_current_queryset(self, level):
        # The custom queryset only applies at the leaf level of the path.
        if self.get_current_prefetch_to(level) == self.prefetch_to:
            return self.queryset
        return None

    def __eq__(self, other):
        if isinstance(other, Prefetch):
            return self.prefetch_to == other.prefetch_to
        return False

    def __hash__(self):
        return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
    """Coerce each lookup into a Prefetch object, optionally re-rooted under prefix."""
    normalized = []
    for lookup in lookups:
        if not isinstance(lookup, Prefetch):
            lookup = Prefetch(lookup)
        if prefix:
            lookup.add_prefix(prefix)
        normalized.append(lookup)
    return normalized
def prefetch_related_objects(model_instances, *related_lookups):
    """
    Populate prefetched object caches for a list of model instances based on
    the lookups/Prefetch instances given.

    Walks each lookup path level by level, issuing one query per level via
    prefetch_one_level() and memoizing per-path results in done_queries so
    shared path prefixes are only fetched once.
    """
    if len(model_instances) == 0:
        return  # nothing to do
    related_lookups = normalize_prefetch_lookups(related_lookups)
    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {}  # dictionary of things like 'foo__bar': [results]
    auto_lookups = set()  # we add to this as we go through.
    followed_descriptors = set()  # recursion protection
    all_lookups = deque(related_lookups)
    while all_lookups:
        lookup = all_lookups.popleft()
        if lookup.prefetch_to in done_queries:
            if lookup.queryset:
                raise ValueError("'%s' lookup was already seen with a different queryset. "
                                 "You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
            continue
        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = model_instances
        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if len(obj_list) == 0:
                break
            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue
            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except (AttributeError, TypeError):
                        # Must be an immutable object from
                        # values_list(flat=True), for example (TypeError) or
                        # a QuerySet subclass that isn't returning Model
                        # instances (AttributeError), either in Django or a 3rd
                        # party. prefetch_related() doesn't make sense, so quit.
                        good_objects = False
                        break
            if not good_objects:
                break
            # Descend down tree
            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            to_attr = lookup.get_current_to_attr(level)[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to an item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup.prefetch_through)
            if prefetcher is not None and not is_fetched:
                obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (lookup in auto_lookups and descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
                    auto_lookups.update(new_lookups)
                    all_lookups.extendleft(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.
                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    try:
                        new_obj = getattr(obj, through_attr)
                    except exceptions.ObjectDoesNotExist:
                        continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
    """
    For the attribute 'through_attr' on the given instance, find an object
    that has a get_prefetch_queryset().

    Returns a 4 tuple containing:
    (the object with get_prefetch_queryset (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False
    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
    if rel_obj_descriptor is None:
        attr_found = hasattr(instance, through_attr)
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_queryset() method.
            if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
                prefetcher = rel_obj_descriptor
                if rel_obj_descriptor.is_cached(instance):
                    is_fetched = True
            else:
                # descriptor doesn't support prefetching, so we go ahead and get
                # the attribute on the instance rather than the class to
                # support many related managers
                rel_obj = getattr(instance, through_attr)
                if hasattr(rel_obj, 'get_prefetch_queryset'):
                    prefetcher = rel_obj
                if through_attr != to_attr:
                    # Special case cached_property instances because hasattr
                    # triggers attribute computation and assignment.
                    if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
                        is_fetched = to_attr in instance.__dict__
                    else:
                        is_fetched = hasattr(instance, to_attr)
                else:
                    is_fetched = through_attr in instance._prefetched_objects_cache
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects().

    Run prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in each instance.

    Return the prefetched objects along with any additional prefetches that
    must be done due to prefetch_related lookups found from default managers.
    """
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:
    # (queryset of instances of self.model that are related to passed in instances,
    #  callable that gets value to be matched for returned instances,
    #  callable that gets value to be matched for passed in instances,
    #  boolean that is True for singly related objects,
    #  cache name to assign to).
    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.
    rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
        prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    # Copy the lookups in case it is a Prefetch object which could be reused
    # later (happens in nested prefetch_related).
    additional_lookups = [
        copy.copy(additional_lookup) for additional_lookup
        in getattr(rel_qs, '_prefetch_related_lookups', ())
    ]
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = ()
    all_related_objects = list(rel_qs)
    # Index related objects by the value they are matched on, so each parent
    # instance can pick up its own results below in O(1).
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
    to_attr, as_attr = lookup.get_current_to_attr(level)
    # Make sure `to_attr` does not conflict with a field.
    if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to first object applies to all.
        model = instances[0].__class__
        try:
            model._meta.get_field(to_attr)
        except exceptions.FieldDoesNotExist:
            pass
        else:
            msg = 'to_attr={} conflicts with a field on the {} model.'
            raise ValueError(msg.format(to_attr, model.__name__))
    # Whether or not we're prefetching the last part of the lookup.
    leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])
        if single:
            val = vals[0] if vals else None
            to_attr = to_attr if as_attr else cache_name
            setattr(obj, to_attr, val)
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
            else:
                manager = getattr(obj, to_attr)
                if leaf and lookup.queryset is not None:
                    try:
                        apply_rel_filter = manager._apply_rel_filters
                    except AttributeError:
                        warnings.warn(
                            "The `%s.%s` class must implement a `_apply_rel_filters()` "
                            "method that accepts a `QuerySet` as its single "
                            "argument and returns an appropriately filtered version "
                            "of it." % (manager.__class__.__module__, manager.__class__.__name__),
                            RemovedInDjango20Warning,
                        )
                        qs = manager.get_queryset()
                    else:
                        qs = apply_rel_filter(lookup.queryset)
                else:
                    qs = manager.get_queryset()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
class RelatedPopulator(object):
    """
    RelatedPopulator is used for select_related() object instantiation.

    The idea is that each select_related() model will be populated by a
    different RelatedPopulator instance. The RelatedPopulator instances get
    klass_info and select (computed in SQLCompiler) plus the used db as
    input for initialization. That data is used to compute which columns
    to use, how to instantiate the model, and how to populate the links
    between the objects.

    The actual creation of the objects is done in populate() method. This
    method gets row and from_obj as input and populates the select_related()
    model instance.
    """
    def __init__(self, klass_info, select, db):
        self.db = db
        # Pre-compute needed attributes. The attributes are:
        #  - model_cls: the possibly deferred model class to instantiate
        #  - either:
        #    - cols_start, cols_end: usually the columns in the row are
        #      in the same order model_cls.__init__ expects them, so we
        #      can instantiate by model_cls(*row[cols_start:cols_end])
        #    - reorder_for_init: When select_related descends to a child
        #      class, then we want to reuse the already selected parent
        #      data. However, in this case the parent data isn't necessarily
        #      in the same order that Model.__init__ expects it to be, so
        #      we have to reorder the parent data. The reorder_for_init
        #      attribute contains a function used to reorder the field data
        #      in the order __init__ expects it.
        #  - pk_idx: the index of the primary key field in the reordered
        #    model data. Used to check if a related object exists at all.
        #  - init_list: the field attnames fetched from the database. For
        #    deferred models this isn't the same as all attnames of the
        #    model's fields.
        #  - related_populators: a list of RelatedPopulator instances if
        #    select_related() descends to related models from this model.
        #  - cache_name, reverse_cache_name: the names to use for setattr
        #    when assigning the fetched object to the from_obj. If the
        #    reverse_cache_name is set, then we also set the reverse link.
        select_fields = klass_info['select_fields']
        from_parent = klass_info['from_parent']
        if not from_parent:
            # Columns arrive in __init__ order: a simple contiguous slice.
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start:self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            # Child of an inherited model: remap selected columns into the
            # order the model's __init__ expects.
            model_init_attnames = [
                f.attname for f in klass_info['model']._meta.concrete_fields
            ]
            reorder_map = []
            for idx in select_fields:
                field = select[idx][0].target
                init_pos = model_init_attnames.index(field.attname)
                reorder_map.append((init_pos, field.attname, idx))
            reorder_map.sort()
            self.init_list = [v[1] for v in reorder_map]
            pos_list = [row_pos for _, _, row_pos in reorder_map]
            def reorder_for_init(row):
                return [row[row_pos] for row_pos in pos_list]
            self.reorder_for_init = reorder_for_init
        self.model_cls = klass_info['model']
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
        self.related_populators = get_related_populators(klass_info, select, self.db)
        field = klass_info['field']
        reverse = klass_info['reverse']
        self.reverse_cache_name = None
        if reverse:
            self.cache_name = field.remote_field.get_cache_name()
            self.reverse_cache_name = field.get_cache_name()
        else:
            self.cache_name = field.get_cache_name()
            if field.unique:
                self.reverse_cache_name = field.remote_field.get_cache_name()
    def populate(self, row, from_obj):
        # Instantiate the related object from this row (or None when the
        # pk column is NULL, i.e. the relation is absent), then link it.
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start:self.cols_end]
        if obj_data[self.pk_idx] is None:
            obj = None
        else:
            obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
        if obj and self.related_populators:
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        setattr(from_obj, self.cache_name, obj)
        if obj and self.reverse_cache_name:
            setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
    """Build one RelatedPopulator per related class selected in klass_info."""
    return [
        RelatedPopulator(rel_klass_info, select, db)
        for rel_klass_info in klass_info.get('related_klass_infos', [])
    ]
| {
"content_hash": "e0e492963e2736487cfb67e97192a00d",
"timestamp": "",
"source": "github",
"line_count": 1714,
"max_line_length": 115,
"avg_line_length": 40.45565927654609,
"alnum_prop": 0.5872139138460651,
"repo_name": "sarthakmeh03/django",
"id": "7db076cc58dfe6a0079caa6d43cb7e7b75133dbe",
"size": "69341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/models/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52170"
},
{
"name": "HTML",
"bytes": "174451"
},
{
"name": "JavaScript",
"bytes": "251434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11348046"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import MySQLdb as mdb
import sys
import os
# Expect exactly five positional arguments describing one benchmark run.
if len(sys.argv) < 6:
    sys.exit("usage: %s <job_name> <build_number> <workload> <iteration> <runtime>" % sys.argv[0])
# Connection settings come from the environment; a missing variable raises
# KeyError, which is the intended fail-fast behavior.
host = os.environ["MYSQLHOST"]
user = os.environ["MYSQLUSER"]
pwd = os.environ["MYSQLPWD"]
db = os.environ["MYSQLDB"]
con = mdb.connect(host, user, pwd, db)
print "Connected to mysql"
# Using the connection as a context manager commits on success and rolls
# back if the block raises.
with con:
    cur = con.cursor()
    job_name = sys.argv[1]
    build_number = sys.argv[2]
    workload = sys.argv[3]
    iteration = sys.argv[4]
    runtime = sys.argv[5]
    # Parameterized INSERT; the trailing DEFAULT fills the table's last column.
    cur.execute("INSERT INTO kudu_perf_tpch VALUES(%s, %s, %s, %s, %s, DEFAULT)",
                (job_name, build_number, workload, iteration, runtime))
    # NOTE(review): fetchall() after an INSERT yields no rows; this call
    # looks vestigial -- confirm it can be removed.
    rows = cur.fetchall()
| {
"content_hash": "b01dd0434d01a3cc657c5023da212c50",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 96,
"avg_line_length": 27.36,
"alnum_prop": 0.6549707602339181,
"repo_name": "andrwng/kudu",
"id": "a6b092a7a0023909629bc6a6423bccaa13ad01fd",
"size": "1490",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/kudu/scripts/write-jobs-stats-to-mysql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423003"
},
{
"name": "C++",
"bytes": "13958441"
},
{
"name": "CMake",
"bytes": "203485"
},
{
"name": "CSS",
"bytes": "1364"
},
{
"name": "Clojure",
"bytes": "54969"
},
{
"name": "HTML",
"bytes": "24429"
},
{
"name": "Java",
"bytes": "1891723"
},
{
"name": "JavaScript",
"bytes": "5920"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Perl",
"bytes": "32137"
},
{
"name": "Python",
"bytes": "469544"
},
{
"name": "R",
"bytes": "11537"
},
{
"name": "Scala",
"bytes": "115853"
},
{
"name": "Shell",
"bytes": "106664"
},
{
"name": "Thrift",
"bytes": "59110"
}
],
"symlink_target": ""
} |
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from copy import deepcopy
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as T
# Create the OpenAI Gym 9x9 Go environment used throughout this script.
env = gym.make('Go9x9-v0')
# set up matplotlib (use IPython display refresh when running inline)
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display
plt.ion()
# if gpu is to be used
use_cuda = torch.cuda.is_available()
# NOTE(review): this hard-coded override disables CUDA even when available,
# presumably left in for debugging -- confirm before training on GPU.
use_cuda = False
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
######################################################################
# Replay Memory
Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transition tuples."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0
    def push(self, *args):
        """Save a transition, overwriting the oldest one once full."""
        if len(self.memory) == self.capacity:
            self.memory[self.position] = Transition(*args)
        else:
            self.memory.append(Transition(*args))
        self.position = (self.position + 1) % self.capacity
    def sample(self, batch_size):
        """Return batch_size transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)
    def __len__(self):
        return len(self.memory)
######################################################################
# DQN algorithm
class DQN(torch.nn.Module):
    """Convolutional Q-network for 9x9 Go.

    Architecture: 5x5 conv -> ReLU -> ``conv_depth`` 3x3 convs (each with
    ReLU) -> fully connected layer over the flattened feature map ->
    softmax over 9*9 + 2 outputs (board positions plus two extra actions).

    Args:
        input_dim: (batch, channels, height, width) of the expected input;
            only the channel count is used here.
        output_dim: unused; kept for interface compatibility.
        conv_depth: number of intermediate 3x3 conv layers.
    """
    def __init__(self,
                 input_dim=(10, 3, 9, 9),
                 output_dim=None,
                 conv_depth=1):
        super(DQN, self).__init__()
        _, in_channels, in1, in2 = input_dim
        n_filters = 128
        self.start_conv = torch.nn.Conv2d(
            in_channels,
            out_channels=n_filters,
            kernel_size=5,
            padding=2)
        self.start_relu = torch.nn.ReLU()
        # BUG FIX: these were plain Python lists, so the intermediate convs
        # were never registered as submodules -- their parameters did not
        # appear in model.parameters() (and therefore were never given to
        # the optimizer or trained) and were not moved by .cuda()/.to().
        # nn.ModuleList registers them properly.
        self.mid_convs = torch.nn.ModuleList()
        self.relus = torch.nn.ModuleList()
        for i in range(conv_depth):
            self.mid_convs.append(torch.nn.Conv2d(
                n_filters,
                out_channels=n_filters,
                kernel_size=3,
                padding=1))
            self.relus.append(torch.nn.ReLU())
        # NOTE: last_conv/end_relu are not used by forward(); kept so the
        # module's attribute set (and state dict keys) stay unchanged.
        self.last_conv = torch.nn.Conv2d(
            n_filters,
            out_channels=1,
            kernel_size=1)
        self.end_relu = torch.nn.ReLU()
        self.fc = torch.nn.Linear(128*9*9, 9*9+1+1)
        self.softmax = torch.nn.Softmax(dim=1)
    def forward(self, x):
        """Return a (batch, 83) action probability distribution for x."""
        x = self.start_conv(x)
        x = self.start_relu(x)
        for conv, relu in zip(self.mid_convs, self.relus):
            x = conv(x)
            x = relu(x)
        x = x.view(-1, 128*9*9)
        x = self.fc(x)
        x = self.softmax(x)
        return x
    def cuda(self):
        """Move the network to GPU.

        With ModuleList registration, super().cuda() now moves the
        intermediate convs too, so no manual loop is needed.
        """
        return super(DQN, self).cuda()
######################################################################
# Training
# Hyperparameters: batch size, discount factor, and epsilon-greedy
# exploration schedule (EPS_START decays towards EPS_END over EPS_DECAY
# steps; see select_action below).
BATCH_SIZE = 128
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
model = DQN()
if use_cuda:
    model.cuda()
optimizer = optim.RMSprop(model.parameters())
# Experience replay buffer shared by the training loop below.
memory = ReplayMemory(10000)
# Global step counter used by select_action() to anneal epsilon.
steps_done = 0
def select_action(state):
    """Epsilon-greedy action selection using the policy network.

    With probability (1 - eps) returns the argmax action as a 1x1
    LongTensor; otherwise returns a random action.
    """
    global steps_done
    sample = random.random()
    # Exponentially anneal epsilon from EPS_START towards EPS_END.
    eps_threshold = EPS_END + (EPS_START - EPS_END) * \
        math.exp(-1. * steps_done / EPS_DECAY)
    steps_done += 1
    if sample > eps_threshold:
        return model(
            Variable(state, volatile=True).type(FloatTensor)).data.max(1)[1].view(1, 1)
    else:
        # NOTE(review): randrange(2) only ever explores actions 0 and 1,
        # while the network outputs 9*9+2 = 83 actions. This looks like a
        # leftover from a CartPole example -- confirm the intended action
        # space and widen the range accordingly.
        return LongTensor([[random.randrange(2)]])
# One entry appended per finished episode by the training loop below.
episode_durations = []
def plot_durations():
    """Plot recorded episode values (and a 100-episode moving average)."""
    plt.figure(2)
    plt.clf()
    durations_t = torch.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        # Pad with zeros so the average curve aligns with the raw curve.
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())
    plt.pause(0.001)  # pause a bit so that plots are updated
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())
######################################################################
# Training loop
# NOTE(review): last_sync is declared global below but never read or
# written -- it appears to be dead code from a target-network variant.
last_sync = 0
def optimize_model():
    """Run one DQN optimization step on a replay-buffer minibatch.

    No-op until the buffer holds at least BATCH_SIZE transitions. Uses the
    pre-0.4 PyTorch Variable/volatile API throughout.
    """
    global last_sync
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
    # detailed explanation).
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    non_final_mask = ByteTensor(tuple(map(lambda s: s is not None,
                                          batch.next_state)))
    # We don't want to backprop through the expected action values and volatile
    # will save us on temporarily changing the model parameters'
    # requires_grad to False!
    non_final_next_states = Variable(torch.cat([s for s in batch.next_state
                                                if s is not None]),
                                     volatile=True)
    state_batch = Variable(torch.cat(batch.state))
    action_batch = Variable(torch.cat(batch.action))
    reward_batch = Variable(torch.cat(batch.reward))
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken
    state_action_values = model(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states.
    next_state_values = Variable(torch.zeros(BATCH_SIZE).type(Tensor))
    next_state_values[non_final_mask] = model(non_final_next_states).max(1)[0]
    # Now, we don't want to mess up the loss with a volatile flag, so let's
    # clear it. After this, we'll just end up with a Variable that has
    # requires_grad=False
    next_state_values.volatile = False
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # Clamp gradients element-wise to stabilize training.
    for param in model.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
######################################################################
#
# Below, you can find the main training loop. At the beginning we reset
# the environment and initialize the ``state`` variable. Then, we sample
# an action, execute it, observe the next screen and the reward (always
# 1), and optimize our model once. When the episode ends (our model
# fails), we restart the loop.
#
# Below, `num_episodes` is set small. You should download
# the notebook and run lot more episodes.
num_episodes = 1000
for i_episode in range(num_episodes):
    # Initialize the environment and state
    state_np = env.reset()
    # Add a batch dimension: state becomes (1, channels, 9, 9).
    state = torch.from_numpy(np.array([state_np])).float()
    for t in count():
        # Select and perform an action
        action = select_action(state)
        obs, reward, done, _ = env.step(action[0, 0])
        reward = Tensor([reward])
        # Observe new state
        if not done:
            next_state = torch.from_numpy(np.array([obs])).float()
        else:
            next_state = None
        # Store the transition in memory
        memory.push(state, action, next_state, reward)
        # Move to the next state
        state = next_state
        # Perform one step of the optimization (on the target network)
        optimize_model()
        if done:
            # NOTE(review): the final reward (not the episode length t) is
            # appended despite the list's name -- confirm intent.
            episode_durations.append(reward[0])
            plot_durations()
            break
print('Complete')
env.render(close=True)
env.close()
plt.ioff()
plt.show()
| {
"content_hash": "6500da7a355ef68427214d57f8b4da38",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 87,
"avg_line_length": 29.154676258992804,
"alnum_prop": 0.5895126465144972,
"repo_name": "nathbo/GO_DILab",
"id": "79bffae1ddd5849ebc93c5a99cd5b594af6e2774",
"size": "8186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/explore/go_dqn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "496"
},
{
"name": "Python",
"bytes": "320762"
},
{
"name": "Shell",
"bytes": "2336"
}
],
"symlink_target": ""
} |
"""Start Home Assistant."""
import argparse
import asyncio
import os
import platform
import subprocess
import sys
import threading
from typing import List
from homeassistant.const import REQUIRED_PYTHON_VER, RESTART_EXIT_CODE, __version__
def set_loop() -> None:
    """Attempt to use different loop."""
    from asyncio.events import BaseDefaultEventLoopPolicy

    if sys.platform != "win32":
        return

    if hasattr(asyncio, "WindowsProactorEventLoopPolicy"):
        # pylint: disable=no-member
        policy = asyncio.WindowsProactorEventLoopPolicy()
    else:
        class ProactorPolicy(BaseDefaultEventLoopPolicy):
            """Event loop policy to create proactor loops."""

            _loop_factory = asyncio.ProactorEventLoop

        policy = ProactorPolicy()

    asyncio.set_event_loop_policy(policy)
def validate_python() -> None:
    """Validate that the right Python version is running."""
    if sys.version_info[:3] >= REQUIRED_PYTHON_VER:
        return
    print(
        "Home Assistant requires at least Python {}.{}.{}".format(
            *REQUIRED_PYTHON_VER
        )
    )
    sys.exit(1)
def ensure_config_path(config_dir: str) -> None:
    """Validate the configuration directory, creating it when possible."""
    import homeassistant.config as config_util

    lib_dir = os.path.join(config_dir, "deps")

    if not os.path.isdir(config_dir):
        # Only the default location may be auto-created; a user-specified
        # directory must already exist.
        if config_dir != config_util.get_default_config_dir():
            print(
                f"Fatal Error: Specified configuration directory {config_dir} "
                "does not exist"
            )
            sys.exit(1)
        try:
            os.mkdir(config_dir)
        except OSError:
            print(
                "Fatal Error: Unable to create default configuration "
                f"directory {config_dir}"
            )
            sys.exit(1)

    # Ensure the dependency library directory exists as well.
    if os.path.isdir(lib_dir):
        return
    try:
        os.mkdir(lib_dir)
    except OSError:
        print(f"Fatal Error: Unable to create library directory {lib_dir}")
        sys.exit(1)
def get_arguments() -> argparse.Namespace:
    """Get parsed passed in arguments.

    The `--daemon` flag only exists on POSIX systems; it is additionally
    forced to False in debug or runner mode (see the final block).
    """
    import homeassistant.config as config_util
    parser = argparse.ArgumentParser(
        description="Home Assistant: Observe, Control, Automate."
    )
    parser.add_argument("--version", action="version", version=__version__)
    parser.add_argument(
        "-c",
        "--config",
        metavar="path_to_config_dir",
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration",
    )
    parser.add_argument(
        "--safe-mode", action="store_true", help="Start Home Assistant in safe mode"
    )
    parser.add_argument(
        "--debug", action="store_true", help="Start Home Assistant in debug mode"
    )
    parser.add_argument(
        "--open-ui", action="store_true", help="Open the webinterface in a browser"
    )
    parser.add_argument(
        "--skip-pip",
        action="store_true",
        help="Skips pip install of required packages on startup",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enable verbose logging to file."
    )
    parser.add_argument(
        "--pid-file",
        metavar="path_to_pid_file",
        default=None,
        help="Path to PID file useful for running as daemon",
    )
    parser.add_argument(
        "--log-rotate-days",
        type=int,
        default=None,
        help="Enables daily log rotation and keeps up to the specified days",
    )
    parser.add_argument(
        "--log-file",
        type=str,
        default=None,
        help="Log file to write to. If not set, CONFIG/home-assistant.log is used",
    )
    parser.add_argument(
        "--log-no-color", action="store_true", help="Disable color logs"
    )
    parser.add_argument(
        "--runner",
        action="store_true",
        help=f"On restart exit with code {RESTART_EXIT_CODE}",
    )
    parser.add_argument(
        "--script", nargs=argparse.REMAINDER, help="Run one of the embedded scripts"
    )
    # Daemonizing relies on os.fork(), so the flag is POSIX-only.
    if os.name == "posix":
        parser.add_argument(
            "--daemon", action="store_true", help="Run Home Assistant as daemon"
        )
    arguments = parser.parse_args()
    # Normalize: guarantee `daemon` exists and is off where unsupported.
    if os.name != "posix" or arguments.debug or arguments.runner:
        setattr(arguments, "daemon", False)
    return arguments
def daemonize() -> None:
    """Move current process to daemon process.

    Classic POSIX double-fork: the first fork lets the parent exit, setsid()
    detaches from the controlling terminal, and the second fork ensures the
    daemon can never reacquire one. Standard streams are redirected to
    /dev/null.
    """
    # Create first fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)
    # Decouple fork
    os.setsid()
    # Create second fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)
    # redirect standard file descriptors to devnull
    infd = open(os.devnull, "r")
    outfd = open(os.devnull, "a+")
    # Flush before dup2 so buffered output isn't lost or duplicated.
    sys.stdout.flush()
    sys.stderr.flush()
    os.dup2(infd.fileno(), sys.stdin.fileno())
    os.dup2(outfd.fileno(), sys.stdout.fileno())
    os.dup2(outfd.fileno(), sys.stderr.fileno())
def check_pid(pid_file: str) -> None:
    """Exit fatally if another Home Assistant instance owns pid_file."""
    try:
        with open(pid_file, "r") as handle:
            pid = int(handle.readline())
    except OSError:
        return  # no pidfile -> nothing is running

    if pid == os.getpid():
        return  # we just restarted and found our own pidfile

    try:
        os.kill(pid, 0)  # signal 0 only probes for process existence
    except OSError:
        return  # stale pidfile: that process is gone

    print("Fatal Error: Home Assistant is already running.")
    sys.exit(1)
def write_pid(pid_file: str) -> None:
    """Record the current process id in pid_file; exit fatally on failure."""
    try:
        with open(pid_file, "w") as handle:
            handle.write(str(os.getpid()))
    except OSError:
        print(f"Fatal Error: Unable to write pid file {pid_file}")
        sys.exit(1)
def closefds_osx(min_fd: int, max_fd: int) -> None:
    """Make sure file descriptors get closed when we restart.

    We cannot call close on guarded fds, and we cannot easily test which fds
    are guarded. But we can set the close-on-exec flag on everything we want
    to get rid of.
    """
    from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC

    for fd in range(min_fd, max_fd):
        try:
            flags = fcntl(fd, F_GETFD)
            if not flags & FD_CLOEXEC:
                fcntl(fd, F_SETFD, flags | FD_CLOEXEC)
        except OSError:
            # fd not open (or not modifiable); skip it.
            pass
def cmdline() -> List[str]:
    """Collect path and arguments to re-execute the current hass instance."""
    args = [arg for arg in sys.argv if arg != "--daemon"]
    if os.path.basename(sys.argv[0]) != "__main__.py":
        return args
    # Launched as a package: re-run via the interpreter and make sure the
    # package directory stays importable.
    modulepath = os.path.dirname(sys.argv[0])
    os.environ["PYTHONPATH"] = os.path.dirname(modulepath)
    return [sys.executable] + args
async def setup_and_run_hass(config_dir: str, args: argparse.Namespace) -> int:
    """Set up Home Assistant and run.

    Returns the process exit code (1 when bootstrap fails to produce an
    instance).
    """
    from homeassistant import bootstrap
    hass = await bootstrap.async_setup_hass(
        config_dir=config_dir,
        verbose=args.verbose,
        log_rotate_days=args.log_rotate_days,
        log_file=args.log_file,
        log_no_color=args.log_no_color,
        skip_pip=args.skip_pip,
        safe_mode=args.safe_mode,
    )
    if hass is None:
        return 1
    if args.open_ui and hass.config.api is not None:
        # Imported lazily: only needed when the user asked to open the UI.
        import webbrowser
        hass.add_job(webbrowser.open, hass.config.api.base_url)
    return await hass.async_run()
def try_to_restart() -> None:
    """Attempt to clean up state and start a new Home Assistant instance.

    On success this function never returns: os.execv() replaces the current
    process image. On failure the caller falls through and exits with
    RESTART_EXIT_CODE.
    """
    # Things should be mostly shut down already at this point, now just try
    # to clean up things that may have been left behind.
    sys.stderr.write("Home Assistant attempting to restart.\n")
    # Count remaining threads, ideally there should only be one non-daemonized
    # thread left (which is us). Nothing we really do with it, but it might be
    # useful when debugging shutdown/restart issues.
    try:
        nthreads = sum(
            thread.is_alive() and not thread.daemon for thread in threading.enumerate()
        )
        if nthreads > 1:
            sys.stderr.write(f"Found {nthreads} non-daemonic threads.\n")
    # Somehow we sometimes seem to trigger an assertion in the python threading
    # module. It seems we find threads that have no associated OS level thread
    # which are not marked as stopped at the python level.
    except AssertionError:
        sys.stderr.write("Failed to count non-daemonic threads.\n")
    # Try to not leave behind open filedescriptors with the emphasis on try.
    try:
        max_fd = os.sysconf("SC_OPEN_MAX")
    except ValueError:
        max_fd = 256
    if platform.system() == "Darwin":
        closefds_osx(3, max_fd)
    else:
        os.closerange(3, max_fd)
    # Now launch into a new instance of Home Assistant. If this fails we
    # fall through and exit with error 100 (RESTART_EXIT_CODE) in which case
    # systemd will restart us when RestartForceExitStatus=100 is set in the
    # systemd.service file.
    sys.stderr.write("Restarting Home Assistant\n")
    args = cmdline()
    os.execv(args[0], args)
def main() -> int:
    """Start Home Assistant.

    Returns the process exit code; RESTART_EXIT_CODE triggers an in-place
    restart (except in runner mode, where the supervisor handles it).
    """
    validate_python()
    set_loop()
    # Run a simple daemon runner process on Windows to handle restarts
    if os.name == "nt" and "--runner" not in sys.argv:
        nt_args = cmdline() + ["--runner"]
        while True:
            try:
                subprocess.check_call(nt_args)
                sys.exit(0)
            except KeyboardInterrupt:
                sys.exit(0)
            except subprocess.CalledProcessError as exc:
                # Any exit code other than "please restart" is final.
                if exc.returncode != RESTART_EXIT_CODE:
                    sys.exit(exc.returncode)
    args = get_arguments()
    if args.script is not None:
        from homeassistant import scripts
        return scripts.run(args.script)
    config_dir = os.path.abspath(os.path.join(os.getcwd(), args.config))
    ensure_config_path(config_dir)
    # Daemon functions
    if args.pid_file:
        check_pid(args.pid_file)
    if args.daemon:
        daemonize()
    # Write the pidfile after daemonizing so it records the daemon's pid.
    if args.pid_file:
        write_pid(args.pid_file)
    exit_code = asyncio.run(setup_and_run_hass(config_dir, args))
    if exit_code == RESTART_EXIT_CODE and not args.runner:
        try_to_restart()
    return exit_code
if __name__ == "__main__":
    # Propagate Home Assistant's exit code to the shell / init system.
    sys.exit(main())
| {
"content_hash": "35830109d60f8e34896f7eca253631e3",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 87,
"avg_line_length": 30.237142857142857,
"alnum_prop": 0.6101294528961542,
"repo_name": "postlund/home-assistant",
"id": "d1d59482e6d077b7540559ff191603fcab80e866",
"size": "10583",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
"""symbal.py: Check for braces to be balanced"""
from stack import Stack
__author__ = 'Rohit Sinha'
def match_braces(opener, closer):
    """Return True when *closer* is the closing brace matching *opener*."""
    openers = "({["
    closers = ")}]"
    # Matching pairs occupy the same position in the two strings.
    return openers.index(opener) == closers.index(closer)
def brace_checker(brace_string):
    """Return True if every brace in *brace_string* is properly balanced.

    Each opening brace must be closed by its matching closer in LIFO
    order.  Characters other than "({[" and ")}]" are ignored, so text
    such as "f(x[0])" can be checked directly.  (The previous version
    raised ValueError on any non-brace character and relied on a
    hand-rolled Stack class; a plain list is the idiomatic stack.)

    Parameters
    ----------
    brace_string: the string to check

    Returns
    -------
    True if balanced, False otherwise.
    """
    # Map each closer to the opener it must pair with.
    pairs = {")": "(", "}": "{", "]": "["}
    stack = []
    for char in brace_string:
        if char in "({[":
            stack.append(char)
        elif char in pairs:
            # A closer must pair with the most recent unmatched opener.
            if not stack or stack.pop() != pairs[char]:
                return False
        # Any other character is not a brace; skip it.
    # Balanced only if no opener was left unmatched.
    return not stack
# Smoke test: the first string is balanced and should print True.
if __name__ == '__main__':
    print(brace_checker("([({})])"))
print(brace_checker("[({)]")) | {
"content_hash": "57920f1f2f78229a59c557e5e112b978",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 65,
"avg_line_length": 24.742857142857144,
"alnum_prop": 0.5311778290993071,
"repo_name": "rohitsinha54/Learning-Python",
"id": "e1560ddb7d2b490d418639ec12da095cf6aac913",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/symbal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11455"
}
],
"symlink_target": ""
} |
import basic
import heapq
def _dijkstra_reconstruct(previous, source, target):
path = []
u = target
try:
while previous[u]:
path.append(u)
u = previous[u]
except:
pass
path.append(source)
path.reverse()
return path
def dijkstra(source, target=None):
    """Implements Dijsktra's algorithm for finding the shortest distance
    between a source node and every node it is connected to.
    If a target is supplied, then a reconstructed path from the source
    to the target will be returned as a list of nodes. Otherwise, a
    dictionary with connected nodes as the keys and cumulative distance
    as the values will be returned.
    Parameters
    ----------
    source: the node to find shortest paths from
    target: the goal node to find a path to
    Returns
    -------
    A list of nodes OR a dictionary of nodes"""
    # Enumerate every node reachable from source so each one can be seeded
    # into the priority queue with an infinite tentative distance.
    everything = basic.BFS(source)
    dist = []          # heap of (tentative_distance, node) entries
    visited = set()    # nodes whose shortest distance is finalized
    results = {}       # node -> final cumulative distance (no-target mode)
    previous = {}      # node -> predecessor, for path reconstruction
    initial_entry = (0, source)
    heapq.heappush(dist, (0, source))
    heap_lookup = {}
    heap_lookup[source] = initial_entry
    for node in everything:
        if node != source:
            entry = (float('inf'), node)
            heapq.heappush(dist, entry)
            # use a lookup table to keep track of the heap entry for each node
            heap_lookup[node] = entry
    # NOTE(review): heap entries are (distance, node) tuples, so ties fall
    # back to comparing the node objects -- assumes nodes are orderable.
    while dist:
        u = heapq.heappop(dist)
        if target:
            if u[1] == target:
                return _dijkstra_reconstruct(previous, source, target)
        # An infinite entry means every remaining node is unreachable.
        if u[0] == float('inf'):
            break
        # Stale entries (superseded by a smaller push below) are skipped
        # here instead of being deleted from the heap -- lazy deletion.
        if u[1] in visited:
            continue
        for edge in u[1].edges():
            # NOTE(review): assumes edges are oriented outward from the
            # popped node, i.e. u[1] is edge.node_1 -- confirm in `basic`.
            other_node = edge.node_2
            if other_node in visited:
                continue
            alt = heap_lookup[u[1]][0] + edge.cost
            if alt < heap_lookup[other_node][0]:
                entry = (alt, other_node)
                # since the default entry is infinity, why bother deleting it?
                # we know the item we push in will always be smaller
                heapq.heappush(dist, entry)
                # update the lookup table to the new entry
                heap_lookup[other_node] = entry
                previous[other_node] = u[1]
        visited.add(u[1])
    # Collapse the lookup table into the distances dictionary.
    for h in heap_lookup:
        results[h] = heap_lookup[h][0]
    # NOTE(review): reaching this point with a target means no path was
    # found; an empty list is returned in that case.
    if target:
        return []
return results | {
"content_hash": "92d249009f641cdb35579caeaefb0447",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 31.81578947368421,
"alnum_prop": 0.5711331679073615,
"repo_name": "mrgaaron/vgraph",
"id": "4bc655318409907fc813e13f61a503187ffb8342",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vgraph/algorithms/paths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108244"
}
],
"symlink_target": ""
} |
"""
"""
import tables, os, distutils.dir_util, scipy
class h5file():
    """Thin read/append helper around a PyTables HDF5 file.

    NOTE: uses the legacy PyTables 2.x camelCase API (tables.openFile,
    walkGroups, listNodes); modern PyTables renamed these (open_file, ...)
    -- confirm the installed PyTables version before upgrading.
    """
    def __init__(self,fname):
        """ initialization function """
        # Path of the HDF5 file; the handle stays None until openFile().
        self.fname = fname
        self.fhandle = None
        return
    def openFile(self):
        """ open file self.fname """
        # Append mode: creates the file if missing, keeps existing content.
        self.fhandle = tables.openFile(self.fname, mode = "a")
        return
    def readWholeh5file(self):
        """Read every Array node into a nested dict keyed by group path.

        Returns {group_path: {array_name: array_data}} for all groups.
        Uses a private read handle; self.fhandle is not touched.
        """
        # NOTE(review): this local shadows the enclosing class name.
        h5file=tables.openFile(self.fname)
        output={}
        for group in h5file.walkGroups("/"):
            output[group._v_pathname]={}
            for array in h5file.listNodes(group, classname = 'Array'):
                output[group._v_pathname][array.name]=array.read()
        h5file.close()
        return output
class outputFileClass:
    """Writes static and appendable (dynamic) arrays to an HDF5 output file.

    Uses the legacy PyTables 2.x camelCase API (openFile/createArray/...).
    Configure ``h5Paths`` and ``h5Attribs`` before calling the create*
    helpers, then createFile() -> createh5groups() -> write arrays.
    """
    def __init__(self):
        """Initialize with no target file and empty group/attribute specs."""
        # Values are (group_path, title) pairs; see createh5groups().
        self.h5Paths = {}
        # node path -> iterable of (attr_name, attr_value) pairs (presumed
        # from setAtrributes usage -- confirm with callers).
        self.h5Attribs = {}
        self.fname = ''
        self.title = ''
        self.fhandle = None

    def createFile(self, fname):
        """Create (truncate) *fname*, making parent directories as needed.

        Raises IOError if the output path cannot be created.
        """
        self.fname = fname
        try:
            distutils.dir_util.mkpath(os.path.dirname(self.fname))
        except Exception:
            # Python-2-only ``raise IOError, msg`` replaced with the call
            # form, which is valid in both Python 2 and 3.
            raise IOError('Unable to create output path %s' % os.path.dirname(self.fname))
        self.fhandle = tables.openFile(self.fname, mode="w", title=self.title)

    def openFile(self):
        """Re-open self.fname for appending."""
        self.fhandle = tables.openFile(self.fname, mode="a")

    def closeFile(self):
        """Close the underlying PyTables file handle."""
        self.fhandle.close()

    def createh5groups(self):
        """Create every group listed in self.h5Paths.

        Sorting puts parent paths before their children so createGroup
        never references a missing parent group.
        """
        for group_path, group_title in sorted(self.h5Paths.values()):
            parent, name = os.path.split(group_path)
            self.fhandle.createGroup(parent, name, group_title)

    def createStaticArray(self, path, data, keys2do=None):
        """Write fixed (non-appendable) array(s).

        With no *keys2do*, *data* itself is written at *path*; otherwise
        *path* is treated as a group and data[key] is written for each
        listed key.  (Default changed from a mutable [] to None; behavior
        is unchanged for all callers.)
        """
        if not keys2do:
            dp, dn = os.path.split(path)
            self.fhandle.createArray(dp, dn, data, 'Static array')
        else:
            for key in keys2do:
                self.fhandle.createArray(path, key, data[key], 'Static array')

    def createDynamicArray(self, path, rec, keys2do=None):
        """Append one record (integration) to extendable array(s).

        With no *keys2do* the record *rec* is appended at *path*;
        otherwise rec[key] is appended under group *path* for each key.
        Arrays are created on first use and are widened along axis 2
        (NaN padded) when record widths differ between calls.
        """
        if not keys2do:
            group, name = os.path.split(path)
            data = rec.copy()
            data.shape = (1,) + data.shape  # prepend the integration axis
            self._appendDynamic(group, name, path, data)
        else:
            for key in keys2do:
                data = scipy.array(rec[key])
                data.shape = (1,) + data.shape  # prepend the integration axis
                self._appendDynamic(path, key, path + '/' + key, data)

    def _appendDynamic(self, group, name, nodepath, data):
        """Append *data* to the EArray at *nodepath*, creating or resizing
        the stored array as needed.  Factors out the logic previously
        duplicated between the two createDynamicArray branches.
        """
        if not self.fhandle.__contains__(nodepath):
            shape = list(data.shape)
            shape[0] = 0  # zero-length extendable first dimension
            atom = tables.Atom.from_dtype(data.dtype)
            arr = self.fhandle.createEArray(group, name, atom, shape)
            arr.flavor = 'numpy'
        arr = self.fhandle.getNode(nodepath)
        if (len(arr.shape) > 2) and (data.shape[2] != arr.shape[2]):
            if data.shape[2] > arr.shape[2]:
                # Incoming record is wider: rebuild the stored array with
                # NaN padding, then re-append its old contents.
                tarr = arr.read()
                arr.remove()
                tshape = list(tarr.shape)
                tshape[2] = data.shape[2] - tarr.shape[2]
                tarr = scipy.append(tarr, scipy.zeros(tshape) * scipy.nan, axis=2)
                shape = list(tarr.shape)
                shape[0] = 0
                atom = tables.Atom.from_dtype(tarr.dtype)
                arr = self.fhandle.createEArray(group, name, atom, shape)
                arr.flavor = 'numpy'
                arr = self.fhandle.getNode(nodepath)
                arr.append(tarr)
            else:
                # Incoming record is narrower: NaN-pad it to match.
                tshape = list(data.shape)
                tshape[2] = arr.shape[2] - data.shape[2]
                data = scipy.append(data, scipy.zeros(tshape) * scipy.nan, axis=2)
        arr.append(data)

    def setAtrributes(self):
        """Best-effort application of self.h5Attribs to their nodes.

        NOTE: the misspelled method name is kept for backward
        compatibility with existing callers.
        """
        for node_path in self.h5Attribs.keys():
            for attr in self.h5Attribs[node_path]:
                try:
                    self.fhandle.setNodeAttr(node_path, attr[0], attr[1])
                except Exception:
                    # Deliberately best-effort: unresolvable nodes are
                    # skipped (was a silent bare ``except: ''``).
                    pass
| {
"content_hash": "c5b31c09293e91cb88c861bf1d9203f8",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 90,
"avg_line_length": 38.90196078431372,
"alnum_prop": 0.48403897849462363,
"repo_name": "jswoboda/NonMaxwellianExperiments",
"id": "c5573304fb529f2c861e0530919fa094b8b219ef",
"size": "5975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ioclass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55779"
}
],
"symlink_target": ""
} |
# Public API of this module.
__all__ = [
    "OATime",
    "OATimer"
]
import datetime as coretime
import time
from ._env import oalog
class OATime(object):
"""Executes time queries on database, returning
consistent time view to caller"""
def __init__(self, dt=None, extcur=None):
self.cur = extcur
self.dt = dt
@property
def now(self):
if self.cur is None:
from ._dao import OADao
self.cur = OADao("openarc").cur
self.cur.execute(self.SQL.get_current_time)
return self.cur.fetchall()[0]['timezone']
def to_unixtime(self):
ms = (self.dt.microsecond/1000000.0)
timetuple = coretime.mktime(self.dt.timetuple())
return timetuple + ms
class SQL(object):
get_current_time =\
"select now() at time zone 'utc'"
class OATimer:
    """Context manager that logs elapsed wall-clock time in milliseconds.

    On exit the elapsed interval is written via oalog, suffixed with the
    optional identifier supplied at construction.
    """

    def __init__(self, identifier=None):
        # Optional label included in the logged line.
        self.identifier = identifier

    def __enter__(self):
        # time.clock() was deprecated since Python 3.3 and removed in 3.8;
        # perf_counter() is the documented replacement for interval timing.
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start
        suffix = f"- [{self.identifier}]" if self.identifier is not None else str()
        oalog.info(f"{self.interval*1000:>5.0f} {suffix}")
| {
"content_hash": "51b4e288c8ef2dd54c96d6b441a4289b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 26.48936170212766,
"alnum_prop": 0.5879518072289157,
"repo_name": "kchoudhu/openarc",
"id": "08f822c968c739537251f51f18fb53f982bfee56",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openarc/time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1507"
},
{
"name": "Python",
"bytes": "185208"
}
],
"symlink_target": ""
} |
"""
views
~~~~~
Routes & views for this app!
"""
from flask import (
Blueprint, render_template, jsonify, request, current_app
)
from jinja2 import TemplateNotFound
import voweler
from string import ascii_lowercase
import json
# Blueprint registered by the app; templates resolve relative to this
# package's templates/ directory.
simple_page = Blueprint('simple_page', __name__,
                        template_folder='templates')
def render_score(score):
    """Format a 0..1 fraction as a whole-number percentage string."""
    percentage = score * 100.0
    return format(percentage, ".0f") + "%"
@simple_page.route('/')
def base():
    """Serve the single-page app shell from the app's static folder."""
    # serve the single page app
    return current_app.send_static_file('index.html')
@simple_page.route('/state')
def state():
    """
    Optionally update model via 'update' querystring arg
    Returns (after update):
        weights of model
        current predicted vowel list
    """
    next_letter = request.args.get('update')
    if next_letter:
        # Train the perceptron on the submitted letter.
        current_app.vp.handle_letter(next_letter)
    # NOTE(review): `vocab` is never used below; handle_letter is called
    # with update=False, so this looks like dead code -- confirm that the
    # call has no side effects before removing it.
    vocab = [
        (e, current_app.vp.handle_letter(e, update=False))
        for e in ascii_lowercase
    ]
    current_score = voweler.separability.get_perceptron_accuracy(current_app.vp)
    # Project the embedding onto 1-D for the front-end scatter plot.
    scatter_data = voweler.separability.project_to_1d(
        current_app.vp.to_vec, current_app.vp.w
    )
    return jsonify(
        weights=[round(x, 2) for x in current_app.vp.w],
        # Letters the current model classifies as vowels (prediction == 1.).
        vowels=[
            l
            for l in ascii_lowercase
            if current_app.vp.handle_letter(l, update=False) == 1.
        ],
        scatter_data=scatter_data,
        current_score=render_score(current_score)
    )
@simple_page.route('/size')
def set_size():
    """Reset the perceptron, optionally with a new embedding width.

    Reads the 'size' querystring arg; when present it becomes the new
    input_width.  Returns the fresh (untrained) model's scatter data and
    separability score.
    """
    size = request.args.get('size')
    perceptron_kwargs = {}
    if size:
        # NOTE(review): int(size) raises ValueError on non-numeric input,
        # which surfaces as a 500 -- consider returning a 400 instead.
        perceptron_kwargs['input_width'] = int(size)
    # Replacing current_app.vp discards all prior training state.
    current_app.vp = voweler.perceptron.VowelPerceptron(**perceptron_kwargs)
    (x_data, y_data, score) = voweler.separability.assess_embedding(
        current_app.vp.to_vec)
    return jsonify(
        reset=True,
        scatter_data=voweler.separability.get_c3_payload(x_data, y_data),
        score=render_score(score)
    )
| {
"content_hash": "1a74491cf9c7b7c89cfc5d316dfc4c50",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 26.83783783783784,
"alnum_prop": 0.6299093655589124,
"repo_name": "bhtucker/perceptron_viz",
"id": "ff87071c90cdd3576813448fba89734e202ffbc9",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voweler/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "404"
},
{
"name": "HTML",
"bytes": "5750"
},
{
"name": "JavaScript",
"bytes": "9908"
},
{
"name": "Python",
"bytes": "8153"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name = 'Raspberry_SDK',
packages = ['Raspberry_SDK'],
version = '0.1.4',
description = 'Countly IoT Analytics SDK for Raspberry Pi devices',
author = 'Countly',
author_email = 'hello@count.ly',
url = 'https://github.com/countly/countly-sdk-python',
download_url = 'https://github.com/countly/countly-sdk-python/tarball/0.1.4',
keywords = ['countly', 'iot', 'pyhon'],
classifiers = [],
) | {
"content_hash": "3c997493ae72fbd325587f9a1889ef9e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 34.30769230769231,
"alnum_prop": 0.6748878923766816,
"repo_name": "Countly/countly-sdk-iot-python",
"id": "ea0a322e1fc038e80397da0608baa2f21154af3e",
"size": "446",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "raspberry_pi/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9741"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.