"""
hellofriend.py
Author: Sam Supattapone
Credit: J.C. Napier
Assignment: Hello, friend!
Write and submit an interactive Python program that asks for the user's name and age,
then prints how much older Python is than the user (based on a simple comparison of
birth year). Python's first public release occurred in 1991. Something like this:
Please tell me your name: Guido
Please tell me your age: 16
Hello, Guido. Python is 8 years older than you are!
Note that the values "Guido" and "16" are entered by the user running the program.
The final line ("Hello...") is generated dynamically when you run the program, based
on the name and age that the user enters.
"""
user = input("Please tell me your name: ")
age = input("Please tell me your age: ")
num = str(24 - int(age))  # 24 = Python's age as of 2015 (first public release in 1991)
print("Hello, "+user+". Python is "+num+" years older than you are!") | {
"content_hash": "da55094b998494eee152940a3a78a72a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 86,
"avg_line_length": 36.91304347826087,
"alnum_prop": 0.7373380447585395,
"repo_name": "SSupattapone/Hello-friend",
"id": "243d01527c64c853484e9655f8868996358da2a8",
"size": "849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hellofriend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "849"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import re
import sys
import tarfile
import urllib
from urllib import request
from urllib import parse
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Please install BeautifulSoup (apt-get install python3-bs4 or pip install beautifulsoup4 should do it)")
parser = argparse.ArgumentParser(description='Docenizes HTML version of the official Intel Asm PDFs')
parser.add_argument('-i', '--inputfolder', type=str,
help='Folder where the input files reside as .html. Default is ./asm-docs/',
default='asm-docs')
parser.add_argument('-o', '--outputpath', type=str, help='Final path of the .js file. Default is ./asm-docs.js',
default='./asm-docs.js')
parser.add_argument('-d', '--downloadfolder', type=str,
help='Folder where the archive will be downloaded and extracted', default='asm-docs')
# The maximum number of paragraphs from the description to copy.
MAX_DESC_PARAS = 5
STRIP_PREFIX = re.compile(r'^(([0-9a-fA-F]{2}|m64|NP|(REX|E?VEX\.)[.0-9A-Z]*|/[0-9a-z]+|[a-z]+)\b\s*)*')
INSTRUCTION_RE = re.compile(r'^([A-Z][A-Z0-9]+)\*?(\s+|$)')
# Some instructions are so broken we just take their names from the filename
UNPARSEABLE_INSTR_NAMES = ['PSRLW:PSRLD:PSRLQ', 'PSLLW:PSLLD:PSLLQ', 'MOVBE']
# Some files contain instructions which cannot be parsed and which compilers are unlikely to emit
IGNORED_FILE_NAMES = [
# SGX pseudo-instructions
"EADD",
"EACCEPT",
"EAUG",
"EACCEPTCOPY",
"EDECVIRTCHILD",
"EINCVIRTCHILD",
"EINIT",
"ELDB:ELDU:ELDBC:ELBUC",
"EMODPE",
"EMODPR",
"EMODT",
"ERDINFO",
"ESETCONTEXT",
"ETRACKC",
"EBLOCK",
"ECREATE",
"EDBGRD",
"EDBGWR",
"EENTER",
"EEXIT",
"EEXTEND",
"EGETKEY",
"ELDB",
"ELDU",
"ENCLS",
"ENCLU",
"EPA",
"EREMOVE",
"EREPORT",
"ERESUME",
"ETRACK",
"EWB",
# VMX instructions
"INVEPT",
"INVVPID",
"VMCALL",
"VMCLEAR",
"VMFUNC",
"VMLAUNCH",
"VMLAUNCH:VMRESUME",
"VMPTRLD",
"VMPTRST",
"VMREAD",
"VMRESUME",
"VMWRITE",
"VMXOFF",
"VMXON",
# Other instructions
"INVLPG",
"LAHF",
"RDMSR",
"SGDT",
# Unparsable instructions
# These instructions should be supported in the future
"MONITOR",
"MOVDQ2Q",
"MFENCE",
]
# Some instructions are defined in multiple files. We ignore a specific set of the
# duplicates here.
IGNORED_DUPLICATES = [
'MOV-1', # move to control reg
'MOV-2', # move to debug reg
'CMPSD', # compare doubleword (defined in CMPS:CMPSB:CMPSW:CMPSD:CMPSQ)
'MOVQ', # defined in MOVD:MOVQ
'MOVSD', # defined in MOVS:MOVSB:MOVSW:MOVSD:MOVSQ
'VPBROADCASTB:VPBROADCASTW:VPBROADCASTD:VPBROADCASTQ', # defined in VPBROADCAST
"VGATHERDPS:VGATHERDPD",
"VGATHERQPS:VGATHERQPD",
"VPGATHERDD:VPGATHERQD",
"VPGATHERDQ:VPGATHERQQ",
]
# Where to extract the asmdoc archive.
ASMDOC_DIR = "asm-docs"
ARCHIVE_URL = "http://www.felixcloutier.com/x86/x86.tbz2"
ARCHIVE_NAME = "x86.tbz2"
class Instruction(object):
def __init__(self, name, names, tooltip, body):
self.name = name
self.names = names
self.tooltip = tooltip.rstrip(': ,')
self.body = body
def __str__(self):
return f"{self.name} = {self.tooltip}\n{self.body}"
def get_url_for_instruction(instr):
return f"http://www.felixcloutier.com/x86/{urllib.parse.quote(instr.name)}.html"
def download_asm_doc_archive(downloadfolder):
if not os.path.exists(downloadfolder):
print(f"Creating {downloadfolder} as download folder")
os.makedirs(downloadfolder)
elif not os.path.isdir(downloadfolder):
print(f"Error: download folder {downloadfolder} is not a directory")
sys.exit(1)
archive_name = os.path.join(downloadfolder, ARCHIVE_NAME)
print("Downloading archive...")
urllib.request.urlretrieve(ARCHIVE_URL, archive_name)
def extract_asm_doc_archive(downloadfolder, inputfolder):
print("Extracting file...")
if os.path.isdir(os.path.join(inputfolder, "html")):
for root, dirs, files in os.walk(os.path.join(inputfolder, "html")):
for file in files:
if os.path.splitext(file)[1] == ".html":
os.remove(os.path.join(root, file))
tar = tarfile.open(os.path.join(downloadfolder, ARCHIVE_NAME))
tar.extractall(path=inputfolder)
def strip_non_instr(i):
# removes junk from encodings where the opcode is in the middle
# of prefix stuff. e.g.
# 66 0f 38 30 /r PMOVZXBW xmm1, xmm2/m64
return STRIP_PREFIX.sub('', i)
def instr_name(i):
match = INSTRUCTION_RE.match(strip_non_instr(i))
if match:
return match.group(1)
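# Illustrative sketch (not part of the original script): on the sample encoding
# quoted in the comment above, and assuming the regexes behave as documented,
# the two helpers produce roughly:
#
#     strip_non_instr('66 0f 38 30 /r PMOVZXBW xmm1, xmm2/m64')
#         -> 'PMOVZXBW xmm1, xmm2/m64'
#     instr_name('66 0f 38 30 /r PMOVZXBW xmm1, xmm2/m64')
#         -> 'PMOVZXBW'
#
# Lines whose mnemonic cannot be recognised make instr_name() return None.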
def get_description_paragraphs(document_soup):
description_header_node = document_soup.find(id="description")
i = 0
description_paragraph_node = description_header_node.next_sibling.next_sibling
description_paragraphs = []
while i < MAX_DESC_PARAS and len(description_paragraph_node.text) > 20:
if description_paragraph_node.name == "p":
description_paragraphs.append(description_paragraph_node)
i = i + 1
# Move two siblings forward. Next sibling is the line feed.
description_paragraph_node = description_paragraph_node.next_sibling.next_sibling
return description_paragraphs
def parse(filename, f):
doc = BeautifulSoup(f, 'html.parser')
if doc.table is None:
print(f"{filename}: Failed to find table")
return None
table = read_table(doc.table)
names = set()
def add_all(instrs):
for i in instrs:
instruction_name = instr_name(i)
if instruction_name:
names.add(instruction_name)
for inst in table:
if 'Opcode/Instruction' in inst:
add_all(inst['Opcode/Instruction'].split("\n"))
elif 'OpcodeInstruction' in inst:
add_all(inst['OpcodeInstruction'].split("\n"))
elif 'Opcode Instruction' in inst:
add_all(inst['Opcode Instruction'].split("\n"))
elif 'Opcode*/Instruction' in inst:
add_all(inst['Opcode*/Instruction'].split("\n"))
elif 'Opcode / Instruction' in inst:
add_all(inst['Opcode / Instruction'].split("\n"))
elif 'Instruction' in inst:
instruction_name = instr_name(inst['Instruction'])
if not instruction_name:
print(f"Unable to get instruction from: {inst['Instruction']}")
else:
names.add(instruction_name)
# else, skip the line
if not names:
if filename in UNPARSEABLE_INSTR_NAMES:
for inst in filename.split(":"):
names.add(inst)
else:
print(f"{filename}: Failed to read instruction table")
return None
description_paragraphs = get_description_paragraphs(doc)
for para in description_paragraphs:
for link in para.find_all('a'):
# this urljoin will only ensure relative urls are prefixed
# if a url is already absolute it does nothing
link['href'] = urllib.parse.urljoin('http://www.felixcloutier.com/x86/', link['href'])
link['target'] = '_blank'
link['rel'] = 'noreferrer noopener'
return Instruction(
filename,
names,
description_paragraphs[0].text.strip(),
''.join(map(lambda x: str(x), description_paragraphs)).strip())
def read_table(table):
# Finding all 'th' is not enough, since some headers are 'td'.
# Instead, walk through all children of the first 'tr', filter out those
# that are only whitespace, and call `get_text()` on the others.
headers = list(
map(lambda th: th.get_text(),
filter(lambda th: str(th).strip(), table.tr.children)))
result = []
if headers:
# common case
for row in table.find_all('tr'):
obj = {}
for column, name in zip(row.find_all('td'), headers):
# Remove '\n's in names that contain it.
obj[name.replace('\n', '')] = column.get_text()
if obj:
result.append(obj)
else:
# Cases like BEXTR and BZHI
rows = table.find_all('tr')
if len(rows) != 1:
return []
obj = {}
for td in rows[0].find_all('td'):
header = td.p.strong.get_text()
td.p.strong.decompose()
obj[header] = td.get_text()
result.append(obj)
return result
def parse_html(directory):
print("Parsing instructions...")
instructions = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(".html") and file != 'index.html':
with open(os.path.join(root, file), encoding='utf-8') as f2:
name = os.path.splitext(file)[0]
if name in IGNORED_DUPLICATES or name in IGNORED_FILE_NAMES:
continue
try:
instruction = parse(name, f2)
if not instruction:
continue
patch_instruction(instruction)
instructions.append(instruction)
except Exception as e:
print(f"Error parsing {name}:\n{e}")
return instructions
def self_test(instructions, directory):
# For each generated instruction, check that there is a path to a file in
# the documentation.
directory = os.path.join(directory, "html")
ok = True
for inst in instructions:
if not os.path.isfile(os.path.join(directory, inst.name + ".html")):
print(f"Warning: {inst.name} has not file associated")
ok = False
return ok
def patch_instruction(instruction):
if instruction.name == "ADDSS":
print("\nPatching ADDSS")
print("REMINDER: Check if https://github.com/compiler-explorer/compiler-explorer/issues/2380 is still relevant\n")
old_body = instruction.body
old_tooltip = instruction.tooltip
instruction.body = old_body.replace("stores the double-precision", "stores the single-precision")
instruction.tooltip = old_tooltip.replace("stores the double-precision", "stores the single-precision")
def main():
args = parser.parse_args()
print(f"Called with: {args}")
# If we don't have the html folder already...
if not os.path.isdir(os.path.join(args.inputfolder, 'html')):
# We don't, try with the compressed file
if not os.path.isfile(os.path.join(args.downloadfolder, "x86.tbz2")):
# We can't find that either. Download it
try:
download_asm_doc_archive(args.downloadfolder)
extract_asm_doc_archive(args.downloadfolder, args.inputfolder)
except IOError as e:
print("Error when downloading archive:")
print(e)
sys.exit(1)
else:
# We have a file already downloaded
extract_asm_doc_archive(args.downloadfolder, args.inputfolder)
instructions = parse_html(args.inputfolder)
instructions.sort(key=lambda b: b.name)
self_test(instructions, args.inputfolder)
all_inst = set()
for inst in instructions:
if not all_inst.isdisjoint(inst.names):
print(f"Overlap in instruction names: {inst.names.intersection(all_inst)} for {inst.name}")
all_inst = all_inst.union(inst.names)
if not self_test(instructions, args.inputfolder):
print("Tests do not pass. Not writing output file. Aborting.")
sys.exit(3)
print(f"Writing {len(instructions)} instructions")
with open(args.outputpath, 'w') as f:
f.write("""
export function getAsmOpcode(opcode) {
if (!opcode) return;
switch (opcode.toUpperCase()) {
""")
for inst in instructions:
for name in inst.names:
f.write(f' case "{name}":\n')
f.write(' return {}'.format(json.dumps({
"tooltip": inst.tooltip,
"html": inst.body,
"url": get_url_for_instruction(inst)
}, indent=16, separators=(',', ': '), sort_keys=True))[:-1] + ' };\n\n')
f.write("""
}
}
""")
if __name__ == '__main__':
main()
| {
"content_hash": "a54ff571e3716530865620e37bb9262e",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 126,
"avg_line_length": 34.671270718232044,
"alnum_prop": 0.6035375667277508,
"repo_name": "mattgodbolt/compiler-explorer",
"id": "f1b69fd68a68ab9598330fb93e09e68168a20ed8",
"size": "12599",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "etc/scripts/docenizers/docenizer.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "690848"
},
{
"name": "Batchfile",
"bytes": "2758"
},
{
"name": "C++",
"bytes": "237"
},
{
"name": "CSS",
"bytes": "26253"
},
{
"name": "Go",
"bytes": "280"
},
{
"name": "HTML",
"bytes": "62121"
},
{
"name": "Java",
"bytes": "300"
},
{
"name": "JavaScript",
"bytes": "1281944"
},
{
"name": "Makefile",
"bytes": "4593"
},
{
"name": "PHP",
"bytes": "601"
},
{
"name": "Python",
"bytes": "29840"
},
{
"name": "Shell",
"bytes": "1978"
}
],
"symlink_target": ""
} |
"""Test utilities for running dtf tests"""
from __future__ import absolute_import
from __future__ import print_function
import json
import os
import shutil
import sys
import tempfile
import unittest
import zipfile
from subprocess import Popen, PIPE
import configparser
import dtf.constants as constants
import dtf.core.utils as utils
import dtf.globals as gbls
import dtf.adb as adb
DTF_CONFIG = utils.CONFIG_FILE_NAME
DTF_LOG_FILE = utils.LOG_FILE_NAME
LOCAL_MODULES_DIRECTORY = utils.LOCAL_MODULES_DIRECTORY
REPORTS_DIRECTORY = utils.REPORTS_DIRECTORY
DTF_DATA_DIR = gbls.DTF_DATA_DIR
DTF_BINARIES_DIR = gbls.DTF_BINARIES_DIR
DTF_LIBRARIES_DIR = gbls.DTF_LIBRARIES_DIR
DTF_MODULES_DIR = gbls.DTF_MODULES_DIR
DTF_PACKAGES_DIR = gbls.DTF_PACKAGES_DIR
DTF_DB = gbls.DTF_DB
TEST_TOP = os.getcwd()
class Result(object): # pylint: disable=too-few-public-methods
"""Wrapper for stdout/error and return code"""
def __init__(self, return_code, stdout, stderr):
"""Initialize object"""
self.return_code = return_code
self.stdout = stdout
self.stderr = stderr
@property
def json(self):
"""JSONify output"""
return json.loads(self.stdout)
class DataFile(object): # pylint: disable=too-few-public-methods
"""Wrapper for opening using included files"""
def __init__(self, file_name):
"""Attempt to open file"""
full_file_name = "%s/tests/data-files/%s" % (TEST_TOP, file_name)
if not os.path.isfile(full_file_name):
raise OSError
self.full_file_name = full_file_name
self.file_handle = open(full_file_name, 'rb')
def read(self):
"""Read from file handle"""
return self.file_handle.read()
def __str__(self):
"""Return full path"""
return self.full_file_name
class DataZip(object): # pylint: disable=too-few-public-methods
"""Wrapper for opening a zip file of content"""
def __init__(self, file_name):
"""Open zip to temp dir"""
full_zip_name = "%s/tests/data-files/%s" % (TEST_TOP, file_name)
if not os.path.isfile(full_zip_name):
raise OSError
if not zipfile.is_zipfile(full_zip_name):
raise OSError
self.zip_f = zipfile.ZipFile(full_zip_name)
self.temp_dir = tempfile.mkdtemp()
for name in self.zip_f.namelist():
self.zip_f.extract(name, self.temp_dir)
def close(self):
"""Close the DataZip"""
self.zip_f.close()
utils.delete_tree(self.temp_dir)
def __str__(self):
"""Return full path"""
return self.temp_dir
class IntegrationTest(unittest.TestCase):
"""Class for performing dtf integration testing"""
config = None
def setUp(self):
"""Setup project with default config"""
# Set up a default config, and store
self.config = self.default_config()
self.update_config(self.config)
# Create directory structure if not there.
if not os.path.isdir(DTF_DATA_DIR):
os.mkdir(DTF_DATA_DIR)
os.mkdir(DTF_BINARIES_DIR)
os.mkdir(DTF_LIBRARIES_DIR)
os.mkdir(DTF_MODULES_DIR)
os.mkdir(DTF_PACKAGES_DIR)
# Create the rest of the content.
utils.touch(DTF_LOG_FILE)
os.mkdir(LOCAL_MODULES_DIRECTORY)
os.mkdir(REPORTS_DIRECTORY)
def tearDown(self):
"""Remove mock project"""
# Remove all content and main.db
utils.delete_file(DTF_DB)
utils.delete_file(DTF_CONFIG)
utils.delete_file(DTF_LOG_FILE)
utils.delete_tree(LOCAL_MODULES_DIRECTORY)
utils.delete_tree(REPORTS_DIRECTORY)
@classmethod
def default_config(cls):
"""Placeholder for default config"""
return None
@classmethod
def run_cmd(cls, cmd, input_data=None):
"""Run a dtf command"""
rtn = dtf(cmd, input_data=input_data)
print(rtn.stdout, rtn.stderr)
return rtn
@classmethod
def run_check(cls, cmd):
"""Run a dtf_check command"""
rtn = dtf_check(cmd)
print(rtn.stdout, rtn.stderr)
return rtn
@classmethod
def update_config_raw(cls, contents):
"""Update config to contents supplied"""
with open(DTF_CONFIG, 'w') as conf_f:
conf_f.write(contents)
@classmethod
def update_config(cls, cfg):
"""Update config to Config object"""
with open(DTF_CONFIG, 'w') as conf_f:
cfg.write(conf_f)
class BasicIntegrationTest(IntegrationTest):
"""Default test for offline integration test"""
@classmethod
def default_config(cls):
"""Return basic config for offline"""
config = configparser.RawConfigParser()
config.add_section('Info')
config.set('Info', 'sdk', constants.API_MAX)
config.set('Info', 'serial', 'emulator-5554')
config.add_section('Client')
config.set('Client', 'mode', adb.MODE_USB)
return config
class BasicIntegrationDeviceTest(IntegrationTest):
"""Default test for online integration test"""
@classmethod
def default_config(cls):
"""Return basic config for online"""
config = configparser.RawConfigParser()
config.add_section('Info')
config.set('Info', 'sdk', constants.API_MAX)
config.set('Info', 'serial', 'emulator-5554')
config.add_section('Client')
config.set('Client', 'mode', adb.MODE_USB)
return config
def get_default_config(api=constants.API_MAX):
"""Factory for creating a config"""
config = configparser.RawConfigParser()
config.add_section('Info')
config.set('Info', 'sdk', api)
return config
# Taken from awscli/testutils.py
def get_stdout_encoding():
"""Determine encoding of stdout"""
encoding = getattr(sys.__stdout__, 'encoding', None)
if encoding is None:
encoding = 'utf-8'
return encoding
def dtf(command, input_data=None):
"""Run a dtf command"""
env = os.environ.copy()
env['GLOG_LEVEL'] = '5'
full_command = "dtf %s" % (command)
process = Popen(full_command, stdout=PIPE, stderr=PIPE, stdin=PIPE,
shell=True, env=env)
stdout_encoding = get_stdout_encoding()
kwargs = {}
if input_data:
kwargs = {'input': input_data}
stdout, stderr = process.communicate(**kwargs)
return Result(process.returncode,
stdout.decode(stdout_encoding),
stderr.decode(stdout_encoding))
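# Illustrative usage sketch (not part of the original module); the "client"
# module name below is hypothetical:
#
#     result = dtf("client --help")
#     result.return_code   # exit status of the spawned "dtf client --help"
#     result.stdout        # decoded standard output
#     result.json          # parses stdout as JSON, if the command emits JSON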
def dtf_check(command):
"""Invoke the dtf_checker script"""
env = os.environ.copy()
env['GLOG_LEVEL'] = '5'
full_command = "dtf_check %s" % (command)
process = Popen(full_command, stdout=PIPE, stderr=PIPE, stdin=PIPE,
shell=True, env=env)
stdout_encoding = get_stdout_encoding()
kwargs = {}
stdout, stderr = process.communicate(**kwargs)
return Result(process.returncode,
stdout.decode(stdout_encoding),
stderr.decode(stdout_encoding))
def deploy_config_file(file_name):
"""Deploy project config from file"""
config_name = "tests/data-files/%s" % file_name
shutil.copy(config_name, DTF_CONFIG)
# Create a log file.
open(DTF_LOG_FILE, 'w').close()
os.mkdir(LOCAL_MODULES_DIRECTORY)
def deploy_config_raw(contents):
"""Deploy a project from a string"""
with open(DTF_CONFIG, 'w') as conf_f:
conf_f.write(contents)
# Create a log file.
open(DTF_LOG_FILE, 'w').close()
os.mkdir(LOCAL_MODULES_DIRECTORY)
def deploy_config(cfg):
"""Deploy actual ConfigParser object"""
with open(DTF_CONFIG, 'w') as conf_f:
cfg.write(conf_f)
# Create a log file.
open(DTF_LOG_FILE, 'w').close()
os.mkdir(LOCAL_MODULES_DIRECTORY)
def undeploy():
"""Delete the test project"""
utils.delete_file(DTF_CONFIG)
utils.delete_file(DTF_LOG_FILE)
utils.delete_tree(LOCAL_MODULES_DIRECTORY)
| {
"content_hash": "d2d2d952cc2be3c2541e9dce38bc103f",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 73,
"avg_line_length": 22.21095890410959,
"alnum_prop": 0.6197113605526089,
"repo_name": "android-dtf/dtf",
"id": "cba6aece4b7f6695e085f08b1086050468c95db0",
"size": "8747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python-dtf/dtf/testutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "26207"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "280892"
},
{
"name": "Shell",
"bytes": "27870"
}
],
"symlink_target": ""
} |
def func(x, y):
return x * y
print(func('abc', 3))
# abcabcabc
print(func(4, 3))
# 12
def func_annotations(x: 'description-x', y: 'description-y') -> 'description-return':
return x * y
print(func_annotations('abc', 3))
# abcabcabc
print(func_annotations(4, 3))
# 12
def func_annotations_default(x: 'description-x', y: 'description-y' = 3) -> 'description-return':
return x * y
print(func_annotations_default('abc'))
# abcabcabc
print(func_annotations_default(4))
# 12
def func_annotations_type(x: str, y: int) -> str:
return x * y
print(func_annotations_type('abc', 3))
# abcabcabc
print(func_annotations_type(4, 3))
# 12
def func_annotations(x: 'description-x', y: 'description-y') -> 'description-return':
return x * y
print(type(func_annotations.__annotations__))
# <class 'dict'>
print(func_annotations.__annotations__)
# {'x': 'description-x', 'y': 'description-y', 'return': 'description-return'}
print(func_annotations.__annotations__['x'])
# description-x
| {
"content_hash": "bfa1319faca20c4e5f4f31e26da40b14",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 97,
"avg_line_length": 21.25531914893617,
"alnum_prop": 0.6676676676676677,
"repo_name": "nkmk/python-snippets",
"id": "b597f67178a781f78d2f7628bea8f47b0e64b481",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebook/function_annotations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5734214"
},
{
"name": "Python",
"bytes": "1619882"
},
{
"name": "Shell",
"bytes": "12097"
}
],
"symlink_target": ""
} |
from dart.client.python.dart_client import Dart
from dart.model.action import Action
from dart.model.action import ActionData
from dart.model.dataset import FileFormat, Compression
from dart.model.dataset import RowFormat
from dart.model.datastore import Datastore
from dart.model.datastore import DatastoreData
from dart.model.datastore import DatastoreState
if __name__ == '__main__':
dart = Dart('localhost', 5000)
assert isinstance(dart, Dart)
datastore = dart.save_datastore(Datastore(
data=DatastoreData(
name='amaceiras_beacon_native_app_null_coupons_issue',
engine_name='emr_engine',
state=DatastoreState.ACTIVE,
args={
# 'data_to_freespace_ratio': 0.05,
'instance_count': 5,
}
)
))
print 'created datastore: %s' % datastore.id
actions = dart.save_actions(
actions=[
Action(data=ActionData('start_datastore', 'start_datastore')),
Action(data=ActionData('load_dataset', 'load_dataset', args={
'dataset_id': 'URBA9XEQEF',
's3_path_start_prefix_inclusive': 's3://example-bucket/nb.retailmenot.com/parsed_logs/2015/33/beacon-v2-2015-08-18',
# 's3_path_end_prefix_exclusive': 's3://example-bucket/nb.retailmenot.com/parsed_logs/2015/31/beacon-v2-2015-08-01',
's3_path_regex_filter': '.*\\.tsv',
'target_file_format': FileFormat.PARQUET,
'target_row_format': RowFormat.NONE,
'target_compression': Compression.SNAPPY,
})),
],
datastore_id=datastore.id
# datastore_id='JK4PVO2KZA'
)
print 'created action: %s' % actions[0].id
| {
"content_hash": "8ed3f763e5a39a8082513d34a9ea2920",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 132,
"avg_line_length": 40.674418604651166,
"alnum_prop": 0.6186392224128073,
"repo_name": "RetailMeNotSandbox/dart",
"id": "fbe0d61b2cc7f55b4de9417dccab20deb2d98629",
"size": "1749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dart/client/python/examples/emr_engine/load_beacon_native_app_parsed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
} |
from django.conf.urls import *
urlpatterns = patterns('custom.apps.wisepill.views',
url(r'^device/?$', 'device_data', name='wisepill_device_event'),
url(r'^export/events/', 'export_events', name='export_wisepill_events'),
)
| {
"content_hash": "9fc2d2853c966617bccda35921256148",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 76,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.6909871244635193,
"repo_name": "puttarajubr/commcare-hq",
"id": "40762fb878844b57c9469511c85cbd948cbe14e5",
"size": "233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custom/apps/wisepill/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
from modules.Evaluator import Evaluator
## This command evaluates the system's performance
# @author Adriano Zanette
# @version 0.1
class Evaluate:
## Class constructor
# @author Adriano Zanette
# @version 0.1
# @return Evaluate
def __init__(self):
pass
## Execute the evaluation
# @author Adriano Zanette
# @version 0.1
def run(self):
evaluator = Evaluator()
verbList = evaluator.verbList
if len(verbList) == 0:
evaluator.evaluate()
elif len(verbList) == 1:
evaluator.verbHistogram(verbList[0])
else:
evaluator.evaluate(verbList) | {
"content_hash": "28701323b562b2212cc2aaf5ffd09e3d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 46,
"avg_line_length": 22.615384615384617,
"alnum_prop": 0.6751700680272109,
"repo_name": "adzanette/scf-extractor",
"id": "79a5e738bc5c939aca206c0fb8f5c807e383942e",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scf-extractor/command/Evaluate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "51698"
},
{
"name": "PHP",
"bytes": "131430"
},
{
"name": "Python",
"bytes": "423162"
}
],
"symlink_target": ""
} |
class OpsyError(Exception):
"""Base class for exceptions in opsy."""
class NoConfigFile(OpsyError):
"""Config file not found."""
class NoConfigSection(OpsyError):
"""Config section not found."""
class MissingConfigOption(OpsyError):
"""Missing a required option in the config file."""
class DuplicateError(OpsyError):
"""Resource with this attribute already exists."""
| {
"content_hash": "1866228933f09137957abe8cbbde507d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 22.055555555555557,
"alnum_prop": 0.707808564231738,
"repo_name": "testeddoughnut/opsy",
"id": "235675d7bbac90309cc9f93346aa44712e39f12a",
"size": "399",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opsy/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "267814"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models
from . import views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/login_required/$',
login_required(TemplateView.as_view())),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>[0-9]+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>[0-9]+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>[0-9]+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/bypkignoreslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bypkandslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view(query_pk_and_slug=True)),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>[0-9]+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>[0-9]+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>[0-9]+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>[0-9]+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>[0-9]+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/authors/create/interpolate_redirect_nonascii/$',
views.NaiveAuthorCreate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^[eé]dit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect_nonascii/$',
views.NaiveAuthorUpdate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^[eé]dit/author/(?P<pk>[0-9]+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted=%(id)s')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect_nonascii/$',
views.NaiveAuthorDelete.as_view(success_url='/%C3%A9dit/authors/create/?deleted={id}')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
url(r'^dates/books/sortedbyname/$',
views.BookArchive.as_view(ordering='name')),
url(r'^dates/books/sortedbynamedec/$',
views.BookArchive.as_view(ordering='-name')),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>[0-9]+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
url(r'^list/books/sorted/$',
views.BookList.as_view(ordering='name')),
url(r'^list/books/sortedbypagesandnamedec/$',
views.BookList.as_view(ordering=('pages', '-name'))),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>[0-9]{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/sortedbyname/$',
views.BookYearArchive.as_view(make_object_list=True, ordering='name')),
url(r'^dates/books/(?P<year>\d{4})/sortedbypageandnamedec/$',
views.BookYearArchive.as_view(make_object_list=True, ordering=('pages', '-name'))),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/get_object_custom_queryset/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', auth_views.login)
]
| {
"content_hash": "80c56973cf81e07efe6cfcdb8b1f37ba",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 127,
"avg_line_length": 50.203448275862065,
"alnum_prop": 0.6169379765093757,
"repo_name": "Sonicbids/django",
"id": "e09ee125620563cfa774be969015c1d83526508a",
"size": "14585",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/generic_views/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53544"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10479615"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
def merge_dicts(*dicts):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
"""
result = {}
for d in dicts:
result.update(d)
return result
def evaluate_term(term, **kwargs):
return term.evaluate(**kwargs) if hasattr(term, "evaluate") else term
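# Usage sketch (illustrative only, not part of the original module; the Term
# class below is a hypothetical example):
#
#     merge_dicts({'a': 1, 'b': 2}, {'b': 3})  -> {'a': 1, 'b': 3}
#
#     class Term:
#         def evaluate(self, **kwargs):
#             return kwargs.get('x', 0) + 1
#
#     evaluate_term(Term(), x=41)  -> 42   # objects with .evaluate() are called
#     evaluate_term(7)             -> 7    # plain values pass through unchanged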
| {
"content_hash": "e8139ab2b0e197e7df49588300dd78e5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.6480446927374302,
"repo_name": "eykd/ravel",
"id": "7e1a498a9a3802932fcb96367116382755b4fd14",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ravel/utils/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103874"
},
{
"name": "Shell",
"bytes": "238"
}
],
"symlink_target": ""
} |
from django import forms
from hackfsu_com.views.generic import ApiView
from hackfsu_com.util import acl
from api.models import Hackathon, JudgeInfo
class ResponseForm(forms.Form):
approved = forms.BooleanField()
checked_in = forms.BooleanField()
rsvp = forms.BooleanField()
affiliation = forms.CharField()
class ProfileView(ApiView):
http_method_names = ['get']
access_manager = acl.AccessManager(acl_accept=[acl.group_judge, acl.group_pending_judge])
response_form_class = ResponseForm
def work(self, request, req, res):
current_hackathon = Hackathon.objects.current()
info = JudgeInfo.objects.get(hackathon=current_hackathon, user=request.user)
# Status flags
res['approved'] = info.approved
res['checked_in'] = info.checked_in
res['rsvp'] = info.rsvp
# Entered data
res['affiliation'] = info.affiliation
| {
"content_hash": "0fe70ef77a062ac971937a1f25ae445d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 93,
"avg_line_length": 30.366666666666667,
"alnum_prop": 0.6893523600439078,
"repo_name": "andrewsosa/hackfsu_com",
"id": "5083fae00a3eff5986ae04befe354bc3c701a027",
"size": "912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/api/views/judge/get/profile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81944"
},
{
"name": "HTML",
"bytes": "88639"
},
{
"name": "JavaScript",
"bytes": "127887"
},
{
"name": "Python",
"bytes": "279510"
},
{
"name": "Shell",
"bytes": "897"
}
],
"symlink_target": ""
} |
from seat_checker import seat_checker
from file_to_list import file_to_list
from time import sleep
def main():
"""Check each crn course in CRN_file and if a spot is open, print a message
CRN containing file should have the form:
11111
24871
35192
"""
# --------Insert CRN_file name below
crn_file = "CRN_example.txt" # name of file
# --------
crn_list = file_to_list(crn_file)
year = '2018'
semester_dict = {
'spring': '1',
'summer': '2',
'fall': '3'
}
campus_dict = {
'college_station': '1',
'galveston': '2',
'qatar': '3'
}
term = year + semester_dict['spring'] + campus_dict['college_station']
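# e.g. '2018' + '1' + '1' -> '201811' (year + semester code + campus code)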
while True:
message = ""
for crn in crn_list:
capacity, actual, remaining = seat_checker(crn, term)
if remaining > 0:
message += "CRN " + crn + " has " + str(remaining) + " seats open"
print(message)
else:
print("CRN " + crn + " has no empty seats.")
wait_time = 60 * 30 # wait 30 minutes between call times
# if the wait time is too small, Howdy will cut you off for making too many calls
sleep(wait_time)
if __name__ == "__main__":
main()
| {
"content_hash": "65fee4655f617873221d2db796b49224",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 82,
"avg_line_length": 26.183673469387756,
"alnum_prop": 0.5331254871395168,
"repo_name": "EMcCormack/TAMU-Seat-Checker",
"id": "b9f573690a62916aba889561894c7a0b1b95fb07",
"size": "1307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2340"
}
],
"symlink_target": ""
} |
import unittest
import os
import warnings
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor, EnumError
from pymatgen import Element, Structure
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation
from monty.os.path import which
from pymatgen.transformations.site_transformations import \
RemoveSitesTransformation
from pymatgen.util.testing import PymatgenTest
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
Created on Jul 22, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 22, 2012"
enum_cmd = which('enum.x') or which('multienum.x')
makestr_cmd = which('makestr.x') or which('makeStr.x') or which('makeStr.py')
enumlib_present = enum_cmd and makestr_cmd
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumlibAdaptorTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
struct = self.get_structure("LiFePO4")
subtrans = SubstitutionTransformation({'Li': {'Li': 0.5}})
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 2)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 86)
for s in structures:
self.assertAlmostEqual(
s.composition.get_atomic_fraction(Element("Li")), 0.5 / 6.5)
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 2,
refine_structure=True)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 52)
subtrans = SubstitutionTransformation({'Li': {'Li': 0.25}})
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 1,
refine_structure=True)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 1)
for s in structures:
self.assertAlmostEqual(s.composition
.get_atomic_fraction(Element("Li")),
0.25 / 6.25)
# Make sure it works for completely disordered structures.
struct = Structure([[10, 0, 0], [0, 10, 0], [0, 0, 10]], [{'Fe': 0.5}],
[[0, 0, 0]])
adaptor = EnumlibAdaptor(struct, 1, 2)
adaptor.run()
self.assertEqual(len(adaptor.structures), 3)
# Make sure it works properly when symmetry is broken by ordered sites.
struct = self.get_structure("LiFePO4")
subtrans = SubstitutionTransformation({'Li': {'Li': 0.25}})
s = subtrans.apply_transformation(struct)
# Remove some ordered sites to break symmetry.
removetrans = RemoveSitesTransformation([4, 7])
s = removetrans.apply_transformation(s)
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 4)
struct = Structure([[3, 0, 0], [0, 3, 0], [0, 0, 3]],
[{"Si": 0.5}] * 2, [[0, 0, 0], [0.5, 0.5, 0.5]])
adaptor = EnumlibAdaptor(struct, 1, 3, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 10)
struct = Structure.from_file(
os.path.join(test_dir, "EnumerateTest.json"))
adaptor = EnumlibAdaptor(struct, 1, 1)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 2)
def test_rounding_errors(self):
# A rounding issue used to make it appear that Cu3Te2 satisfies an
# ordering of this structure.
# This has been fixed by multiplying the base by 100.
struct = Structure.from_file(os.path.join(test_dir, "Cu7Te5.cif"))
adaptor = EnumlibAdaptor(struct, 1, 2)
self.assertRaises(EnumError, adaptor.run)
adaptor = EnumlibAdaptor(struct, 1, 5)
adaptor.run()
self.assertEqual(len(adaptor.structures), 197)
def test_partial_disorder(self):
s = Structure.from_file(filename=os.path.join(test_dir, "garnet.cif"))
a = SpacegroupAnalyzer(s, 0.1)
prim = a.find_primitive()
s = prim.copy()
s["Al3+"] = {"Al3+": 0.5, "Ga3+": 0.5}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 7)
for s in structures:
self.assertEqual(s.formula, 'Ca12 Al4 Ga4 Si12 O48')
s = prim.copy()
s["Ca2+"] = {"Ca2+": 1/3, "Mg2+": 2/3}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 20)
for s in structures:
self.assertEqual(s.formula, 'Ca4 Mg8 Al8 Si12 O48')
s = prim.copy()
s["Si4+"] = {"Si4+": 1/3, "Ge4+": 2/3}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 18)
for s in structures:
self.assertEqual(s.formula, 'Ca12 Al8 Si4 Ge8 O48')
@unittest.skip("Fails seemingly at random.")
def test_timeout(self):
s = Structure.from_file(filename=os.path.join(test_dir, "garnet.cif"))
a = SpacegroupAnalyzer(s, 0.1)
s["Al3+"] = {"Al3+": 0.5, "Ga3+": 0.5}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01,
timeout=0.0000000000001)
self.assertRaises(TimeoutError, adaptor._run_multienum)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "90057f9273ed43bfed139d6176e8756d",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 83,
"avg_line_length": 42.46,
"alnum_prop": 0.5897315120113048,
"repo_name": "dongsenfo/pymatgen",
"id": "8d4aef882e1966f10a265853de9abacf44ab9bc2",
"size": "6479",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pymatgen/command_line/tests/test_enumlib_caller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7882640"
},
{
"name": "Roff",
"bytes": "1898220"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
import os
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN)
# sets initial station number to channel 8
station = 8
os.system("mpc play " + str(station))
#initialise previous input variable to 0
prev_input = 0
while True:
#take a reading from pin 23
input = GPIO.input(23)
#if the last reading was low and this one high, increase channel by 1
if ((not prev_input) and input):
# assumes you have 8 radio stations configured
station += 1
if station > 8:
station = 1
os.system("mpc play " + str(station))
#update previous input
prev_input = input
#slight pause to debounce
time.sleep(0.05)
| {
"content_hash": "372611325345ebe2a9d0be793ca9d870",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 22.133333333333333,
"alnum_prop": 0.6927710843373494,
"repo_name": "blogmywiki/pi-radio",
"id": "9b7293995e13ddd7a734c1187ba71c5743f502aa",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radio.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from datetime import date
from django.contrib.auth.models import Group
from questionnaire.forms.filter import UserFilterForm, ExportFilterForm, QuestionFilterForm
from questionnaire.models import Region, Organization, Country, Theme, Questionnaire
from questionnaire.tests.base_test import BaseTest
class UserProfileFormTest(BaseTest):
def setUp(self):
self.region = Region.objects.create(name="Afro")
self.organization = Organization.objects.create(name="UNICEF")
self.global_admin = Group.objects.create(name="UNICEF")
self.form_data = {
'organization': self.organization.id,
'region': self.region.id,
'role': self.global_admin.id,
}
def test_valid(self):
user_filter = UserFilterForm(self.form_data)
self.assertTrue(user_filter.is_valid())
def test_valid_when_organization_is_blank(self):
form_data = self.form_data.copy()
form_data['organization'] = ''
user_filter = UserFilterForm(form_data)
self.assertTrue(user_filter.is_valid())
def test_valid_when_region_is_blank(self):
form_data = self.form_data.copy()
form_data['region'] = ''
user_filter = UserFilterForm(form_data)
self.assertTrue(user_filter.is_valid())
def test_valid_when_role_is_blank(self):
form_data = self.form_data.copy()
form_data['role'] = ''
user_filter = UserFilterForm(form_data)
self.assertTrue(user_filter.is_valid())
class ExportFilterFormTest(BaseTest):
def setUp(self):
self.afro = Region.objects.create(name="Afro")
self.uganda = Country.objects.create(name="Uganda", code="UGX")
self.kenya = Country.objects.create(name="Kenya", code="KY")
self.theme = Theme.objects.create(name="some theme", region=self.afro)
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", year=2013)
self.afro.countries.add(self.kenya, self.uganda)
self.form_data = {
'themes': [self.theme.id],
'year': [self.questionnaire.year],
'regions': [self.afro.id],
'countries': [self.uganda.id, self.kenya.id],
}
def test_valid(self):
export_filter = ExportFilterForm(self.form_data)
self.assertTrue(export_filter.is_valid())
def test_valid_when_themes_are_blank(self):
form_data = self.form_data.copy()
form_data['themes'] = ''
export_filter = ExportFilterForm(form_data)
self.assertTrue(export_filter.is_valid())
def test_valid_when_region_is_blank(self):
form_data = self.form_data.copy()
form_data['regions'] = ''
export_filter = ExportFilterForm(form_data)
self.assertTrue(export_filter.is_valid())
def test_valid_when_countries_is_blank(self):
form_data = self.form_data.copy()
form_data['countries'] = ''
export_filter = ExportFilterForm(form_data)
self.assertTrue(export_filter.is_valid())
def test_valid_when_year_is_blank(self):
form_data = self.form_data.copy()
form_data['year'] = ''
export_filter = ExportFilterForm(form_data)
self.assertTrue(export_filter.is_valid())
def test_invalid_when_if_year_is_does_not_have_questionnaire(self):
form_data = self.form_data.copy()
selected_year = date.today().year
form_data['year'] = [selected_year]
export_filter = ExportFilterForm(form_data)
self.assertFalse(export_filter.is_valid())
error_message = "Select a valid choice. %d is not one of the available choices." % selected_year
self.assertIn(error_message, export_filter.errors['year'])
class QuestionFilterFormTest(BaseTest):
def setUp(self):
self.theme = Theme.objects.create(name="Theme1")
self.form_data = {
'theme': self.theme.id
}
def test_post(self):
question_filter = QuestionFilterForm(self.form_data)
self.assertTrue(question_filter.is_valid()) | {
"content_hash": "843c6cfec5cc71287f06a7e0eb146275",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 104,
"avg_line_length": 38.03738317757009,
"alnum_prop": 0.643980343980344,
"repo_name": "eJRF/ejrf",
"id": "8882bb92401388e2882ada977b2766eaf008e04f",
"size": "4070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questionnaire/tests/forms/test_filter_form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55035"
},
{
"name": "Cucumber",
"bytes": "58812"
},
{
"name": "HTML",
"bytes": "190004"
},
{
"name": "JavaScript",
"bytes": "315169"
},
{
"name": "Python",
"bytes": "1467724"
},
{
"name": "Shell",
"bytes": "4467"
}
],
"symlink_target": ""
} |
import re
from xml.etree import ElementTree
class DBusNode:
def __init__(self, bus, service, path=None):
self.bus = bus
self.service = service
if path is None:
path = '/' + service.replace('.', '/')
self.path = path
@property
def proxy(self):
if 'proxy' not in self.__dict__:
self.__dict__['proxy'] = self.bus.get(self.service, self.path)
return self.__dict__['proxy']
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
return getattr(self.proxy, name)
def clear_cache(self):
del self.__dict__['proxy']
def get_children(self, filt, Cls, key=lambda n, v: n):
children = {}
for n in [e.attrib['name']
for e in ElementTree.fromstring(self.proxy.Introspect())
if e.tag == 'node']:
if re.match(filt, n):
v = Cls(self.bus, self.service, self.path+'/'+n)
children[key(n, v)] = v
return children
def __repr__(self):
return '{}(\'{}\', \'{}\')'.format(type(self).__name__,
self.service, self.path)
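# Illustrative usage sketch (assumes the pydbus API that `bus.get(service, path)` suggests,
# and a hypothetical 'org.example.Home' service exposing child nodes 'Lamp0', 'Lamp1', ...):
#
#     from pydbus import SessionBus
#     home = DBusNode(SessionBus(), 'org.example.Home')   # path defaults to /org/example/Home
#     lamps = home.get_children(r'Lamp\d+', DBusNode)     # {'Lamp0': DBusNode(...), ...}
#     home.clear_cache()                                   # drop the cached proxy object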
| {
"content_hash": "4e64f53f29f2e35b15bdf41626f5d901",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 31,
"alnum_prop": 0.5008064516129033,
"repo_name": "coiot-ble/coiotd",
"id": "d02547f8b10ba4dce411d7ddfa795d55d7c00e2d",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coiot/dbus_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59511"
},
{
"name": "Shell",
"bytes": "2057"
}
],
"symlink_target": ""
} |
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
class GrpcServerTest(tf.test.TestCase):
def testRunStep(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testMultipleSessions(self):
server = tf.train.Server.create_local_server()
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
sess_1 = tf.Session(server.target)
sess_2 = tf.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testLargeConstant(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = tf.constant(const_val)
shape_t = tf.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = tf.placeholder(tf.float32, shape=[10000, 3000])
min_t = tf.reduce_min(p)
max_t = tf.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
def testCloseCancelsBlockingOperation(self):
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
q = tf.FIFOQueue(10, [tf.float32])
enqueue_op = q.enqueue(37.0)
dequeue_t = q.dequeue()
sess.run(enqueue_op)
sess.run(dequeue_t)
def blocking_dequeue():
with self.assertRaises(tf.errors.CancelledError):
sess.run(dequeue_t)
blocking_thread = self.checkedThread(blocking_dequeue)
blocking_thread.start()
time.sleep(0.5)
sess.close()
blocking_thread.join()
def testInvalidHostname(self):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "port"):
_ = tf.train.Server({"local": ["localhost"]},
job_name="local",
task_index=0)
class ServerDefTest(tf.test.TestCase):
def testLocalServer(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
}
job_name: 'local' task_index: 0 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoProcesses(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222", "localhost:2223"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
tasks { key: 1 value: 'localhost:2223' } }
}
job_name: 'local' task_index: 1 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoJobs(self):
cluster_def = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testClusterSpec(self):
cluster_spec = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "925dea75ef62f41abaa7529bed99446e",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 78,
"avg_line_length": 34.256830601092894,
"alnum_prop": 0.6158877013877812,
"repo_name": "ivano666/tensorflow",
"id": "b1133aeefdf837a27411975e6cf7cfc606b83b87",
"size": "6946",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/server_lib_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "155915"
},
{
"name": "C++",
"bytes": "9051139"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "763492"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "Protocol Buffer",
"bytes": "110179"
},
{
"name": "Python",
"bytes": "6027764"
},
{
"name": "Shell",
"bytes": "165125"
},
{
"name": "TypeScript",
"bytes": "403037"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/creature/player/shared_wookiee_male.iff"
result.attribute_template_id = -1
result.stfName("species","wookiee")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "10ed60bbbd3623aecda5b56c49548467",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 22.53846153846154,
"alnum_prop": 0.6928327645051194,
"repo_name": "anhstudios/swganh",
"id": "8e070d55e826fb6925880db2649e4b203887398c",
"size": "438",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/creature/player/shared_wookiee_male.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
Tests exportExcelToTable
"""
from exportTableToExcel import exportTableToExcel, \
_exportDataframeToExcel
from importExcelToTable import importExcelToTable, \
_importExcelToDataframe
from scisheets.core import api as api
from scisheets.core import helpers_test as ht
import os
import pandas as pd
import unittest
TEST_FILE = os.path.join(ht.TEST_DIR, "excel_write.xlsx")
#############################
# Tests
#############################
# pylint: disable=W0212,C0111,R0904
class TestImportExcel(unittest.TestCase):
def setUp(self):
ht.setupTableInitialization(self)
self.api = api.APIFormulas(self.table)
self.df = self._createDataframe()
self.columns = self.df.columns
def _createDataframe(self):
df = pd.DataFrame()
table = self.api.getTable()
for column in table.getDataColumns():
name = column.getName()
df[name] = column.getCells()
return df
def testBadPath(self):
return
b = False
error = None
try:
df = pd.DataFrame()
exportTableToExcel(self.api, "this/badpath.csv", df)
except Exception as e:
error = e
b = isinstance(e, IOError) or isinstance(e, ValueError)
self.assertTrue(b)
def testBadColumn(self):
with self.assertRaises(ValueError):
exportTableToExcel(self.api, TEST_FILE, self.df, ['w'])
def _compareExportedDataframe(self, worksheet, columns):
excel_df = _importExcelToDataframe(TEST_FILE,
worksheet=worksheet)
self.assertEqual(len(excel_df.columns), len(columns))
for name in columns:
b = list(excel_df[name]) == list(self.df[name])
self.assertTrue(b)
def _testExportDataframe(self, worksheet=None):
_exportDataframeToExcel(self.df, TEST_FILE, worksheet=worksheet)
self._compareExportedDataframe(worksheet, self.df.columns)
def testExportDataframe(self):
self._testExportDataframe()
self._testExportDataframe(worksheet='Sheet1')
def _testExportTable(self, columns=None, worksheet=None):
exportTableToExcel(self.api,
TEST_FILE,
worksheet=worksheet,
columns=columns)
if columns is None:
columns = self.columns
if worksheet is None:
expected_worksheet = 'Sheet1'
self._compareExportedDataframe(worksheet, columns)
def testExportTable(self):
self._testExportTable()
self._testExportTable(columns=['DUMMY1_COLUMN'])
self._testExportTable(worksheet='Sheet1')
self._testExportTable(worksheet='Sheet1',
columns=['DUMMY1_COLUMN', 'DUMMY2_COLUMN'])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2824e7c8b76e0a244b8611fd7f848792",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 68,
"avg_line_length": 29.559139784946236,
"alnum_prop": 0.6558748635867588,
"repo_name": "ScienceStacks/JViz",
"id": "965e02501b82f66f116750a4f8d133abea16d732",
"size": "2749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mysite/scisheets/plugins/test_exportTableToExcel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35577"
},
{
"name": "HTML",
"bytes": "193853"
},
{
"name": "JavaScript",
"bytes": "176959"
},
{
"name": "Makefile",
"bytes": "4090"
},
{
"name": "Python",
"bytes": "190908"
},
{
"name": "Shell",
"bytes": "6770"
}
],
"symlink_target": ""
} |
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import math
import random
import sys
import nose.tools as nt
try:
from pathlib import Path
except ImportError:
# for Python 3.3
from pathlib2 import Path
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Basic columnize tests."""
size = 5
items = [l*size for l in 'abcd']
out = text.columnize(items, displaywidth=80)
nt.assert_equal(out, 'aaaaa bbbbb ccccc ddddd\n')
out = text.columnize(items, displaywidth=25)
nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
out = text.columnize(items, displaywidth=12)
nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
out = text.columnize(items, displaywidth=10)
nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\nddddd\n')
out = text.columnize(items, row_first=True, displaywidth=80)
nt.assert_equal(out, 'aaaaa bbbbb ccccc ddddd\n')
out = text.columnize(items, row_first=True, displaywidth=25)
nt.assert_equal(out, 'aaaaa bbbbb\nccccc ddddd\n')
out = text.columnize(items, row_first=True, displaywidth=12)
nt.assert_equal(out, 'aaaaa bbbbb\nccccc ddddd\n')
out = text.columnize(items, row_first=True, displaywidth=10)
nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\nddddd\n')
out = text.columnize(items, displaywidth=40, spread=True)
nt.assert_equal(out, 'aaaaa bbbbb ccccc ddddd\n')
out = text.columnize(items, displaywidth=20, spread=True)
nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
out = text.columnize(items, displaywidth=12, spread=True)
nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
out = text.columnize(items, displaywidth=10, spread=True)
nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\nddddd\n')
def test_columnize_random():
"""Test with random input to hopfully catch edge case """
for row_first in [True, False]:
for nitems in [random.randint(2,70) for i in range(2,20)]:
displaywidth = random.randint(20,200)
rand_len = [random.randint(2,displaywidth) for i in range(nitems)]
items = ['x'*l for l in rand_len]
out = text.columnize(items, row_first=row_first, displaywidth=displaywidth)
longer_line = max([len(x) for x in out.split('\n')])
longer_element = max(rand_len)
if longer_line > displaywidth:
print("Columnize displayed something lager than displaywidth : %s " % longer_line)
print("longer element : %s " % longer_element)
print("displaywidth : %s " % displaywidth)
print("number of element : %s " % nitems)
print("size of each element :\n %s" % rand_len)
assert False, "row_first={0}".format(row_first)
def test_columnize_medium():
"""Test with inputs than shouldn't be wider than 80"""
size = 40
items = [l*size for l in 'abc']
for row_first in [True, False]:
out = text.columnize(items, row_first=row_first, displaywidth=80)
nt.assert_equal(out, '\n'.join(items+['']), "row_first={0}".format(row_first))
def test_columnize_long():
"""Test columnize with inputs longer than the display window"""
size = 11
items = [l*size for l in 'abc']
for row_first in [True, False]:
out = text.columnize(items, row_first=row_first, displaywidth=size-1)
nt.assert_equal(out, '\n'.join(items+['']), "row_first={0}".format(row_first))
def eval_formatter_check(f):
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os, u=u"café", b="café")
s = f.format("{n} {n//4} {stuff.split()[0]}", **ns)
nt.assert_equal(s, "12 3 hello")
s = f.format(' '.join(['{n//%i}'%i for i in range(1,8)]), **ns)
nt.assert_equal(s, "12 6 4 3 2 2 1")
s = f.format('{[n//i for i in range(1,8)]}', **ns)
nt.assert_equal(s, "[12, 6, 4, 3, 2, 2, 1]")
s = f.format("{stuff!s}", **ns)
nt.assert_equal(s, ns['stuff'])
s = f.format("{stuff!r}", **ns)
nt.assert_equal(s, repr(ns['stuff']))
# Check with unicode:
s = f.format("{u}", **ns)
nt.assert_equal(s, ns['u'])
# This decodes in a platform dependent manner, but it shouldn't error out
s = f.format("{b}", **ns)
nt.assert_raises(NameError, f.format, '{dne}', **ns)
def eval_formatter_slicing_check(f):
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
s = f.format(" {stuff.split()[:]} ", **ns)
nt.assert_equal(s, " ['hello', 'there'] ")
s = f.format(" {stuff.split()[::-1]} ", **ns)
nt.assert_equal(s, " ['there', 'hello'] ")
s = f.format("{stuff[::2]}", **ns)
nt.assert_equal(s, ns['stuff'][::2])
nt.assert_raises(SyntaxError, f.format, "{n:x}", **ns)
def eval_formatter_no_slicing_check(f):
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
s = f.format('{n:x} {pi**2:+f}', **ns)
nt.assert_equal(s, "c +9.869604")
s = f.format('{stuff[slice(1,4)]}', **ns)
nt.assert_equal(s, 'ell')
if sys.version_info >= (3, 4):
# String formatting has changed in Python 3.4, so this now works.
s = f.format("{a[:]}", a=[1, 2])
nt.assert_equal(s, "[1, 2]")
else:
nt.assert_raises(SyntaxError, f.format, "{a[:]}")
def test_eval_formatter():
f = text.EvalFormatter()
eval_formatter_check(f)
eval_formatter_no_slicing_check(f)
def test_full_eval_formatter():
f = text.FullEvalFormatter()
eval_formatter_check(f)
eval_formatter_slicing_check(f)
def test_dollar_formatter():
f = text.DollarFormatter()
eval_formatter_check(f)
eval_formatter_slicing_check(f)
ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
s = f.format("$n", **ns)
nt.assert_equal(s, "12")
s = f.format("$n.real", **ns)
nt.assert_equal(s, "12")
s = f.format("$n/{stuff[:5]}", **ns)
nt.assert_equal(s, "12/hello")
s = f.format("$n $$HOME", **ns)
nt.assert_equal(s, "12 $HOME")
s = f.format("${foo}", foo="HOME")
nt.assert_equal(s, "$HOME")
def test_long_substr():
data = ['hi']
nt.assert_equal(text.long_substr(data), 'hi')
def test_long_substr2():
data = ['abc', 'abd', 'abf', 'ab']
nt.assert_equal(text.long_substr(data), 'ab')
def test_long_substr_empty():
data = []
nt.assert_equal(text.long_substr(data), '')
def test_strip_email():
src = """\
>> >>> def f(x):
>> ... return x+1
>> ...
>> >>> zz = f(2.5)"""
cln = """\
>>> def f(x):
... return x+1
...
>>> zz = f(2.5)"""
nt.assert_equal(text.strip_email_quotes(src), cln)
def test_strip_email2():
src = '> > > list()'
cln = 'list()'
nt.assert_equal(text.strip_email_quotes(src), cln)
def test_LSString():
lss = text.LSString("abc\ndef")
nt.assert_equal(lss.l, ['abc', 'def'])
nt.assert_equal(lss.s, 'abc def')
lss = text.LSString(os.getcwd())
nt.assert_is_instance(lss.p[0], Path)
def test_SList():
sl = text.SList(['a 11', 'b 1', 'a 2'])
nt.assert_equal(sl.n, 'a 11\nb 1\na 2')
nt.assert_equal(sl.s, 'a 11 b 1 a 2')
nt.assert_equal(sl.grep(lambda x: x.startswith('a')), text.SList(['a 11', 'a 2']))
nt.assert_equal(sl.fields(0), text.SList(['a', 'b', 'a']))
nt.assert_equal(sl.sort(field=1, nums=True), text.SList(['b 1', 'a 2', 'a 11']))
| {
"content_hash": "cd592d885a5ddfc81a806adc0e621db5",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 98,
"avg_line_length": 36.49090909090909,
"alnum_prop": 0.5581714000996513,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "ad533030abde5c257b8d8645ce2f5bc4fb108850",
"size": "8048",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/IPython/utils/tests/test_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import base64
import os
import re
import zlib
import itertools
import hashlib
import datetime
import collections
import six
from six.moves import zip as izip
from . import environment
from .console import log
from .machine import Machine
from . import statistics
from . import util
def iter_results_paths(results):
"""
Iterate over all of the result file paths.
"""
skip_files = set([
'machine.json', 'benchmarks.json'
])
for root, dirs, files in os.walk(results):
# Iterate over files only if machine.json is valid json
machine_json = os.path.join(root, "machine.json")
try:
data = util.load_json(machine_json, api_version=Machine.api_version)
machine_name = data.get('machine')
if not isinstance(machine_name, six.text_type):
raise util.UserError("malformed {0}".format(machine_json))
except util.UserError as err:
machine_json_err = "Skipping results: {0}".format(six.text_type(err))
except IOError as err:
machine_json_err = "Skipping results: could not load {0}".format(
machine_json)
else:
machine_json_err = None
# Iterate over files
for filename in files:
if filename not in skip_files and filename.endswith('.json'):
if machine_json_err is not None:
# Show the warning only if there are some files to load
log.warning(machine_json_err)
break
yield (root, filename, machine_name)
def iter_results(results):
"""
Iterate over all of the result files.
"""
for (root, filename, machine_name) in iter_results_paths(results):
try:
yield Results.load(os.path.join(root, filename), machine_name=machine_name)
except util.UserError as exc:
log.warning(six.text_type(exc))
def iter_results_for_machine(results, machine_name):
"""
Iterate over all of the result files for a particular machine.
"""
return iter_results(os.path.join(results, machine_name))
def iter_results_for_machine_and_hash(results, machine_name, commit):
"""
Iterate over all of the result files with a given hash for a
particular machine.
"""
full_commit = get_result_hash_from_prefix(results, machine_name, commit)
for (root, filename, machine_name) in iter_results_paths(
os.path.join(results, machine_name)):
results_commit = filename.split('-')[0]
if results_commit == full_commit:
try:
yield Results.load(os.path.join(root, filename), machine_name=machine_name)
except util.UserError as exc:
log.warning(six.text_type(exc))
def iter_existing_hashes(results):
"""
    Iterate over all of the result commit hashes, yielding each commit_hash.
May return duplicates. Use `get_existing_hashes` if that matters.
"""
for result in iter_results(results):
yield result.commit_hash
def get_existing_hashes(results):
"""
Get a list of the commit hashes that have already been tested.
"""
log.info("Getting existing hashes")
hashes = list(set(iter_existing_hashes(results)))
return hashes
def get_result_hash_from_prefix(results, machine_name, commit_prefix):
"""
Get the 8-char result commit identifier from a potentially shorter
prefix. Only considers the set of commits that have had
results computed.
Returns None if there are no matches. Raises a UserError
if the prefix is non-unique.
"""
commits = set([])
path = os.path.join(results, machine_name)
for (root, filename, r_machine_name) in iter_results_paths(path):
if r_machine_name != machine_name:
log.warning("Skipping results '{0}': machine name is not '{1}'".format(
os.path.join(root, filename), machine_name))
continue
results_commit = filename.split('-')[0]
cmp_len = min(len(commit_prefix), len(results_commit))
if results_commit[:cmp_len] == commit_prefix[:cmp_len]:
commits.add(results_commit)
if len(commits) > 1:
commit_list_str = ', '.join(sorted(commits))
raise util.UserError('Git hash prefix could represent one of ' +
'multiple commits: {0}'.format(commit_list_str))
elif len(commits) == 1:
return list(commits)[0]
else:
return None
def get_filename(machine, commit_hash, env_name):
"""
Get the result filename for a given machine, commit_hash and
environment.
If the environment name is too long, use its hash instead.
"""
if env_name and len(env_name) >= 128:
env_name = "env-" + hashlib.md5(env_name.encode('utf-8')).hexdigest()
return os.path.join(
machine,
"{0}-{1}.json".format(
commit_hash[:8],
env_name))
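# Example (illustrative): get_filename('mymachine', '0123456789abcdef', 'conda-py3.8-numpy')
# gives 'mymachine/01234567-conda-py3.8-numpy.json' (joined with the platform's path separator).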
def _compatible_results(result, result_params, params):
"""
For parameterized benchmarks, obtain values from *result* that
are compatible with parameters of *benchmark*
"""
if result is None:
# All results missing, eg. build failure
return [None for param in itertools.product(*params)]
# Pick results for those parameters that also appear in the
# current benchmark
old_results = {}
for param, value in izip(itertools.product(*result_params), result):
old_results[param] = value
new_results = []
for param in itertools.product(*params):
new_results.append(old_results.get(param))
return new_results
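# Example (illustrative): with result=[1.0, 2.0] recorded for result_params=[['a', 'b']]
# and the benchmark now declaring params=[['b', 'c']], _compatible_results returns
# [2.0, None]: values are matched by parameter combination and missing ones become None.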
class Results(object):
"""
Manage a set of benchmark results for a single machine and commit
hash.
"""
api_version = 2
def __init__(self,
params,
requirements,
commit_hash,
date,
python,
env_name,
env_vars):
"""
Parameters
----------
params : dict
Parameters describing the environment in which the
benchmarks were run.
requirements : list
Requirements of the benchmarks being run.
commit_hash : str
The commit hash for the benchmark run.
date : int
JavaScript timestamp for when the commit was merged into
the repository.
python : str
A Python version specifier.
env_name : str
Environment name
env_vars: dict
Environment variables
"""
self._params = params
self._requirements = requirements
self._commit_hash = commit_hash
self._date = date
self._results = {}
self._samples = {}
self._stats = {}
self._benchmark_params = {}
self._profiles = {}
self._python = python
self._env_name = env_name
self._started_at = {}
self._duration = {}
self._benchmark_version = {}
self._env_vars = env_vars
# Note: stderr and errcode are not saved to files
self._stderr = {}
self._errcode = {}
if commit_hash is not None:
self._filename = get_filename(
params['machine'], self._commit_hash, env_name)
else:
self._filename = None
@classmethod
def unnamed(cls):
return cls({}, {}, None, None, None, None, {})
@property
def commit_hash(self):
return self._commit_hash
@property
def date(self):
return self._date
@property
def params(self):
return self._params
@property
def env_vars(self):
return self._env_vars
@property
def started_at(self):
return self._started_at
@property
def duration(self):
return self._duration
def set_build_duration(self, value):
self._duration["<build>"] = float(value)
def set_setup_cache_duration(self, setup_cache_key, value):
self._duration["<setup_cache {}>".format(setup_cache_key)] = float(value)
@property
def benchmark_version(self):
return self._benchmark_version
@property
def stderr(self):
return self._stderr
@property
def errcode(self):
return self._errcode
def get_all_result_keys(self):
"""
Return all available result keys.
"""
return six.iterkeys(self._results)
def get_result_keys(self, benchmarks):
"""
Return result keys corresponding to benchmarks.
Parameters
----------
benchmarks : Benchmarks
Benchmarks to return results for.
Used for checking benchmark versions.
Returns
-------
keys : set
Set of benchmark result keys
"""
keys = set()
for key in six.iterkeys(self._results):
if key not in benchmarks:
continue
version = self._benchmark_version.get(key)
bench_version = benchmarks[key].get('version')
if version is not None and version != bench_version:
continue
keys.add(key)
return keys
def get_result_value(self, key, params):
"""
Return the value of benchmark result.
Parameters
----------
key : str
Benchmark name to return results for
params : {list of list, None}
Set of benchmark parameters to return values for
Returns
-------
value : {float, list of float}
Benchmark result value. If the benchmark is parameterized, return
a list of values.
"""
return _compatible_results(self._results[key],
self._benchmark_params[key],
params)
def get_result_stats(self, key, params):
"""
Return the statistical information of a benchmark result.
Parameters
----------
key : str
Benchmark name to return results for
params : {list of list, None}
Set of benchmark parameters to return values for
Returns
-------
stats : {None, dict, list of dict}
Result statistics. If the benchmark is parameterized,
return a list of values.
"""
return _compatible_results(self._stats[key],
self._benchmark_params[key],
params)
def get_result_samples(self, key, params):
"""
Return the raw data points of a benchmark result.
Parameters
----------
key : str
Benchmark name to return results for
params : {list of list, None}
Set of benchmark parameters to return values for
Returns
-------
samples : {None, list}
Raw result samples. If the benchmark is parameterized,
return a list of values.
"""
return _compatible_results(self._samples[key],
self._benchmark_params[key],
params)
def get_result_params(self, key):
"""
Return the benchmark parameters of the given result
"""
return self._benchmark_params[key]
def remove_result(self, key):
"""
Remove results corresponding to a given benchmark.
"""
del self._results[key]
del self._benchmark_params[key]
del self._samples[key]
del self._stats[key]
# Remove profiles (may be missing)
self._profiles.pop(key, None)
# Remove run times (may be missing in old files)
self._started_at.pop(key, None)
self._duration.pop(key, None)
# Remove version (may be missing)
self._benchmark_version.pop(key, None)
def remove_samples(self, key, selected_idx=None):
"""
Remove measurement samples from the selected benchmark.
"""
if key not in self._results:
raise ValueError(key)
if selected_idx is None:
self._samples[key] = None
elif self._samples[key] is not None:
for j in selected_idx:
self._samples[key][j] = None
def add_result(self, benchmark, result,
started_at=None, duration=None,
record_samples=False,
append_samples=False,
selected_idx=None):
"""
Add benchmark result.
Parameters
----------
benchmark : dict
Benchmark object
result : runner.BenchmarkResult
Result of the benchmark.
started_at : datetime.datetime, optional
Benchmark start time.
duration : float, optional
Benchmark total duration in seconds.
record_samples : bool, optional
Whether to save samples.
append_samples : bool, optional
Whether to combine new samples with old.
selected_idx : set, optional
Which indices in a parametrized benchmark to update
"""
new_result = list(result.result)
new_samples = list(result.samples)
new_number = result.number
benchmark_name = benchmark['name']
benchmark_version = benchmark['version']
if started_at is None:
started_at = datetime.datetime.utcnow()
new_stats = [None] * len(new_result)
if (benchmark_name in self._results and
benchmark_version == self._benchmark_version.get(benchmark_name)):
# Append to old samples, if requested
if append_samples:
old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
for j in range(len(new_samples)):
if old_samples[j] is not None and new_samples[j] is not None:
new_samples[j] = old_samples[j] + new_samples[j]
# Retain old result where requested
merge_idx = [j for j in range(len(new_result))
if selected_idx is not None and j not in selected_idx]
if merge_idx:
old_result = self.get_result_value(benchmark_name, benchmark['params'])
old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
old_stats = self.get_result_stats(benchmark_name, benchmark['params'])
for j in merge_idx:
new_result[j] = old_result[j]
new_samples[j] = old_samples[j]
new_stats[j] = old_stats[j]
# Recompute stats for updated entries (and drop unnecessary data)
for j, (r, s, n) in enumerate(zip(new_result, new_samples, new_number)):
if util.is_na(r):
new_samples[j] = None
new_stats[j] = None
continue
if n is not None:
new_result[j], new_stats[j] = statistics.compute_stats(s, n)
# Compress None lists to just None
if all(x is None for x in new_result):
new_result = None
if all(x is None for x in new_samples):
new_samples = None
if all(x is None for x in new_stats):
new_stats = None
# Drop samples if requested
if not record_samples:
new_samples = None
# Store result
self._results[benchmark_name] = new_result
self._stats[benchmark_name] = new_stats
self._samples[benchmark_name] = new_samples
self._benchmark_params[benchmark_name] = benchmark['params'] if benchmark['params'] else []
self._started_at[benchmark_name] = util.datetime_to_js_timestamp(started_at)
if duration is None:
self._duration.pop(benchmark_name, None)
else:
self._duration[benchmark_name] = float(duration)
self._benchmark_version[benchmark_name] = benchmark_version
self._stderr[benchmark_name] = result.stderr
self._errcode[benchmark_name] = result.errcode
if result.profile:
profile_data = base64.b64encode(zlib.compress(result.profile))
if sys.version_info[0] >= 3:
profile_data = profile_data.decode('ascii')
self._profiles[benchmark_name] = profile_data
def get_profile(self, benchmark_name):
"""
Get the profile data for the given benchmark name.
Parameters
----------
benchmark_name : str
Name of benchmark
Returns
-------
profile_data : bytes
Raw profile data
"""
profile_data = self._profiles[benchmark_name]
if sys.version_info[0] >= 3:
profile_data = profile_data.encode('ascii')
return zlib.decompress(base64.b64decode(profile_data))
def has_profile(self, benchmark_name):
"""
Does the given benchmark data have profiling information?
"""
return benchmark_name in self._profiles
def save(self, result_dir):
"""
Save the results to disk, replacing existing results.
Parameters
----------
result_dir : str
Path to root of results tree.
"""
if self._filename is None:
raise ValueError("Cannot save unnamed Results")
path = os.path.join(result_dir, self._filename)
results = {}
simple_dict = {
'result': self._results,
'params': self._benchmark_params,
'version': self._benchmark_version,
'started_at': self._started_at,
'duration': self._duration,
'samples': self._samples,
'profile': self._profiles,
}
all_keys = ['result', 'params', 'version', 'started_at', 'duration',
'stats_ci_99_a', 'stats_ci_99_b', 'stats_q_25', 'stats_q_75',
'stats_number', 'stats_repeat', 'samples', 'profile']
for name in six.iterkeys(self._results):
row = []
for key in all_keys:
if key in simple_dict:
value = simple_dict[key].get(name)
else:
assert key[:6] == 'stats_'
z = self._stats[name]
if z is None:
value = None
else:
value = [x.get(key[6:]) if x is not None else None
for x in z]
if key != 'params':
if isinstance(value, list) and all(x is None for x in value):
value = None
if key.startswith('stats_') or key == 'duration':
value = util.truncate_float_list(value)
row.append(value)
while row and row[-1] is None:
row.pop()
results[name] = row
other_durations = {}
for key, value in six.iteritems(self._duration):
if key.startswith('<'):
other_durations[key] = value
data = {
'commit_hash': self._commit_hash,
'env_name': self._env_name,
'date': self._date,
'params': self._params,
'python': self._python,
'requirements': self._requirements,
'env_vars': self._env_vars,
'result_columns': all_keys,
'results': results,
'durations': other_durations,
}
util.write_json(path, data, self.api_version, compact=True)
def load_data(self, result_dir):
"""
Load previous results for the current parameters (if any).
"""
if self._filename is None:
raise ValueError("Cannot load unnamed Results")
path = os.path.join(result_dir, self._filename)
if os.path.isfile(path):
old = self.load(path)
for dict_name in ('_results', '_samples', '_stats', '_env_vars',
'_benchmark_params', '_profiles', '_started_at',
'_duration', '_benchmark_version'):
setattr(self, dict_name, getattr(old, dict_name))
@classmethod
def load(cls, path, machine_name=None):
"""
Load results from disk.
Parameters
----------
path : str
Path to results file.
machine_name : str, optional
If given, check that the results file is for the given machine.
"""
d = util.load_json(path, cls.api_version)
d.setdefault('env_vars', {})
try:
obj = cls(
d['params'],
d['requirements'],
d['commit_hash'],
d['date'],
d['python'],
d['env_name'],
d['env_vars'],
)
obj._results = {}
obj._samples = {}
obj._stats = {}
obj._benchmark_params = {}
obj._profiles = {}
obj._started_at = {}
obj._duration = d.get('durations', {})
obj._benchmark_version = {}
simple_keys = {
'result': obj._results,
'params': obj._benchmark_params,
'version': obj._benchmark_version,
'started_at': obj._started_at,
'duration': obj._duration,
'samples': obj._samples,
'profile': obj._profiles,
}
for name, key_values in six.iteritems(d['results']):
for key, value in zip(d['result_columns'], key_values):
key_dict = simple_keys.get(key)
if key_dict is not None:
key_dict[name] = value
continue
elif key.startswith('stats_'):
if value is not None:
if name not in obj._stats:
                                obj._stats[name] = [{} for _ in value]  # one independent dict per entry
stats_key = key[6:]
for j, v in enumerate(value):
if v is not None:
obj._stats[name][j][stats_key] = v
else:
raise KeyError("unknown data key {}".format(key))
for key_dict in simple_keys.values():
key_dict.setdefault(name, None)
obj._stats.setdefault(name, None)
obj._filename = os.path.join(*path.split(os.path.sep)[-2:])
except KeyError as exc:
raise util.UserError(
"Error loading results file '{0}': missing key {1}".format(
path, six.text_type(exc)))
if machine_name is not None and obj.params.get('machine') != machine_name:
raise util.UserError(
"Error loading results file '{0}': machine name is not '{1}'".format(
path, machine_name))
return obj
def rm(self, result_dir):
if self._filename is None:
raise ValueError("Cannot remove unnamed Results")
path = os.path.join(result_dir, self._filename)
os.remove(path)
@classmethod
def update(cls, path):
util.update_json(cls, path, cls.api_version, compact=True)
@property
def env_name(self):
return self._env_name
#
# Old data format support
#
@classmethod
def update_to_2(cls, d):
"""
Reformat data in api_version 1 format to version 2.
"""
try:
d2 = {}
d2['commit_hash'] = d['commit_hash']
d2['date'] = d['date']
d2['env_name'] = d.get('env_name',
environment.get_env_name('',
d['python'],
d['requirements'],
{}))
d2['params'] = d['params']
d2['python'] = d['python']
d2['requirements'] = d['requirements']
d2['env_vars'] = d.get('env_vars', {})
# Backward-compatible load
results = {}
samples = {}
stats = {}
benchmark_params = {}
for key, value in six.iteritems(d['results']):
# Backward compatibility
if not isinstance(value, dict):
value = {'result': [value], 'samples': None,
'stats': None, 'params': []}
if not isinstance(value['result'], list):
value['result'] = [value['result']]
if 'stats' in value and not isinstance(value['stats'], list):
value['stats'] = [value['stats']]
value.setdefault('samples', None)
value.setdefault('stats', None)
value.setdefault('params', [])
# Assign results
results[key] = value['result']
samples[key] = value['samples']
stats[key] = value['stats']
benchmark_params[key] = value['params']
if 'profiles' in d:
profiles = d['profiles']
else:
profiles = {}
started_at = d.get('started_at', {})
duration = d.get('duration', {})
benchmark_version = d.get('benchmark_version', {})
# Convert to new format
getters = [
('result', results, None),
('params', benchmark_params, None),
('version', benchmark_version, None),
('started_at', started_at, None),
('duration', duration, None),
('stats_ci_99_a', stats, lambda z: z['ci_99'][0]),
('stats_ci_99_b', stats, lambda z: z['ci_99'][1]),
('stats_q_25', stats, lambda z: z.get('q_25')),
('stats_q_75', stats, lambda z: z.get('q_75')),
('stats_number', stats, lambda z: z.get('number')),
('stats_repeat', stats, lambda z: z.get('repeat')),
('samples', samples, None),
('profile', profiles, None),
]
names = set()
for key_dict in (results, benchmark_params):
names.update(key_dict.keys())
d2['result_columns'] = [x[0] for x in getters]
d2['results'] = {}
for name in sorted(names):
r = []
for key_name, key_dict, key_getter in getters:
value = key_dict.get(name)
if key_getter is not None and value is not None:
if isinstance(value, list):
value = [key_getter(z) if z is not None else None
for z in value]
else:
value = key_getter(value)
if key_name.startswith('stats_') or key_name == 'duration':
value = util.truncate_float_list(value)
if key_name == 'params' and value is None:
value = []
if key_name != 'params' and isinstance(value, list):
if all(x is None for x in value):
value = None
r.append(value)
while r and r[-1] is None:
r.pop()
d2['results'][name] = r
d2['durations'] = {}
for key, value in six.iteritems(duration):
if key.startswith('<'):
d2['durations'][key] = value
return d2
except KeyError as exc:
raise util.UserError(
"Error loading results data: missing key {}".format(
six.text_type(exc)))
def format_benchmark_result(results, benchmark):
"""
Pretty-print a benchmark result to human-readable form.
Parameters
----------
results : Results
Result set object
benchmark : dict
Benchmark dictionary
Returns
-------
info : {str, None}
One-line description of results
details : {str, None}
Additional details
"""
name = benchmark['name']
result = results.get_result_value(name, benchmark['params'])
stats = results.get_result_stats(name, benchmark['params'])
total_count = len(result)
failure_count = sum(r is None for r in result)
info = None
details = None
# Display status
if failure_count > 0:
if failure_count == total_count:
info = "failed"
else:
info = "{0}/{1} failed".format(failure_count, total_count)
# Display results
if benchmark['params']:
# Long format display
if failure_count == 0:
info = "ok"
display_result = [(v, statistics.get_err(v, s) if s is not None else None)
for v, s in zip(result, stats)]
display = _format_benchmark_result(display_result, benchmark)
display = "\n".join(display).strip()
details = display
else:
if failure_count == 0:
# Failure already shown above
if not result:
display = "[]"
else:
if stats[0]:
err = statistics.get_err(result[0], stats[0])
else:
err = None
display = util.human_value(result[0], benchmark['unit'], err=err)
if len(result) > 1:
display += ";..."
info = display
return info, details
def _format_benchmark_result(result, benchmark, max_width=None):
"""
Format the result from a parameterized benchmark as an ASCII table
"""
if not result:
return ['[]']
def do_formatting(num_column_params):
# Fold result to a table
if num_column_params > 0:
column_params = benchmark['params'][-num_column_params:]
else:
column_params = []
rows = []
if column_params:
row_params = benchmark['params'][:-len(column_params)]
header = benchmark['param_names'][:len(row_params)]
column_param_permutations = list(itertools.product(*column_params))
header += [" / ".join(_format_param_value(value) for value in values)
for values in column_param_permutations]
rows.append(header)
column_items = len(column_param_permutations)
name_header = " / ".join(benchmark['param_names'][len(row_params):])
else:
column_items = 1
row_params = benchmark['params']
name_header = ""
header = benchmark['param_names']
rows.append(header)
for j, values in enumerate(itertools.product(*row_params)):
row_results = [util.human_value(x[0], benchmark['unit'], err=x[1])
for x in result[j*column_items:(j+1)*column_items]]
row = [_format_param_value(value) for value in values] + row_results
rows.append(row)
if name_header:
display = util.format_text_table(rows, 1,
top_header_text=name_header,
top_header_span_start=len(row_params))
else:
display = util.format_text_table(rows, 1)
return display.splitlines()
# Determine how many parameters can be fit to columns
if max_width is None:
max_width = util.get_terminal_width() * 3//4
text = do_formatting(0)
for j in range(1, len(benchmark['params'])):
new_text = do_formatting(j)
width = max(len(line) for line in new_text)
if width < max_width:
text = new_text
else:
break
return text
def _format_param_value(value_repr):
"""
Format a parameter value for displaying it as test output. The
values are string obtained via Python repr.
"""
regexs = ["^'(.+)'$",
"^u'(.+)'$",
"^<class '(.+)'>$"]
for regex in regexs:
m = re.match(regex, value_repr)
if m and m.group(1).strip():
return m.group(1)
return value_repr
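# Examples (illustrative): _format_param_value("'abc'") -> 'abc',
# _format_param_value("<class 'int'>") -> 'int'; any other repr is returned unchanged.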
| {
"content_hash": "26438e089844abfb7a9c8670b6a5a08b",
"timestamp": "",
"source": "github",
"line_count": 1034,
"max_line_length": 99,
"avg_line_length": 31.900386847195357,
"alnum_prop": 0.5227830832196453,
"repo_name": "qwhelan/asv",
"id": "d17c9a146a22417a9b2909fb51ea45f48d86664b",
"size": "33074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asv/results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "11960"
},
{
"name": "CSS",
"bytes": "4240"
},
{
"name": "HTML",
"bytes": "8621"
},
{
"name": "JavaScript",
"bytes": "112750"
},
{
"name": "Python",
"bytes": "743235"
}
],
"symlink_target": ""
} |
def process(self):
self.edit("LATIN")
if self.has("DIGIT"):
self.edit("DIGIT", "")
self.lower()
self.edit("SPACE", "space")
self.edit("EXCLAMATION MARK", "exclam")
self.edit("QUESTION MARK", "question")
self.edit("QUOTATION MARK", "quotedbl")
self.edit("NUMBER SIGN", "numbersign")
self.edit("DOLLAR SIGN", "dollar")
self.edit("PERCENT SIGN", "percent")
self.edit("PLUS SIGN", "plus")
self.edit("SEMICOLON", "semicolon")
self.edit("MULTIPLICATION SIGN", "multiply")
self.edit("DIVISION SIGN", "divide")
self.edit("COLON", "colon")
self.edit("COMMA", "comma")
self.edit("EQUALS SIGN", "equal")
self.edit("LESS-THAN SIGN", "less")
self.edit("GREATER-THAN SIGN", "greater")
self.edit("REVERSE SOLIDUS", "backslash")
self.edit("SOLIDUS", "slash")
self.edit("VERTICAL LINE", "bar")
self.edit("HYPHEN-MINUS", "hyphen")
self.edit("AMPERSAND", "ampersand")
self.edit("ASTERISK", "asterisk")
self.edit("APOSTROPHE", "quotesingle")
self.edit("FULL STOP", "period")
self.edit("LOW LINE", "underscore")
self.edit("CIRCUMFLEX ACCENT", "asciicircum")
self.edit("GRAVE ACCENT", "grave")
self.edit("TILDE", "asciitilde")
self.edit("SQUARE BRACKET", "bracket")
self.edit("CURLY BRACKET", "brace")
self.edit("PARENTHESIS", "parenthesis")
self.edit("LEFT", "left")
if self.has("RIGHT") and not self.has("COPYRIGHT"):
self.edit("RIGHT", "right")
self.handleCase()
if __name__ == "__main__":
from glyphNameFormatter.exporters import printRange
printRange("Basic Latin")
| {
"content_hash": "8520e7e9a82e46bac0fb9187d922fdfc",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 55,
"avg_line_length": 36.15555555555556,
"alnum_prop": 0.6238475722188076,
"repo_name": "LettError/glyphNameFormatter",
"id": "980ad9f2880a84dcce5d659f6b84e6814b8f664f",
"size": "1628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/glyphNameFormatter/rangeProcessors/basic_latin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "12372"
},
{
"name": "Python",
"bytes": "224030"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from accounts.views import SignInAndSignUp, LogoutView, ProductView, ServiceView, ContactView, AboutView
from oscar.app import application as oscarapplication
from paypal.express.dashboard.app import application
from django.conf import settings
from oscarstore import views as oscarstoreview
urlpatterns = patterns(
'',
url(r'^uploads/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
# url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^home.html$', SignInAndSignUp.as_view(template_name='home.html'),
name='home.html'),
url(r'^$', SignInAndSignUp.as_view(template_name='home.html'),
name='home'),
url(r'^product/$', oscarstoreview.main,
name='product'),
url(r'^product-old/$', ProductView.as_view(),
name='product-old'),
url(r'^services/$', ServiceView.as_view(),
name='services'),
url(r'^contact/$', ContactView.as_view(),
name='contact'),
url(r'^about/$', AboutView.as_view(),
name='about'),
url(r'^accounts/logout$', LogoutView.as_view(),
name='logout'),
url(r'^admin/', include(admin.site.urls)),
url(r'^oscar/', include(oscarapplication.urls)),
url(r'^oscar/checkout/paypal/', include('paypal.express.urls')),
url(r'^oscar/dashboard/paypal/express/', include(application.urls)),
url(r'^paypal/redirect/', oscarstoreview.RedirectView.as_view(), name='paypal-redirect'),
)
| {
"content_hash": "8fbef4b0594deff9d0ab43abecb38869",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 104,
"avg_line_length": 38.825,
"alnum_prop": 0.6658081133290406,
"repo_name": "aarticianpc/greenpointtrees",
"id": "1e054d5d8fabb5d886ace0d00c25bf43f1c20f31",
"size": "1553",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/superbook/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7167"
},
{
"name": "CoffeeScript",
"bytes": "19515"
},
{
"name": "HTML",
"bytes": "37558"
},
{
"name": "JavaScript",
"bytes": "61674"
},
{
"name": "Python",
"bytes": "56604"
}
],
"symlink_target": ""
} |
from django.db import migrations
def rm_import_waffle_switch(apps, schema_editor):
Switch = apps.get_model('waffle', 'Switch')
# drop the waffle switch blocklist_auto_import to avoid confusion
Switch.objects.filter(name='blocklist_auto_import').delete()
class Migration(migrations.Migration):
dependencies = [
('blocklist', '0023_auto_20201103_1108'),
]
operations = [
migrations.DeleteModel(
name='LegacyImport',
),
migrations.RunPython(rm_import_waffle_switch),
]
| {
"content_hash": "928c1a6267bd7ce5da7d00161be6540d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 27.15,
"alnum_prop": 0.6611418047882136,
"repo_name": "diox/olympia",
"id": "a6cc4a33cfb4e15287715d8dcb77254f7c68bc77",
"size": "592",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/olympia/blocklist/migrations/0024_delete_legacyimport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245459"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290496"
},
{
"name": "JavaScript",
"bytes": "750827"
},
{
"name": "Less",
"bytes": "212819"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6811560"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
} |
"""
archivebot.py - discussion page archiving bot.
usage:
python pwb.py archivebot [OPTIONS] TEMPLATE_PAGE
Bot examines backlinks (Special:WhatLinksHere) to TEMPLATE_PAGE.
Then it goes through all pages (unless a specific page is specified using options)
and archives old discussions. This is done by breaking a page into threads,
then scanning each thread for timestamps. Threads older than a specified
threshold are then moved to another page (the archive), which can be named
either based on the thread's name, or the name can contain a counter which
will be incremented when the archive reaches a certain size.
Transcluded template may contain the following parameters:
{{TEMPLATE_PAGE
|archive =
|algo =
|counter =
|maxarchivesize =
|minthreadsleft =
|minthreadstoarchive =
|archiveheader =
|key =
}}
Meanings of parameters are:
archive Name of the page to which archived threads will be put.
Must be a subpage of the current page. Variables are
supported.
algo Specifies the maximum age of a thread. Must be
in the form old(<delay>) where <delay> specifies
the age in seconds (s), hours (h), days (d),
weeks (w), or years (y) like 24h or 5d. Default is
old(24h).
counter The current value of a counter which could be assigned as
variable. Will be updated by bot. Initial value is 1.
maxarchivesize The maximum archive size before incrementing the counter.
Value can be given with appending letter like K or M
which indicates KByte or MByte. Default value is 200K.
minthreadsleft Minimum number of threads that should be left on a page.
Default value is 5.
minthreadstoarchive The minimum number of threads to archive at once. Default
value is 2.
archiveheader Content that will be put on new archive pages as the
header. This parameter supports the use of variables.
Default value is {{talkarchive}}
key A secret key that (if valid) allows archives not to be
subpages of the page being archived.
Variables below can be used in the value for "archive" in the template above:
%(counter)d the current value of the counter
%(year)d year of the thread being archived
%(isoyear)d ISO year of the thread being archived
%(isoweek)d ISO week number of the thread being archived
%(semester)d semester term of the year of the thread being archived
%(quarter)d quarter of the year of the thread being archived
%(month)d month (as a number 1-12) of the thread being archived
%(monthname)s localized name of the month above
%(monthnameshort)s first three letters of the name above
%(week)d week number of the thread being archived
The ISO calendar starts with the Monday of the week which has at least four
days in the new Gregorian calendar. If January 1st is between Monday and
Thursday (including), the first week of that year started the Monday of that
week, which is in the year before if January 1st is not a Monday. If it's
between Friday and Sunday (including), the following week is then the first week
of the year. So up to three days are still counted as the year before.
See also:
- https://webspace.science.uu.nl/~gent0113/calendar/isocalendar.htm
- https://docs.python.org/3/library/datetime.html#datetime.date.isocalendar
Options (may be omitted):
-help show this help message and exit
-calc:PAGE calculate key for PAGE and exit
-file:FILE load list of pages from FILE
-force override security options
-locale:LOCALE switch to locale LOCALE
-namespace:NS only archive pages from a given namespace
-page:PAGE archive a single PAGE, default ns is a user talk page
-salt:SALT specify salt
"""
#
# (C) Pywikibot team, 2006-2022
#
# Distributed under the terms of the MIT license.
#
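# Illustrative invocation (page and template names below are examples, not defaults):
#
#     python pwb.py archivebot -page:"User talk:Example" "User:ExampleBot/config"
#
# where the talk page transcludes a configuration such as:
#
#     {{User:ExampleBot/config
#      |archive = User talk:Example/Archive %(counter)d
#      |algo = old(30d)
#      |counter = 1
#      |maxarchivesize = 200K
#     }}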
import datetime
import locale
import math
import os
import re
import time
import types
from collections import OrderedDict, defaultdict
from hashlib import md5
from math import ceil
from typing import Any, Optional, Pattern
from warnings import warn
import pywikibot
from pywikibot import i18n
from pywikibot.backports import List, Set, Tuple
from pywikibot.date import apply_month_delta
from pywikibot.exceptions import Error, NoPageError
from pywikibot.textlib import (
TimeStripper,
case_escape,
extract_sections,
findmarker,
to_local_digits,
)
ShouldArchive = Tuple[str, str]
Size = Tuple[int, str]
ZERO = datetime.timedelta(0)
MW_KEYS = types.MappingProxyType({
's': 'seconds',
'h': 'hours',
'd': 'days',
'w': 'weeks',
'y': 'years',
# 'months' and 'minutes' were removed because confusion outweighs merit
})
class ArchiveBotSiteConfigError(Error):
"""There is an error originated by archivebot's on-site configuration."""
class MalformedConfigError(ArchiveBotSiteConfigError):
"""There is an error in the configuration template."""
class MissingConfigError(ArchiveBotSiteConfigError):
"""
The config is missing in the header.
It's in one of the threads or transcluded from another page.
"""
class AlgorithmError(MalformedConfigError):
"""Invalid specification of archiving algorithm."""
class ArchiveSecurityError(ArchiveBotSiteConfigError):
"""
Page title is not a valid archive of page being archived.
The page title is neither a subpage of the page being archived,
nor does it match the key specified in the archive configuration template.
"""
def str2localized_duration(site, string: str) -> str:
"""
Localise a shorthand duration.
Translates a duration written in the shorthand notation (ex. "24h", "7d")
into an expression in the local wiki language ("24 hours", "7 days").
"""
key, duration = checkstr(string)
template = site.mediawiki_message(MW_KEYS[key])
if template:
# replace plural variants
exp = i18n.translate(site.code, template, {'$1': int(duration)})
return exp.replace('$1', to_local_digits(duration, site.code))
return to_local_digits(string, site.code)
def str2time(string: str, timestamp=None) -> datetime.timedelta:
"""
Return a timedelta for a shorthand duration.
:param string: a string defining a time period:
Examples::
300s - 300 seconds
36h - 36 hours
7d - 7 days
2w - 2 weeks (14 days)
1y - 1 year
:param timestamp: a timestamp to calculate a more accurate duration offset
used by years
:type timestamp: datetime.datetime
:return: the corresponding timedelta object
"""
key, duration = checkstr(string)
if duration.isdigit():
duration = int(duration)
else:
key = ''
if key in ['d', 's', 'h', 'w']: # days, seconds, hours, weeks
return datetime.timedelta(**{MW_KEYS[key]: duration})
if key == 'y': # years
days = math.ceil(duration * 365.25)
duration *= 12
else:
raise MalformedConfigError(
'Unrecognized parameter in template: {}'.format(string))
if timestamp:
return apply_month_delta(
timestamp.date(), month_delta=duration) - timestamp.date()
return datetime.timedelta(days=days)
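# Examples (illustrative):
#     str2time('36h') == datetime.timedelta(hours=36)
#     str2time('2w')  == datetime.timedelta(weeks=2)
#     str2time('1y')  == datetime.timedelta(days=366)   # ceil(365.25) when no timestamp is given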
def checkstr(string: str) -> Tuple[str, str]:
"""
Return the key and duration extracted from the string.
:param string: a string defining a time period
Examples::
300s - 300 seconds
36h - 36 hours
7d - 7 days
2w - 2 weeks (14 days)
1y - 1 year
:return: key and duration extracted form the string
"""
if len(string) < 2:
raise MalformedConfigError('Time period should be a numeric value '
'followed by its qualifier')
key, duration = string[-1], string[:-1]
if key not in MW_KEYS:
raise MalformedConfigError('Time period qualifier is unrecognized: {}'
.format(string))
if not duration.isdigit():
raise MalformedConfigError("Time period's duration should be "
'numeric: {}'.format(string))
return key, duration
def str2size(string: str) -> Size:
"""
Return a size for a shorthand size.
Accepts a string defining a size::
1337 - 1337 bytes
150K - 150 kilobytes
2M - 2 megabytes
    :return: a tuple ``(size, unit)``, where ``size`` is an integer and
unit is ``'B'`` (bytes) or ``'T'`` (threads).
"""
match = re.fullmatch(r'(\d{1,3}(?: \d{3})+|\d+) *([BkKMT]?)', string)
if not match:
raise MalformedConfigError("Couldn't parse size: {}".format(string))
val, unit = (int(match.group(1).replace(' ', '')), match.group(2))
if unit == 'M':
val *= 1024
unit = 'K'
if unit in ('K', 'k'):
val *= 1024
if unit != 'T':
unit = 'B'
return val, unit
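# Illustrative examples (not part of the original script); sizes are converted
# to bytes unless the 'T' (threads) unit is used:
#
#   str2size('1337') == (1337, 'B')
#   str2size('150K') == (153600, 'B')    # 150 * 1024
#   str2size('2M')   == (2097152, 'B')   # 2 * 1024 * 1024
#   str2size('10T')  == (10, 'T')        # threads, not bytes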
def template_title_regex(tpl_page: pywikibot.Page) -> Pattern:
"""
Return a regex that matches variations of the template title.
It supports the transcluding variant as well as localized namespaces and
case-insensitivity depending on the namespace.
:param tpl_page: The template page
:type tpl_page: pywikibot.page.Page
"""
ns = tpl_page.site.namespaces[tpl_page.namespace()]
marker = '?' if ns.id == 10 else ''
title = tpl_page.title(with_ns=False)
title = case_escape(ns.case, title)
return re.compile(r'(?:(?:{}):){}{}'.format('|'.join(ns), marker, title))
def calc_md5_hexdigest(txt, salt) -> str:
"""Return md5 hexdigest computed from text and salt."""
s = md5()
s.update(salt.encode('utf-8'))
s.update(b'\n')
s.update(txt.encode('utf8'))
s.update(b'\n')
return s.hexdigest()
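# Illustrative sketch (not part of the original script): the page title and
# salt below are made-up values. The result is what key_ok() (defined below)
# compares against the template's |key= parameter, and is equivalent to:
#
#   from hashlib import md5
#   key = md5(b'somesalt\n' + 'User talk:Example'.encode('utf8') + b'\n').hexdigest()
#   # key == calc_md5_hexdigest('User talk:Example', 'somesalt')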
class TZoneUTC(datetime.tzinfo):
"""Class building a UTC tzinfo object."""
def utcoffset(self, dt) -> datetime.timedelta:
"""Subclass implementation, return timedelta(0)."""
return ZERO
def tzname(self, dt) -> str:
"""Subclass implementation."""
return 'UTC'
def dst(self, dt) -> datetime.timedelta:
"""Subclass implementation, return timedelta(0)."""
return ZERO
def __repr__(self) -> str:
"""Return a string representation."""
return '{}()'.format(self.__class__.__name__)
class DiscussionThread:
"""
An object representing a discussion thread on a page.
It represents something that is of the form::
== Title of thread ==
Thread content here. ~~~~
:Reply, etc. ~~~~
"""
def __init__(self, title: str, timestripper: TimeStripper) -> None:
"""Initializer."""
self.title = title
self.ts = timestripper
self.code = self.ts.site.code
self.content = ''
self.timestamp = None
def __repr__(self) -> str:
"""Return a string representation."""
return '{}("{}",{} bytes)'.format(self.__class__.__name__, self.title,
len(self.content.encode('utf-8')))
def feed_line(self, line: str) -> None:
"""Add a line to the content and find the newest timestamp."""
if not self.content and not line:
return
self.content += line + '\n'
timestamp = self.ts.timestripper(line)
if not self.timestamp: # first time
self.timestamp = timestamp
if timestamp:
self.timestamp = max(self.timestamp, timestamp)
def size(self) -> int:
"""
Return size of discussion thread.
Note that the result is NOT equal to that of
len(self.to_text()). This method counts bytes, rather than
codepoints (characters). This corresponds to MediaWiki's
definition of page size.
"""
return len(self.title.encode('utf-8')) + len(
self.content.encode('utf-8')) + 12
def to_text(self) -> str:
"""Return wikitext discussion thread."""
return '== {} ==\n\n{}'.format(self.title, self.content)
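# Illustrative sketch (not part of the original script): a thread is built up
# line by line and reports its size the way MediaWiki counts bytes. `ts` is
# assumed to be a TimeStripper for an already-configured site:
#
#   thread = DiscussionThread('Some title', ts)
#   thread.feed_line('A signed comment. ~~~~')
#   # thread.size() == len(title bytes) + len(content bytes incl. trailing '\n') + 12
#   # thread.to_text() == '== Some title ==\n\nA signed comment. ~~~~\n'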
class DiscussionPage(pywikibot.Page):
"""
A class that represents a single page of discussion threads.
Feed threads to it and run an update() afterwards.
"""
def __init__(self, source, archiver, params=None) -> None:
"""Initializer."""
super().__init__(source)
self.threads = []
self.full = False
self.archiver = archiver
# for testing purposes we allow archiver to be None and we are able
# to create a DiscussionPage in this way:
# >>> import pywikibot as py
# >>> from scripts.archivebot import DiscussionPage
# >>> d = DiscussionPage(py.Page(py.Site(), <talk page name>), None)
if archiver is None:
self.timestripper = TimeStripper(self.site)
else:
self.timestripper = self.archiver.timestripper
self.params = params
try:
self.load_page()
except NoPageError:
self.header = archiver.get_attr('archiveheader',
i18n.twtranslate(
self.site.code,
'archivebot-archiveheader'))
if self.params:
self.header = self.header % self.params
def load_page(self) -> None:
"""Load the page to be archived and break it up into threads."""
self.header = ''
self.threads = []
self.archives = {}
self.archived_threads = 0
# Exclude unsupported headings (h1, h3, etc):
# adding the marker will make them ignored by extract_sections()
text = self.get()
marker = findmarker(text)
text = re.sub(r'^((=|={3,})[^=])', marker + r'\1', text, flags=re.M)
# Find threads, avoid archiving categories or interwiki
header, threads, footer = extract_sections(text, self.site)
header = header.replace(marker, '')
if header and footer:
self.header = '\n\n'.join((header.rstrip(), footer, ''))
else:
self.header = header + footer
for thread_heading, thread_content in threads:
cur_thread = DiscussionThread(thread_heading.strip('= '),
self.timestripper)
# remove heading line
_, *lines = thread_content.replace(marker, '').splitlines()
for line in lines:
cur_thread.feed_line(line)
self.threads.append(cur_thread)
# This extra info is not desirable when run under the unittest
# framework, which may be run either directly or via setup.py
if pywikibot.calledModuleName() not in ['archivebot_tests', 'setup']:
pywikibot.output('{} thread(s) found on {}'
.format(len(self.threads), self))
def is_full(self, max_archive_size: Size) -> bool:
"""Check whether archive size exceeded."""
size, unit = max_archive_size
if self.size() > self.archiver.maxsize:
self.full = True # xxx: this is one-way flag
elif unit == 'B':
if self.size() >= size:
self.full = True
elif unit == 'T':
if len(self.threads) >= size:
self.full = True
return self.full
def feed_thread(self, thread: DiscussionThread,
max_archive_size: Size) -> bool:
"""Append a new thread to the archive."""
self.threads.append(thread)
self.archived_threads += 1
return self.is_full(max_archive_size)
def size(self) -> int:
"""
Return size of talk page threads.
Note that this method counts bytes, rather than codepoints
(characters). This corresponds to MediaWiki's definition
of page size.
"""
return len(self.header.encode('utf-8')) + sum(t.size()
for t in self.threads)
def update(self, summary, sort_threads: bool = False) -> None:
"""Recombine threads and save page."""
if sort_threads:
pywikibot.output('Sorting threads...')
self.threads.sort(key=lambda t: t.timestamp)
newtext = re.sub('\n*$', '\n\n', self.header) # Fix trailing newlines
for t in self.threads:
newtext += t.to_text()
if self.full:
summary += ' ' + i18n.twtranslate(self.site.code,
'archivebot-archive-full')
self.text = newtext
self.save(summary)
class PageArchiver:
"""A class that encapsulates all archiving methods."""
algo = 'none'
def __init__(self, page, template, salt: str, force: bool = False) -> None:
"""Initializer.
:param page: a page object to be archived
:type page: :py:obj:`pywikibot.Page`
:param template: a template with configuration settings
:type template: :py:obj:`pywikibot.Page`
:param salt: salt value
:param force: override security value
"""
self.attributes = OrderedDict([
('archive', ['', False]),
('algo', ['old(24h)', False]),
('counter', ['1', False]),
('maxarchivesize', ['200K', False]),
])
self.salt = salt
self.force = force
self.site = page.site
self.tpl = template
self.timestripper = TimeStripper(site=self.site)
# read maxarticlesize
try:
# keep a gap of 1 KB so as not to block later changes
self.maxsize = self.site.siteinfo['maxarticlesize'] - 1024
except KeyError: # mw < 1.28
self.maxsize = 2096128 # 2 MB - 1 KB gap
self.page = DiscussionPage(page, self)
self.load_config()
self.comment_params = {
'from': self.page.title(),
}
self.now = datetime.datetime.utcnow().replace(tzinfo=TZoneUTC())
self.archives = {}
self.archived_threads = 0
self.month_num2orig_names = {}
for n, (long, short) in enumerate(self.site.months_names, start=1):
self.month_num2orig_names[n] = {'long': long, 'short': short}
def get_attr(self, attr, default='') -> Any:
"""Get an archiver attribute."""
return self.attributes.get(attr, [default])[0]
def set_attr(self, attr, value, out: bool = True) -> None:
"""Set an archiver attribute."""
if attr == 'archive':
value = value.replace('_', ' ')
elif attr == 'maxarchivesize':
size, unit = str2size(value)
if unit == 'B':
if size > self.maxsize:
value = '{} K'.format(self.maxsize // 1024)
warn('Siteinfo "maxarticlesize" exceeded. Decreasing '
'"maxarchivesize" to ' + value,
ResourceWarning, stacklevel=2)
self.attributes[attr] = [value, out]
def saveables(self) -> List[str]:
"""Return a list of saveable attributes."""
return [a for a in self.attributes if self.attributes[a][1]
and a != 'maxage']
def attr2text(self) -> str:
"""Return a template with archiver saveable attributes."""
return '{{%s\n%s\n}}' \
% (self.tpl.title(with_ns=(self.tpl.namespace() != 10)),
'\n'.join('|{} = {}'.format(a, self.get_attr(a))
for a in self.saveables()))
def key_ok(self) -> bool:
"""Return whether key is valid."""
hexdigest = calc_md5_hexdigest(self.page.title(), self.salt)
return self.get_attr('key') == hexdigest
def load_config(self) -> None:
"""Load and validate archiver template."""
pywikibot.output('Looking for: {{{{{}}}}} in {}'.format(
self.tpl.title(), self.page))
for tpl, params in self.page.raw_extracted_templates:
try: # Check tpl name before comparing; it might be invalid.
tpl_page = pywikibot.Page(self.site, tpl, ns=10)
tpl_page.title()
except Error:
continue
if tpl_page == self.tpl:
for item, value in params.items():
self.set_attr(item.strip(), value.strip())
break
else:
raise MissingConfigError('Missing or malformed template')
if not self.get_attr('algo', ''):
raise MissingConfigError('Missing argument "algo" in template')
if not self.get_attr('archive', ''):
raise MissingConfigError('Missing argument "archive" in template')
def should_archive_thread(self, thread: DiscussionThread
) -> Optional[ShouldArchive]:
"""
Check whether a thread has to be archived.
:return: the archiving reason as a tuple of localization args
"""
# Archived by timestamp
algo = self.get_attr('algo')
re_t = re.fullmatch(r'old\((.*)\)', algo)
if re_t:
if not thread.timestamp:
return None
# TODO: handle unsigned
maxage = str2time(re_t.group(1), thread.timestamp)
if self.now - thread.timestamp > maxage:
duration = str2localized_duration(self.site, re_t.group(1))
return ('duration', duration)
# TODO: handle marked with template
return None
def get_archive_page(self, title: str, params=None) -> DiscussionPage:
"""
Return the page for archiving.
If it doesn't exist yet, create and cache it.
Also check for security violations.
"""
page_title = self.page.title()
archive = pywikibot.Page(self.site, title)
if not (self.force or title.startswith(page_title + '/')
or self.key_ok()):
raise ArchiveSecurityError(
'Archive page {} does not start with page title ({})!'
.format(archive, page_title))
if title not in self.archives:
self.archives[title] = DiscussionPage(archive, self, params)
return self.archives[title]
def get_params(self, timestamp, counter: int) -> dict:
"""Make params for archiving template."""
lang = self.site.lang
return {
'counter': to_local_digits(counter, lang),
'year': to_local_digits(timestamp.year, lang),
'isoyear': to_local_digits(timestamp.isocalendar()[0], lang),
'isoweek': to_local_digits(timestamp.isocalendar()[1], lang),
'semester': to_local_digits(int(ceil(timestamp.month / 6)), lang),
'quarter': to_local_digits(int(ceil(timestamp.month / 3)), lang),
'month': to_local_digits(timestamp.month, lang),
'monthname': self.month_num2orig_names[timestamp.month]['long'],
'monthnameshort': self.month_num2orig_names[
timestamp.month]['short'],
'week': to_local_digits(
int(time.strftime('%W', timestamp.timetuple())), lang),
}
def analyze_page(self) -> Set[ShouldArchive]:
"""Analyze DiscussionPage."""
max_arch_size = str2size(self.get_attr('maxarchivesize'))
counter = int(self.get_attr('counter', '1'))
pattern = self.get_attr('archive')
keep_threads = []
threads_per_archive = defaultdict(list)
whys = set()
pywikibot.output('Processing {} threads'
.format(len(self.page.threads)))
for i, thread in enumerate(self.page.threads):
# TODO: Make an option so that unstamped (unsigned) posts get
# archived.
why = self.should_archive_thread(thread)
if not why or why[0] != 'duration':
keep_threads.append(i)
continue
params = self.get_params(thread.timestamp, counter)
# this is actually just a dummy key to group the threads by
# "era" regardless of the counter and deal with it later
key = pattern % params
threads_per_archive[key].append((i, thread))
whys.add(why) # xxx: we don't know if we ever archive anything
params = self.get_params(self.now, counter)
aux_params = self.get_params(self.now, counter + 1)
counter_matters = (pattern % params) != (pattern % aux_params)
del params, aux_params
# we need to start with the oldest archive since that is
# the one the saved counter applies to, so sort the groups
# by the oldest timestamp
groups = sorted(threads_per_archive.values(),
key=lambda group: min(t.timestamp for _, t in group))
era_change = False
for group in groups:
# We will reset counter IFF:
# 1. it matters (AND)
# 2. "era" (year, month, etc.) changes (AND)
# 3. there is something to put to the new archive.
for i, thread in group:
threads_left = len(self.page.threads) - self.archived_threads
if threads_left <= int(self.get_attr('minthreadsleft', 5)):
keep_threads.append(i)
continue  # Because there are too few threads left.
if era_change:
era_change = False
counter = 1
params = self.get_params(thread.timestamp, counter)
archive = self.get_archive_page(pattern % params, params)
if counter_matters:
while counter > 1 and not archive.exists():
# This may happen when either:
# 1. a previous version of the bot ran and reset
# the counter without archiving anything
# (number #3 above)
# 2. era changed between runs.
# Decrease the counter.
# TODO: This can be VERY slow, use preloading
# or binary search.
counter -= 1
params = self.get_params(thread.timestamp, counter)
archive = self.get_archive_page(
pattern % params, params)
while archive.is_full(max_arch_size):
counter += 1
params = self.get_params(thread.timestamp, counter)
archive = self.get_archive_page(
pattern % params, params)
archive.feed_thread(thread, max_arch_size)
self.archived_threads += 1
if counter_matters:
era_change = True
if self.archived_threads:
self.page.threads = [self.page.threads[i]
for i in sorted(keep_threads)]
self.set_attr('counter', str(counter))
return whys
return set()
def run(self) -> None:
"""Process a single DiscussionPage object."""
if not self.page.botMayEdit():
return
whys = self.analyze_page()
mintoarchive = int(self.get_attr('minthreadstoarchive', 2))
if self.archived_threads < mintoarchive:
# We might not want to archive a measly few threads
# (lowers edit frequency)
pywikibot.output('Only {} (< {}) threads are old enough. Skipping'
.format(self.archived_threads, mintoarchive))
return
if whys:
# Search for the marker template
rx = re.compile(r'\{\{%s\s*?\n.*?\n\}\}'
% (template_title_regex(self.tpl).pattern),
re.DOTALL)
if not rx.search(self.page.header):
raise MalformedConfigError(
"Couldn't find the template in the header"
)
pywikibot.output('Archiving {} thread(s).'
.format(self.archived_threads))
# Save the archives first (so that bugs don't cause a loss of data)
for title, archive in sorted(self.archives.items()):
count = archive.archived_threads
if count == 0:
continue
self.comment_params['count'] = count
comment = i18n.twtranslate(self.site.code,
'archivebot-archive-summary',
self.comment_params)
archive.update(comment)
# Save the page itself
self.page.header = rx.sub(self.attr2text(), self.page.header)
self.comment_params['count'] = self.archived_threads
comma = self.site.mediawiki_message('comma-separator')
self.comment_params['archives'] = comma.join(
a.title(as_link=True) for a in self.archives.values()
if a.archived_threads > 0
)
# Find out the reasons and return them localized
translated_whys = set()
for why, arg in whys:
# Archived by timestamp
if why == 'duration':
translated_whys.add(
i18n.twtranslate(self.site.code,
'archivebot-older-than',
{'duration': arg,
'count': self.archived_threads}))
# TODO: handle unsigned or archived by template
self.comment_params['why'] = comma.join(translated_whys)
comment = i18n.twtranslate(self.site.code,
'archivebot-page-summary',
self.comment_params)
self.page.update(comment)
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
filename = None
pagename = None
namespace = None
salt = ''
force = False
calc = None
templates = []
local_args = pywikibot.handle_args(args)
for arg in local_args:
option, _, value = arg.partition(':')
if not option.startswith('-'):
templates.append(arg)
continue
option = option[1:]
if option in ('file', 'filename'):
filename = value
elif option == 'locale':
# Required for english month names
locale.setlocale(locale.LC_TIME, value)
elif option == 'timezone':
os.environ['TZ'] = value
# Or use the preset value
if hasattr(time, 'tzset'):
time.tzset()
elif option == 'calc':
calc = value
elif option == 'salt':
salt = value
elif option == 'force':
force = True
elif option == 'page':
pagename = value
elif option == 'namespace':
namespace = value
site = pywikibot.Site()
if calc:
if not salt:
pywikibot.bot.suggest_help(missing_parameters=['-salt'])
return
page = pywikibot.Page(site, calc)
if page.exists():
calc = page.title()
else:
pywikibot.output(
'NOTE: the specified page "{}" does not (yet) exist.'
.format(calc))
pywikibot.output('key = {}'.format(calc_md5_hexdigest(calc, salt)))
return
if not templates:
pywikibot.bot.suggest_help(
additional_text='No template was specified.')
return
for template_name in templates:
pagelist = []
tmpl = pywikibot.Page(site, template_name, ns=10)
if not filename and not pagename:
if namespace is not None:
ns = [str(namespace)]
else:
ns = []
pywikibot.output('Fetching template transclusions...')
pagelist.extend(tmpl.getReferences(only_template_inclusion=True,
follow_redirects=False,
namespaces=ns))
if filename:
for pg in open(filename).readlines():
pagelist.append(pywikibot.Page(site, pg, ns=10))
if pagename:
pagelist.append(pywikibot.Page(site, pagename, ns=3))
pagelist.sort()
for pg in pagelist:
pywikibot.output('Processing {}'.format(pg))
# Catching exceptions, so that errors in one page do not bail out
# the entire process
try:
archiver = PageArchiver(pg, tmpl, salt, force)
archiver.run()
except ArchiveBotSiteConfigError as e:
# no stack trace for errors originated by pages on-site
pywikibot.error('Missing or malformed template in page {}: {}'
.format(pg, e))
except Exception:
pywikibot.error('Error occurred while processing page {}'
.format(pg))
pywikibot.exception(tb=True)
if __name__ == '__main__':
main()
| {
"content_hash": "e67721372847c54fa4527f2c06994f1a",
"timestamp": "",
"source": "github",
"line_count": 918,
"max_line_length": 79,
"avg_line_length": 36.9041394335512,
"alnum_prop": 0.5674774189739654,
"repo_name": "wikimedia/pywikibot-core",
"id": "04e365b3149149d92a2b6b84dbe78c209e6d9e25",
"size": "33897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/archivebot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4504123"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djaaks.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "12a072c2ea002685ac627d1206e1c824",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "rklabs/djaaks",
"id": "0b92de5ab8a86d05a615319b69f87d66469a0677",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11366"
},
{
"name": "Python",
"bytes": "18966"
}
],
"symlink_target": ""
} |
'''
Used to convert the Microsoft Sentence Completion Challenge (MSCC) learning corpus into a one-sentence-per-line format.
'''
import sys
from nltk.tokenize import word_tokenize, sent_tokenize
def write_paragraph_lines(paragraph_lines):
paragraph_str = ' '.join(paragraph_lines)
for sent in sent_tokenize(paragraph_str):
if lowercase:
sent = sent.lower()
output_file.write(' '.join(word_tokenize(sent))+'\n')
lowercase = True
if len(sys.argv) < 3:
sys.stderr.write("Usage: %s <input-filename> <output-filename>\n" % sys.argv[0])
sys.exit(1)
input_file = open(sys.argv[1],'r')
output_file = open(sys.argv[2],'w')
paragraph_lines = []
for i, line in enumerate(input_file):
if len(line.strip()) == 0 and len(paragraph_lines) > 0:
write_paragraph_lines(paragraph_lines)
paragraph_lines = []
else:
paragraph_lines.append(line)
if len(paragraph_lines) > 0:
write_paragraph_lines(paragraph_lines)
print('Read {} lines'.format(i + 1))
input_file.close()
output_file.close()
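# Illustrative example (not part of the original script): for an input
# paragraph "Hello there. How are you?" followed by a blank line, the output
# file would receive one tokenized, lowercased sentence per line:
#
#   hello there .
#   how are you ?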
| {
"content_hash": "374650b6fb2a857cb5c137a6d24d4110",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 119,
"avg_line_length": 27.045454545454547,
"alnum_prop": 0.5915966386554622,
"repo_name": "orenmel/context2vec",
"id": "aca506beda482ed20089785d25c1e8e6018e28cd",
"size": "1190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "context2vec/eval/mscc_text_tokenize.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48996"
}
],
"symlink_target": ""
} |
import contextlib
import locale
import subprocess
import sys
from typing import IO, Mapping, Sequence, Tuple, Union
def shutdown_process(proc: subprocess.Popen) -> Tuple[bytes, bytes]:
"""Gracefully shutdown a child process."""
with contextlib.suppress(subprocess.TimeoutExpired):
return proc.communicate(timeout=0.3)
proc.terminate()
with contextlib.suppress(subprocess.TimeoutExpired):
return proc.communicate(timeout=0.2)
proc.kill()
return proc.communicate()
def decode_output(output: bytes) -> str:
"""Try to decode the given bytes with encodings from the system.
:param output: output to decode
:raises UnicodeDecodeError: if all encodings fail
:return: decoded string
"""
try:
return output.decode("utf-8")
except UnicodeDecodeError:
second_encoding = locale.getpreferredencoding()
if second_encoding.casefold() in ("utf8", "utf-8"):
raise
return output.decode(second_encoding)
def popen(
args: Sequence[str],
env: Mapping[str, str] = None,
silent: bool = False,
stdout: Union[int, IO] = None,
stderr: Union[int, IO] = subprocess.STDOUT,
) -> Tuple[int, str]:
if silent and stdout is not None:
raise ValueError(
"Can not specify silent and stdout; passing a custom stdout always silences the commands output in Nox's log."
)
if silent:
stdout = subprocess.PIPE
proc = subprocess.Popen(args, env=env, stdout=stdout, stderr=stderr)
try:
out, err = proc.communicate()
sys.stdout.flush()
except KeyboardInterrupt:
out, err = shutdown_process(proc)
if proc.returncode != 0:
raise
return_code = proc.wait()
return return_code, decode_output(out) if out else ""
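# A minimal usage sketch (not part of the original module); it assumes a POSIX
# environment where `echo` is on PATH:
#
#   return_code, output = popen(["echo", "hello"], silent=True)
#   # return_code == 0 and output == "hello\n"
#
# With the default silent=False and stdout=None the child writes straight to
# the parent's stdout and the returned output string is empty.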
| {
"content_hash": "8b0be7fa9e69287d092a4d90eedf829b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 122,
"avg_line_length": 26.391304347826086,
"alnum_prop": 0.6562328390993959,
"repo_name": "jonparrott/nox",
"id": "9bd1c115a856577842bc00d7b25a9f96722e2271",
"size": "2410",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "nox/popen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "131305"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools
from pants.base.build_environment import get_pants_cachedir
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependency
from pants_test.jvm.jvm_task_test_base import JvmTaskTestBase
class JvmToolTaskTestBase(JvmTaskTestBase):
"""Prepares an ephemeral test build root that supports tasks that use jvm tool bootstrapping.
:API: public
"""
@property
def alias_groups(self):
"""
:API: public
"""
# Aliases appearing in our real BUILD.tools.
return BuildFileAliases(
targets={
'jar_library': JarLibrary,
'target': Target,
},
objects={
'exclude': Exclude,
'jar': JarDependency,
'scala_jar': ScalaJarDependency,
},
)
def setUp(self):
"""
:API: public
"""
super(JvmToolTaskTestBase, self).setUp()
# Use a synthetic subclass for proper isolation when bootstrapping within the test.
bootstrap_scope = 'bootstrap_scope'
self.bootstrap_task_type = self.synthesize_task_subtype(BootstrapJvmTools, bootstrap_scope)
JvmToolMixin.reset_registered_tools()
# Set some options:
# 1. Cap BootstrapJvmTools memory usage in tests. The Xmx was empirically arrived upon using
# -Xloggc and verifying no full gcs for a test using the full gamut of resolving a multi-jar
# tool, constructing a fat jar and then shading that fat jar.
#
# 2. Allow tests to read/write tool jars from the real artifact cache, so they don't
# each have to resolve and shade them every single time, which is a huge slowdown.
# Note that local artifact cache writes are atomic, so it's fine for multiple concurrent
# tests to write to it.
#
# Note that we don't have access to the invoking pants instance's options, so we assume that
# its artifact cache is in the standard location. If it isn't, worst case the tests will
# populate a second cache at the standard location, which is no big deal.
# TODO: We really need a straightforward way for pants's own tests to get to the enclosing
# pants instance's options values.
artifact_caches = [os.path.join(get_pants_cachedir(), 'artifact_cache')]
self.set_options_for_scope(bootstrap_scope, jvm_options=['-Xmx128m'])
self.set_options_for_scope('cache.{}'.format(bootstrap_scope),
read_from=artifact_caches,
write_to=artifact_caches)
# Tool option defaults currently point to targets in the real BUILD.tools, so we copy it
# into our test workspace.
shutil.copy(os.path.join(self.real_build_root, 'BUILD.tools'), self.build_root)
Bootstrapper.reset_instance()
def context(self, for_task_types=None, options=None, passthru_args=None, target_roots=None,
console_outstream=None, workspace=None, for_subsystems=None):
"""
:API: public
"""
# Add in the bootstrapper task type, so its options get registered and set.
for_task_types = [self.bootstrap_task_type] + (for_task_types or [])
return super(JvmToolTaskTestBase, self).context(for_task_types=for_task_types,
options=options,
passthru_args=passthru_args,
target_roots=target_roots,
console_outstream=console_outstream,
workspace=workspace,
for_subsystems=for_subsystems)
def prepare_execute(self, context):
"""Prepares a jvm tool-using task for execution, first bootstrapping any required jvm tools.
Note: Other task pre-requisites will not be ensured and tests must instead setup their own
product requirements if any.
:API: public
:returns: The prepared Task instance.
"""
task = self.create_task(context)
task.invalidate()
# Bootstrap the tools needed by the task under test.
# We need the bootstrap task's workdir to be under the test's .pants.d, so that it can
# use artifact caching. Making it a sibling of the main task's workdir achieves this.
self.bootstrap_task_type._alternate_target_roots(context.options,
self.address_mapper,
self.build_graph)
bootstrap_workdir = os.path.join(os.path.dirname(task.workdir), 'bootstrap_jvm_tools')
self.bootstrap_task_type(context, bootstrap_workdir).execute()
return task
def execute(self, context):
"""Executes a jvm tool-using task, first bootstrapping any required jvm tools.
Note: Other task pre-requisites will not be ensured and tests must instead setup their own
product requirements if any.
:API: public
:returns: The Task instance that was executed.
"""
task = self.prepare_execute(context)
task.execute()
return task
| {
"content_hash": "06a93ed05dfcdef5e510a5fee96f0664",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 99,
"avg_line_length": 42.63157894736842,
"alnum_prop": 0.6583774250440917,
"repo_name": "peiyuwang/pants",
"id": "35ecfe3a5da7aaf40af6b64073378e97ef151e10",
"size": "5817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/jvm/jvm_tool_task_test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "78744"
},
{
"name": "Java",
"bytes": "463179"
},
{
"name": "JavaScript",
"bytes": "30784"
},
{
"name": "Protocol Buffer",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "5586816"
},
{
"name": "Rust",
"bytes": "168825"
},
{
"name": "Scala",
"bytes": "79707"
},
{
"name": "Shell",
"bytes": "64292"
},
{
"name": "Thrift",
"bytes": "2183"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('job', '0016_auto_20170411_0342'),
]
operations = [
migrations.AddField(
model_name='job',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='location', to='job.Location'),
),
]
| {
"content_hash": "129e572ae3b535d5b995189857d39323",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 136,
"avg_line_length": 25.789473684210527,
"alnum_prop": 0.6346938775510204,
"repo_name": "jamesaud/se1-group4",
"id": "8cc18b80d8f0ef65beccc38275ebd251c964331a",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jmatcher/job/migrations/0017_job_location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246623"
},
{
"name": "HTML",
"bytes": "119706"
},
{
"name": "JavaScript",
"bytes": "108620"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "224980"
},
{
"name": "Shell",
"bytes": "8041"
}
],
"symlink_target": ""
} |
from trbm_base import TRBMBase
from copy import deepcopy
import numpy as np
class TRBM(TRBMBase):
def __init__(self, number_visible_units=1, number_hidden_units=1, order=1, network=None):
self.visible_values = list()
self.hidden_values = list()
self.visible_to_visible_bias = list()
self.visible_to_hidden_bias = list()
self.hidden_to_hidden_bias = list()
if network is None:
self.number_visible_units = number_visible_units
self.number_hidden_units = number_hidden_units
self.order = order
for _ in xrange(order+1):
self.visible_values.append(np.zeros((number_visible_units,1)))
self.hidden_values.append(np.zeros((number_hidden_units,1)))
#visible to hidden connections at time t
self.connection_weights = np.random.rand(number_visible_units, number_hidden_units) * 0.05
#bias at time t
self.visible_bias = np.random.rand(number_visible_units,1)
self.hidden_bias = np.random.rand(number_hidden_units,1)
#bias propagated from previous time steps
for _ in xrange(order):
visible_to_visible = np.random.rand(number_visible_units, number_visible_units) * 0.05
visible_to_hidden = np.random.rand(number_visible_units, number_hidden_units) * 0.05
hidden_to_hidden = np.random.rand(number_hidden_units, number_hidden_units) * 0.05
self.visible_to_visible_bias.append(visible_to_visible)
self.visible_to_hidden_bias.append(visible_to_hidden)
self.hidden_to_hidden_bias.append(hidden_to_hidden)
else:
self.number_visible_units = network.number_visible_units
self.number_hidden_units = network.number_hidden_units
self.order = network.order
for _ in xrange(network.order+1):
self.visible_values.append(np.zeros((network.number_visible_units,1)))
self.hidden_values.append(np.zeros((network.number_hidden_units,1)))
#visible to hidden connections at time t
self.connection_weights = np.array(network.connection_weights)
#bias at time t
self.visible_bias = np.array(network.visible_bias)
self.hidden_bias = np.array(network.hidden_bias)
#bias propagated from previous time steps
for i in xrange(network.order):
self.visible_to_visible_bias.append(np.array(network.visible_to_visible_bias[i]))
self.visible_to_hidden_bias.append(np.array(network.visible_to_hidden_bias[i]))
self.hidden_to_hidden_bias.append(np.array(network.hidden_to_hidden_bias[i]))
def __deepcopy__(self, memo):
return TRBM(network=self)
def train(self, data, epochs=100, learning_rate=0.1):
"""Trains the Boltzmann machine with a given set of training vectors.
Keyword arguments:
data -- A 'np.array' containing data for training the RBM. Each row of the array should be a training vector of dimension 'number_visible_units'.
epochs -- The number of iterations of the learning algorithm (default 100).
learning_rate -- The algorithm's learning rate (default 0.1).
"""
for _ in xrange(epochs):
for t in xrange(self.order):
self.visible_values[t] = self._copy_array(data[t], self.visible_values[t].shape)
self.hidden_values[t] = self._sample_initial(t,1)
number_training_vectors = data.shape[0]
for t in xrange(self.order,number_training_vectors):
self.visible_values[self.order] = self._copy_array(data[t], self.visible_values[self.order].shape)
#we sample the current hidden layer given the visible layer up to time t and the hidden layers up to time t-1
current_time_hidden_bias_values = self.connection_weights.T.dot(self.visible_values[self.order])
hidden_bias = self._bias_function_hidden()
for neuron in xrange(self.number_hidden_units):
prob = self._sigmoid(current_time_hidden_bias_values[neuron] + hidden_bias[neuron])
self.hidden_values[self.order][neuron] = prob
#we sample from the network
sample,_ = self._sample(1)
#we update the connection weights between the visible and hidden units of the current time step
for i in xrange(self.number_visible_units):
#we update the bias values of the visible units
visible_bias_delta = learning_rate * (self.visible_values[self.order][i] - sample[i])
self.visible_bias[i] = self.visible_bias[i] + visible_bias_delta
for j in xrange(self.number_hidden_units):
data_expectation = self._sigmoid(np.sum(self.connection_weights[:,j] * self.visible_values[self.order]) + self.hidden_bias[j])
sample_expectation = self._sigmoid(np.sum(self.connection_weights[:,j] * sample) + self.hidden_bias[j])
#we update the connection weight between the i-th visible unit and the j-th hidden unit
weight_change_delta = learning_rate * (data_expectation * self.visible_values[self.order][i] - sample_expectation * sample[i])
self.connection_weights[i,j] = self.connection_weights[i,j] + weight_change_delta
#we update the bias values of the hidden units
hidden_bias_delta = learning_rate * (data_expectation - sample_expectation)
self.hidden_bias[j] = self.hidden_bias[j] + hidden_bias_delta
#we update the visible to hidden connection weights between the current time step and the previous time steps
for n in xrange(self.order):
value_index = self.order - n - 1
sample,_ = self._sample(1,self.visible_values[value_index])
for i in xrange(self.number_visible_units):
for j in xrange(self.number_hidden_units):
data_expectation = self._sigmoid(np.sum(self.visible_to_hidden_bias[n][:,j] * self.visible_values[value_index]) + self.hidden_bias[j])
sample_expectation = self._sigmoid(np.sum(self.visible_to_hidden_bias[n][:,j] * sample) + self.hidden_bias[j])
#we update the connection weight between the i-th visible unit and the j-th hidden unit
weight_change_delta = learning_rate * (data_expectation * self.visible_values[value_index][i] - sample_expectation * sample[i])
self.visible_to_hidden_bias[n][i,j] = self.visible_to_hidden_bias[n][i,j] + weight_change_delta
#we update the visible to visible connection weights between the current time step and the previous time steps
for n in xrange(self.order):
value_index = self.order - n - 1
sample,_ = self._sample(1,self.visible_values[value_index])
for i in xrange(self.number_visible_units):
for j in xrange(self.number_visible_units):
data_expectation = self._sigmoid(np.sum(self.visible_to_visible_bias[n][:,j] * self.visible_values[value_index]) + self.visible_bias[j])
sample_expectation = self._sigmoid(np.sum(self.visible_to_visible_bias[n][:,j] * sample) + self.visible_bias[j])
#we update the connection weight between the i-th and the j-th visible unit
weight_change_delta = learning_rate * (data_expectation * self.visible_values[value_index][i] - sample_expectation * sample[i])
self.visible_to_visible_bias[n][i,j] = self.visible_to_visible_bias[n][i,j] + weight_change_delta
#we update the hidden to hidden connection weights between the current time step and the previous time steps
for n in xrange(self.order):
value_index = self.order - n - 1
_,sample = self._sample(1,self.visible_values[value_index])
for i in xrange(self.number_hidden_units):
for j in xrange(self.number_hidden_units):
data_expectation = self._sigmoid(np.sum(self.hidden_to_hidden_bias[n][:,j] * self.hidden_values[value_index]) + self.hidden_bias[j])
sample_expectation = self._sigmoid(np.sum(self.hidden_to_hidden_bias[n][:,j] * sample) + self.hidden_bias[j])
#we update the connection weight between the i-th and the j-th hidden unit
weight_change_delta = learning_rate * (data_expectation * self.hidden_values[value_index][i] - sample_expectation * sample[i])
self.hidden_to_hidden_bias[n][i,j] = self.hidden_to_hidden_bias[n][i,j] + weight_change_delta
#we move the visible vectors one time step back
self._shift_visible_vectors_back()
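# Illustrative sketch (not part of the original class): `data` is expected to
# hold one visible vector per row, with the first `order` rows used only to
# initialise the history. For example, with 3 visible units and order 2:
#
#   data = np.array([[0., 1., 0.],   # t = 0 (history)
#                    [1., 0., 0.],   # t = 1 (history)
#                    [0., 0., 1.],   # t = 2 (first step actually trained on)
#                    [1., 1., 0.]])  # t = 3
#   trbm = TRBM(number_visible_units=3, number_hidden_units=4, order=2)
#   trbm.train(data, epochs=10, learning_rate=0.1)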
def initialise(self, initial_data):
for t in xrange(self.order):
self.visible_values[t] = self._copy_array(initial_data[t,:], self.visible_values[t].shape)
self.hidden_values[t] = self._sample_initial(t,1)
def sample_network(self, current_vector=None):
"""Samples a visible vector from the network.
Keyword arguments:
current_vector -- Data vector at time t given as a 'np.array' of dimension (number_visible_units) (default None, in which case the current visible layer is filled with random values).
Returns:
visible_units -- A 'np.array' containing the sampled values.
"""
if current_vector is None:
for i in xrange(len(self.visible_values[self.order])):
self.visible_values[self.order][i] = np.random.rand()
else:
self.visible_values[self.order] = self._copy_array(current_vector, self.visible_values[self.order].shape)
visible_units = np.array(self.visible_values[self.order])
hidden_units = np.array(self.hidden_values[self.order])
current_time_visible_bias_values = self.connection_weights.dot(self.hidden_values[self.order])
visible_bias = self._bias_function_visible()
current_time_hidden_bias_values = self.connection_weights.T.dot(self.visible_values[self.order])
hidden_bias = self._bias_function_hidden()
for neuron in xrange(self.number_hidden_units):
prob = self._sigmoid(current_time_hidden_bias_values[neuron] + hidden_bias[neuron])
hidden_units[neuron] = prob
for neuron in xrange(self.number_visible_units):
prob = self._sigmoid(current_time_visible_bias_values[neuron] + visible_bias[neuron])
visible_units[neuron] = prob
self._shift_visible_vectors_back()
return visible_units
def _sample(self, k, training_vector=None):
"""Samples a visible vector and a hidden vector given a training vector.
Uses Contrastive Divergence for sampling the values.
Keyword arguments:
k -- The number of samples created by Contrastive Divergence before a sample is accepted.
training_vector -- A vector that should be used at the t-th time step. (default None, resulting in a vector already stored in self.visible_values[self.order]).
Returns:
visible_units -- A 'np.array' containing the sampled visible values.
hidden_units -- A 'np.array' containing the sampled hidden values.
"""
visible_units = None
if training_vector is None:
visible_units = np.array(self.visible_values[self.order])
else:
visible_units = np.array(training_vector)
hidden_units = np.array(self.hidden_values[self.order])
current_time_visible_bias_values = self.connection_weights.dot(self.hidden_values[self.order])
visible_bias = self._bias_function_visible()
current_time_hidden_bias_values = self.connection_weights.T.dot(self.visible_values[self.order])
hidden_bias = self._bias_function_hidden()
for sample in xrange(k):
for neuron in xrange(self.number_hidden_units):
prob = self._sigmoid(current_time_hidden_bias_values[neuron] + hidden_bias[neuron])
hidden_units[neuron] = prob
for neuron in xrange(self.number_visible_units):
prob = self._sigmoid(current_time_visible_bias_values[neuron] + visible_bias[neuron])
visible_units[neuron] = prob
return visible_units, hidden_units
def _sample_initial(self, t, k):
"""Samples a hidden layer given only on the visible vector at the current time step.
Uses Contrastive Divergence for sampling the values.
Keyword arguments:
t -- Current time step.
k -- The number of samples created by Contrastive Divergence before a sample is accepted.
Returns:
hidden_units -- A 'np.array' containing the sampled hidden values.
"""
visible_units = np.array(self.visible_values[t])
hidden_units = np.array(self.hidden_values[t])
for sample in xrange(k):
for neuron in xrange(self.number_hidden_units):
prob = self._sigmoid(np.sum(self.connection_weights[:,neuron] * visible_units) + self.hidden_bias[neuron])
hidden_units[neuron] = prob
for neuron in xrange(self.number_visible_units):
prob = self._sigmoid(np.sum(self.connection_weights[neuron,:] * hidden_units) + self.visible_bias[neuron])
visible_units[neuron] = prob
return hidden_units | {
"content_hash": "b8469601fcac38abb9efc0a7f0bb4016",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 198,
"avg_line_length": 55.20703125,
"alnum_prop": 0.6163588763885941,
"repo_name": "aleksandar-mitrevski/fault_and_anomaly_detection",
"id": "9aa87b6a6ed2addf1b62ca3e24111f40b1116d70",
"size": "14133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generative_model_fd/brain/machines/trbm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "164809"
}
],
"symlink_target": ""
} |
from ._models_py3 import AgentPool
from ._models_py3 import AgentPoolAvailableVersions
from ._models_py3 import AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem
from ._models_py3 import AgentPoolListResult
from ._models_py3 import AgentPoolUpgradeProfile
from ._models_py3 import AgentPoolUpgradeProfilePropertiesUpgradesItem
from ._models_py3 import AgentPoolUpgradeSettings
from ._models_py3 import AzureKeyVaultKms
from ._models_py3 import CloudErrorBody
from ._models_py3 import ContainerServiceDiagnosticsProfile
from ._models_py3 import ContainerServiceLinuxProfile
from ._models_py3 import ContainerServiceMasterProfile
from ._models_py3 import ContainerServiceNetworkProfile
from ._models_py3 import ContainerServiceSshConfiguration
from ._models_py3 import ContainerServiceSshPublicKey
from ._models_py3 import ContainerServiceVMDiagnostics
from ._models_py3 import CreationData
from ._models_py3 import CredentialResult
from ._models_py3 import CredentialResults
from ._models_py3 import EndpointDependency
from ._models_py3 import EndpointDetail
from ._models_py3 import ExtendedLocation
from ._models_py3 import KubeletConfig
from ._models_py3 import LinuxOSConfig
from ._models_py3 import MaintenanceConfiguration
from ._models_py3 import MaintenanceConfigurationListResult
from ._models_py3 import ManagedCluster
from ._models_py3 import ManagedClusterAADProfile
from ._models_py3 import ManagedClusterAPIServerAccessProfile
from ._models_py3 import ManagedClusterAccessProfile
from ._models_py3 import ManagedClusterAddonProfile
from ._models_py3 import ManagedClusterAddonProfileIdentity
from ._models_py3 import ManagedClusterAgentPoolProfile
from ._models_py3 import ManagedClusterAgentPoolProfileProperties
from ._models_py3 import ManagedClusterAutoUpgradeProfile
from ._models_py3 import ManagedClusterHTTPProxyConfig
from ._models_py3 import ManagedClusterIdentity
from ._models_py3 import ManagedClusterIngressProfile
from ._models_py3 import ManagedClusterIngressProfileWebAppRouting
from ._models_py3 import ManagedClusterListResult
from ._models_py3 import ManagedClusterLoadBalancerProfile
from ._models_py3 import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from ._models_py3 import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from ._models_py3 import ManagedClusterLoadBalancerProfileOutboundIPs
from ._models_py3 import ManagedClusterManagedOutboundIPProfile
from ._models_py3 import ManagedClusterNATGatewayProfile
from ._models_py3 import ManagedClusterOIDCIssuerProfile
from ._models_py3 import ManagedClusterPodIdentity
from ._models_py3 import ManagedClusterPodIdentityException
from ._models_py3 import ManagedClusterPodIdentityProfile
from ._models_py3 import ManagedClusterPodIdentityProvisioningError
from ._models_py3 import ManagedClusterPodIdentityProvisioningErrorBody
from ._models_py3 import ManagedClusterPodIdentityProvisioningInfo
from ._models_py3 import ManagedClusterPoolUpgradeProfile
from ._models_py3 import ManagedClusterPoolUpgradeProfileUpgradesItem
from ._models_py3 import ManagedClusterPropertiesAutoScalerProfile
from ._models_py3 import ManagedClusterPropertiesForSnapshot
from ._models_py3 import ManagedClusterSKU
from ._models_py3 import ManagedClusterSecurityProfile
from ._models_py3 import ManagedClusterSecurityProfileAzureDefender
from ._models_py3 import ManagedClusterSecurityProfileWorkloadIdentity
from ._models_py3 import ManagedClusterServicePrincipalProfile
from ._models_py3 import ManagedClusterSnapshot
from ._models_py3 import ManagedClusterSnapshotListResult
from ._models_py3 import ManagedClusterStorageProfile
from ._models_py3 import ManagedClusterStorageProfileDiskCSIDriver
from ._models_py3 import ManagedClusterStorageProfileFileCSIDriver
from ._models_py3 import ManagedClusterStorageProfileSnapshotController
from ._models_py3 import ManagedClusterUpgradeProfile
from ._models_py3 import ManagedClusterWindowsProfile
from ._models_py3 import ManagedServiceIdentityUserAssignedIdentitiesValue
from ._models_py3 import NetworkProfileForSnapshot
from ._models_py3 import OSOptionProfile
from ._models_py3 import OSOptionProperty
from ._models_py3 import OperationListResult
from ._models_py3 import OperationValue
from ._models_py3 import OutboundEnvironmentEndpoint
from ._models_py3 import OutboundEnvironmentEndpointCollection
from ._models_py3 import PowerState
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourcesListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import Resource
from ._models_py3 import ResourceReference
from ._models_py3 import RunCommandRequest
from ._models_py3 import RunCommandResult
from ._models_py3 import Snapshot
from ._models_py3 import SnapshotListResult
from ._models_py3 import SubResource
from ._models_py3 import SysctlConfig
from ._models_py3 import SystemData
from ._models_py3 import TagsObject
from ._models_py3 import TimeInWeek
from ._models_py3 import TimeSpan
from ._models_py3 import TrackedResource
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import WindowsGmsaProfile
from ._container_service_client_enums import AgentPoolMode
from ._container_service_client_enums import AgentPoolType
from ._container_service_client_enums import Code
from ._container_service_client_enums import ConnectionStatus
from ._container_service_client_enums import ContainerServiceStorageProfileTypes
from ._container_service_client_enums import ContainerServiceVMSizeTypes
from ._container_service_client_enums import Count
from ._container_service_client_enums import CreatedByType
from ._container_service_client_enums import Expander
from ._container_service_client_enums import ExtendedLocationTypes
from ._container_service_client_enums import Format
from ._container_service_client_enums import GPUInstanceProfile
from ._container_service_client_enums import IpFamily
from ._container_service_client_enums import KubeletDiskType
from ._container_service_client_enums import LicenseType
from ._container_service_client_enums import LoadBalancerSku
from ._container_service_client_enums import ManagedClusterPodIdentityProvisioningState
from ._container_service_client_enums import ManagedClusterSKUName
from ._container_service_client_enums import ManagedClusterSKUTier
from ._container_service_client_enums import NetworkMode
from ._container_service_client_enums import NetworkPlugin
from ._container_service_client_enums import NetworkPolicy
from ._container_service_client_enums import OSDiskType
from ._container_service_client_enums import OSSKU
from ._container_service_client_enums import OSType
from ._container_service_client_enums import OutboundType
from ._container_service_client_enums import PrivateEndpointConnectionProvisioningState
from ._container_service_client_enums import PublicNetworkAccess
from ._container_service_client_enums import ResourceIdentityType
from ._container_service_client_enums import ScaleDownMode
from ._container_service_client_enums import ScaleSetEvictionPolicy
from ._container_service_client_enums import ScaleSetPriority
from ._container_service_client_enums import SnapshotType
from ._container_service_client_enums import UpgradeChannel
from ._container_service_client_enums import WeekDay
from ._container_service_client_enums import WorkloadRuntime
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AgentPool",
"AgentPoolAvailableVersions",
"AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem",
"AgentPoolListResult",
"AgentPoolUpgradeProfile",
"AgentPoolUpgradeProfilePropertiesUpgradesItem",
"AgentPoolUpgradeSettings",
"AzureKeyVaultKms",
"CloudErrorBody",
"ContainerServiceDiagnosticsProfile",
"ContainerServiceLinuxProfile",
"ContainerServiceMasterProfile",
"ContainerServiceNetworkProfile",
"ContainerServiceSshConfiguration",
"ContainerServiceSshPublicKey",
"ContainerServiceVMDiagnostics",
"CreationData",
"CredentialResult",
"CredentialResults",
"EndpointDependency",
"EndpointDetail",
"ExtendedLocation",
"KubeletConfig",
"LinuxOSConfig",
"MaintenanceConfiguration",
"MaintenanceConfigurationListResult",
"ManagedCluster",
"ManagedClusterAADProfile",
"ManagedClusterAPIServerAccessProfile",
"ManagedClusterAccessProfile",
"ManagedClusterAddonProfile",
"ManagedClusterAddonProfileIdentity",
"ManagedClusterAgentPoolProfile",
"ManagedClusterAgentPoolProfileProperties",
"ManagedClusterAutoUpgradeProfile",
"ManagedClusterHTTPProxyConfig",
"ManagedClusterIdentity",
"ManagedClusterIngressProfile",
"ManagedClusterIngressProfileWebAppRouting",
"ManagedClusterListResult",
"ManagedClusterLoadBalancerProfile",
"ManagedClusterLoadBalancerProfileManagedOutboundIPs",
"ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
"ManagedClusterLoadBalancerProfileOutboundIPs",
"ManagedClusterManagedOutboundIPProfile",
"ManagedClusterNATGatewayProfile",
"ManagedClusterOIDCIssuerProfile",
"ManagedClusterPodIdentity",
"ManagedClusterPodIdentityException",
"ManagedClusterPodIdentityProfile",
"ManagedClusterPodIdentityProvisioningError",
"ManagedClusterPodIdentityProvisioningErrorBody",
"ManagedClusterPodIdentityProvisioningInfo",
"ManagedClusterPoolUpgradeProfile",
"ManagedClusterPoolUpgradeProfileUpgradesItem",
"ManagedClusterPropertiesAutoScalerProfile",
"ManagedClusterPropertiesForSnapshot",
"ManagedClusterSKU",
"ManagedClusterSecurityProfile",
"ManagedClusterSecurityProfileAzureDefender",
"ManagedClusterSecurityProfileWorkloadIdentity",
"ManagedClusterServicePrincipalProfile",
"ManagedClusterSnapshot",
"ManagedClusterSnapshotListResult",
"ManagedClusterStorageProfile",
"ManagedClusterStorageProfileDiskCSIDriver",
"ManagedClusterStorageProfileFileCSIDriver",
"ManagedClusterStorageProfileSnapshotController",
"ManagedClusterUpgradeProfile",
"ManagedClusterWindowsProfile",
"ManagedServiceIdentityUserAssignedIdentitiesValue",
"NetworkProfileForSnapshot",
"OSOptionProfile",
"OSOptionProperty",
"OperationListResult",
"OperationValue",
"OutboundEnvironmentEndpoint",
"OutboundEnvironmentEndpointCollection",
"PowerState",
"PrivateEndpoint",
"PrivateEndpointConnection",
"PrivateEndpointConnectionListResult",
"PrivateLinkResource",
"PrivateLinkResourcesListResult",
"PrivateLinkServiceConnectionState",
"Resource",
"ResourceReference",
"RunCommandRequest",
"RunCommandResult",
"Snapshot",
"SnapshotListResult",
"SubResource",
"SysctlConfig",
"SystemData",
"TagsObject",
"TimeInWeek",
"TimeSpan",
"TrackedResource",
"UserAssignedIdentity",
"WindowsGmsaProfile",
"AgentPoolMode",
"AgentPoolType",
"Code",
"ConnectionStatus",
"ContainerServiceStorageProfileTypes",
"ContainerServiceVMSizeTypes",
"Count",
"CreatedByType",
"Expander",
"ExtendedLocationTypes",
"Format",
"GPUInstanceProfile",
"IpFamily",
"KubeletDiskType",
"LicenseType",
"LoadBalancerSku",
"ManagedClusterPodIdentityProvisioningState",
"ManagedClusterSKUName",
"ManagedClusterSKUTier",
"NetworkMode",
"NetworkPlugin",
"NetworkPolicy",
"OSDiskType",
"OSSKU",
"OSType",
"OutboundType",
"PrivateEndpointConnectionProvisioningState",
"PublicNetworkAccess",
"ResourceIdentityType",
"ScaleDownMode",
"ScaleSetEvictionPolicy",
"ScaleSetPriority",
"SnapshotType",
"UpgradeChannel",
"WeekDay",
"WorkloadRuntime",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| {
"content_hash": "c4c786c5445362ee89a3a2a5b5e4ca87",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 87,
"avg_line_length": 43.00355871886121,
"alnum_prop": 0.8178583250579279,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c37cbbf5c7555fc5a480a571f8acf9e2c6cf0600",
"size": "12552",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_03_02_preview/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import ctypes
import struct
from .hash_engine import HashEngine
from .utilities import d2h
HASH_SEED = 0x46E945F8 # static hash seed from app
class HashLibrary(HashEngine):
def __init__(self, library_path):
self._hash_lib = ctypes.cdll.LoadLibrary(library_path)
self._hash_lib.compute_hash.argtypes = (ctypes.POINTER(ctypes.c_ubyte), ctypes.c_uint32)
self._hash_lib.compute_hash.restype = ctypes.c_uint64
def hash(self, timestamp, latitude, longitude, altitude, authticket, sessiondata, requests):
self.location_hash = None
self.location_auth_hash = None
self.request_hashes = []
first_hash = self.hash32(authticket, seed=HASH_SEED)
location_bytes = d2h(latitude) + d2h(longitude) + d2h(altitude)
loc_hash = self.hash32(location_bytes, seed=first_hash)
self.location_auth_hash = ctypes.c_int32(loc_hash).value
loc_hash = self.hash32(location_bytes, seed=HASH_SEED)
self.location_hash = ctypes.c_int32(loc_hash).value
first_hash = self.hash64salt32(authticket, seed=HASH_SEED)
for request in requests:
req_hash = self.hash64salt64(request.SerializeToString(), seed=first_hash)
self.request_hashes.append(ctypes.c_int64(req_hash).value)
def hash64salt32(self, buf, seed):
buf = struct.pack(">I", seed) + buf
return self.call_hash(buf)
def hash64salt64(self, buf, seed):
buf = struct.pack(">Q", seed) + buf
return self.call_hash(buf)
def hash32(self, buf, seed):
buf = struct.pack(">I", seed) + buf
hash64 = self.call_hash(buf)
signedhash64 = ctypes.c_int64(hash64)
return ctypes.c_uint(signedhash64.value).value ^ ctypes.c_uint(signedhash64.value >> 32).value
def call_hash(self, buf):
buf = list(bytearray(buf))
num_bytes = len(buf)
array_type = ctypes.c_ubyte * num_bytes
data = self._hash_lib.compute_hash(array_type(*buf), ctypes.c_uint32(num_bytes))
return ctypes.c_uint64(data).value | {
"content_hash": "5574de88540d50526df43de8745f856e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 102,
"avg_line_length": 39.236363636363635,
"alnum_prop": 0.6399443929564411,
"repo_name": "PokeHunterProject/pogom-updated",
"id": "4a83048e5305a91a5805e0bb799bf020d9b78dd3",
"size": "2158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pogom/pgoapi/hash_library.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "15396"
},
{
"name": "JavaScript",
"bytes": "26129"
},
{
"name": "Python",
"bytes": "113163"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
"""Stable Noisy Optimization by Branch and FIT algorithm (SNOBFIT) optimizer."""
from typing import Any, Dict, Optional, Callable, Tuple, List
import numpy as np
from qiskit.utils import optionals as _optionals
from .optimizer import Optimizer, OptimizerSupportLevel, OptimizerResult, POINT
@_optionals.HAS_SKQUANT.require_in_instance
@_optionals.HAS_SQSNOBFIT.require_in_instance
class SNOBFIT(Optimizer):
"""Stable Noisy Optimization by Branch and FIT algorithm.
SnobFit is used for the optimization of derivative-free, noisy objective functions, providing
robust and fast solutions to problems with continuous variables varying within bounds.
Uses skquant.opt installed with pip install scikit-quant.
For further detail, please refer to
https://github.com/scikit-quant/scikit-quant and https://qat4chem.lbl.gov/software.
"""
def __init__(
self,
maxiter: int = 1000,
maxfail: int = 10,
maxmp: int = None,
verbose: bool = False,
) -> None:
"""
Args:
maxiter: Maximum number of function evaluations.
maxmp: Maximum number of model points requested for the local fit.
Default = 2 * number of parameters + 6 set to this value when None.
maxfail: Maximum number of failures to improve the solution. Stops the algorithm
after maxfail is reached.
verbose: Provide verbose (debugging) output.
Raises:
MissingOptionalLibraryError: scikit-quant or SQSnobFit not installed
"""
super().__init__()
self._maxiter = maxiter
self._maxfail = maxfail
self._maxmp = maxmp
self._verbose = verbose
def get_support_level(self):
"""Returns support level dictionary."""
return {
"gradient": OptimizerSupportLevel.ignored,
"bounds": OptimizerSupportLevel.required,
"initial_point": OptimizerSupportLevel.required,
}
@property
def settings(self) -> Dict[str, Any]:
return {
"maxiter": self._maxiter,
"maxfail": self._maxfail,
"maxmp": self._maxmp,
"verbose": self._verbose,
}
def minimize(
self,
fun: Callable[[POINT], float],
x0: POINT,
jac: Optional[Callable[[POINT], POINT]] = None,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> OptimizerResult:
import skquant.opt as skq
from SQSnobFit import optset
if bounds is None or any(None in bound_tuple for bound_tuple in bounds):
raise ValueError("Optimizer SNOBFIT requires bounds for all parameters.")
snobfit_settings = {
"maxmp": self._maxmp,
"maxfail": self._maxfail,
"verbose": self._verbose,
}
options = optset(optin=snobfit_settings)
# correct the initial point if it lies outside the acceptable bounds
x0 = np.asarray(x0)
for idx, theta in enumerate(x0):
if abs(theta) > bounds[idx][0]:
x0[idx] = x0[idx] % bounds[idx][0]
elif abs(theta) > bounds[idx][1]:
x0[idx] = x0[idx] % bounds[idx][1]
res, history = skq.minimize(
fun,
x0,
bounds=bounds,
budget=self._maxiter,
method="snobfit",
options=options,
)
optimizer_result = OptimizerResult()
optimizer_result.x = res.optpar
optimizer_result.fun = res.optval
optimizer_result.nfev = len(history)
return optimizer_result
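# Illustrative usage sketch (not part of the original module). It assumes that
# scikit-quant and SQSnobFit are installed; the toy objective, starting point and
# bounds below are made up purely for demonstration.
if __name__ == "__main__":
    def _toy_objective(point):
        # Simple convex quadratic with its minimum at (1.0, -0.5).
        return (point[0] - 1.0) ** 2 + (point[1] + 0.5) ** 2
    _optimizer = SNOBFIT(maxiter=200, maxfail=5, verbose=False)
    _result = _optimizer.minimize(
        _toy_objective,
        x0=np.array([0.0, 0.0]),
        bounds=[(-2.0, 2.0), (-2.0, 2.0)],
    )
    print(_result.x, _result.fun, _result.nfev)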
| {
"content_hash": "ac958b9553cc11f3bc6530662afac008",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 96,
"avg_line_length": 34.86666666666667,
"alnum_prop": 0.6047527997814804,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "f74d8ca8515395bea9a314b68d7dd67db2c44c68",
"size": "4145",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/algorithms/optimizers/snobfit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
"""
prompt_toolkit
==============
Author: Jonathan Slenders
Description: prompt_toolkit is a Library for building powerful interactive
command lines in Python. It can be a replacement for GNU
readline, but it can be much more than that.
See the examples directory to learn about the usage.
Probably, to get started, you might also want to have a look at
`prompt_toolkit.shortcuts.prompt`.
"""
from .interface import CommandLineInterface
from .application import AbortAction, Application
from .shortcuts import prompt, prompt_async
# Don't forget to update in `docs/conf.py`!
__version__ = '1.0.9'
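# Illustrative usage sketch (not part of the original module): the `prompt`
# shortcut imported above is the usual entry point for a quick interactive read.
if __name__ == '__main__':
    text = prompt('Give me some input: ')
    print('You said: %s' % text)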
| {
"content_hash": "85080fc92dcfe491f65a56350d4e8ab6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 28.681818181818183,
"alnum_prop": 0.7242472266244057,
"repo_name": "lancezlin/ml_template_py",
"id": "a8d02038d5e7071110665285450d04182ce3c5f9",
"size": "631",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/prompt_toolkit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
} |
"""Log entries within the Google Stackdriver Logging API."""
import json
import re
from google.protobuf import any_pb2
from google.protobuf.json_format import Parse
from google.cloud._helpers import _name_from_project_path
from google.cloud._helpers import _rfc3339_nanos_to_datetime
_LOGGER_TEMPLATE = re.compile(r"""
projects/ # static prefix
(?P<project>[^/]+) # initial letter, wordchars + hyphen
/logs/ # static midfix
(?P<name>[^/]+) # initial letter, wordchars + allowed punc
""", re.VERBOSE)
def logger_name_from_path(path):
"""Validate a logger URI path and get the logger name.
:type path: str
:param path: URI path for a logger API request.
:rtype: str
:returns: Logger name parsed from ``path``.
:raises: :class:`ValueError` if the ``path`` is ill-formed or if
the project from the ``path`` does not agree with the
``project`` passed in.
"""
return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
class _BaseEntry(object):
"""Base class for TextEntry, StructEntry, ProtobufEntry.
:type payload: text or dict
:param payload: The payload passed as ``textPayload``, ``jsonPayload``,
or ``protoPayload``.
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
:type insert_id: text
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry
"""
def __init__(self, payload, logger, insert_id=None, timestamp=None,
labels=None, severity=None, http_request=None):
self.payload = payload
self.logger = logger
self.insert_id = insert_id
self.timestamp = timestamp
self.labels = labels
self.severity = severity
self.http_request = http_request
@classmethod
def from_api_repr(cls, resource, client, loggers=None):
"""Factory: construct an entry given its API representation
:type resource: dict
:param resource: text entry resource representation returned from
the API
:type client: :class:`google.cloud.logging.client.Client`
:param client: Client which holds credentials and project
configuration.
:type loggers: dict
:param loggers:
(Optional) A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: :class:`google.cloud.logging.entries._BaseEntry`
:returns: Text entry parsed from ``resource``.
"""
if loggers is None:
loggers = {}
logger_fullname = resource['logName']
logger = loggers.get(logger_fullname)
if logger is None:
logger_name = logger_name_from_path(logger_fullname)
logger = loggers[logger_fullname] = client.logger(logger_name)
payload = resource[cls._PAYLOAD_KEY]
insert_id = resource.get('insertId')
timestamp = resource.get('timestamp')
if timestamp is not None:
timestamp = _rfc3339_nanos_to_datetime(timestamp)
labels = resource.get('labels')
severity = resource.get('severity')
http_request = resource.get('httpRequest')
return cls(payload, logger, insert_id=insert_id, timestamp=timestamp,
labels=labels, severity=severity, http_request=http_request)
class TextEntry(_BaseEntry):
"""Entry created with ``textPayload``.
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
"""
_PAYLOAD_KEY = 'textPayload'
class StructEntry(_BaseEntry):
"""Entry created with ``jsonPayload``.
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
"""
_PAYLOAD_KEY = 'jsonPayload'
class ProtobufEntry(_BaseEntry):
"""Entry created with ``protoPayload``.
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
:type payload: str, dict or any_pb2.Any
:param payload: The payload passed as ``textPayload``, ``jsonPayload``,
or ``protoPayload``. This also may be passed as a raw
:class:`.any_pb2.Any` if the ``protoPayload`` could
not be deserialized.
:type logger: :class:`~google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
:type insert_id: str
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry
"""
_PAYLOAD_KEY = 'protoPayload'
def __init__(self, payload, logger, insert_id=None, timestamp=None,
labels=None, severity=None, http_request=None):
super(ProtobufEntry, self).__init__(
payload, logger, insert_id=insert_id, timestamp=timestamp,
labels=labels, severity=severity, http_request=http_request)
if isinstance(self.payload, any_pb2.Any):
self.payload_pb = self.payload
self.payload = None
else:
self.payload_pb = None
def parse_message(self, message):
"""Parse payload into a protobuf message.
Mutates the passed-in ``message`` in place.
:type message: Protobuf message
:param message: the message to be logged
"""
# NOTE: This assumes that ``payload`` is already a deserialized
# ``Any`` field and ``message`` has come from an imported
# ``pb2`` module with the relevant protobuf message type.
Parse(json.dumps(self.payload), message)
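# Illustrative sketch (not part of the original module): the logger path format
# parsed by logger_name_from_path() is "projects/<project>/logs/<name>"; the
# project and logger names used here are made up.
if __name__ == '__main__':
    print(logger_name_from_path('projects/my-project/logs/my-logger'))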
| {
"content_hash": "b9d7395dd9beabbe70b3c6ee48da0c6a",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 35.13586956521739,
"alnum_prop": 0.6334106728538283,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "284562c5de5be5bda5c02a2c53f9bbeea0d58209",
"size": "7041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/google/cloud/logging/entries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""Integrates distutils/setuptools with Paver."""
import re
import os
import sys
from os.path import *
from fnmatch import fnmatchcase
from distutils.util import convert_path
from distutils import log
try:
from setuptools import dist
except ImportError:
from distutils import dist
from distutils.errors import DistutilsModuleError
_Distribution = dist.Distribution
from paver.deps.six import print_
from paver.options import Bunch
try:
import setuptools
import pkg_resources
has_setuptools = True
except ImportError:
has_setuptools = False
# our commands can have '.' in them, so we'll monkeypatch this
# expression
dist.command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_\.]*)$')
from paver import tasks
__ALL__ = ['find_package_data']
# find_package_data is an Ian Bicking creation.
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def _dispatch_setuptools_install(distribution, command_name):
"""
setuptools hack:
- handle dependencies in `install_requires` by calling fetch_build_eggs()
"""
#check if it has some requirements and try to install them
if distribution.install_requires:
try:
distribution.fetch_build_eggs(distribution.install_requires)
except Exception:
pass
#run command
distribution.run_command(command_name)
# storage of extra dispatchers for distutils/setuptools commands
_extra_command_dispatch = {
'setuptools.command.install.install': _dispatch_setuptools_install,
}
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = join(where, name)
if isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print_("Directory %s ignored by pattern %s"
% (fn, pattern), file=sys.stderr)
break
if bad_name:
continue
if isfile(join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print_("File %s ignored by pattern %s"
% (fn, pattern), file=sys.stderr)
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
class DistutilsTask(tasks.Task):
def __init__(self, distribution, command_name, command_class):
name_sections = str(command_class).split(".")
if name_sections[-2] == name_sections[-1]:
del name_sections[-2]
self.name = ".".join(name_sections)
self.__name__ = self.name
self.distribution = distribution
self.command_name = command_name
self.shortname = _get_shortname(command_name)
self.command_class = command_class
self.option_names = set()
self.needs = []
self.might_call = []
self._parser = None
self.share_options_with = []
self.user_options = command_class.user_options
self.negative_opt = getattr(command_class, "negative_opt", {})
# Parse distutils config files.
distribution.parse_config_files()
def __call__(self, *args, **kw):
options = tasks.environment.options.get(self.shortname, {})
opt_dict = self.distribution.get_option_dict(self.command_name)
for (name, value) in options.items():
opt_dict[name.replace('-', '_')] = ("command line", value)
# see if we don't have extra dispatcher for command
cmd_class = str(self.command_class)
if cmd_class in _extra_command_dispatch:
_extra_command_dispatch[cmd_class](self.distribution, self.command_name)
else:
self.distribution.run_command(self.command_name)
@property
def description(self):
return self.command_class.description
def _get_shortname(taskname):
dotindex = taskname.rfind(".")
if dotindex > -1:
command_name = taskname[dotindex+1:]
else:
command_name = taskname
return command_name
class DistutilsTaskFinder(object):
def get_task(self, taskname):
dist = _get_distribution()
environ = tasks.environment
dist.command_packages = getattr(environ, 'command_packages', None)
command_name = _get_shortname(taskname)
try:
command_class = dist.get_command_class(command_name)
except DistutilsModuleError:
return None
return DistutilsTask(dist, command_name, command_class)
def get_tasks(self):
dist = _get_distribution()
if has_setuptools:
for ep in pkg_resources.iter_entry_points('distutils.commands'):
try:
cmdclass = ep.load(False) # don't require extras, we're not running
dist.cmdclass[ep.name] = cmdclass
except:
# on the Mac, at least, installing from the tarball
# via zc.buildout fails due to a problem in the
# py2app command
tasks.environment.info("Could not load entry point: %s", ep)
dist.get_command_list()
return set(DistutilsTask(dist, key, value)
for key, value in dist.cmdclass.items())
def _get_distribution():
try:
return tasks.environment.distribution
except AttributeError:
dist = _Distribution(attrs=tasks.environment.options.get('setup', {}))
tasks.environment.distribution = dist
dist.script_name = tasks.environment.pavement_file
return dist
def install_distutils_tasks():
"""Makes distutils and setuptools commands available as Paver tasks."""
env = tasks.environment
if not hasattr(env, "_distutils_tasks_installed"):
env.task_finders.append(DistutilsTaskFinder())
env._distutils_tasks_installed = True
def setup(**kw):
"""Updates options.setup with the keyword arguments provided,
and installs the distutils tasks for this pavement. You can
use paver.setuputils.setup as a direct replacement for
the distutils.core.setup or setuptools.setup in a traditional
setup.py."""
install_distutils_tasks()
setup_section = tasks.environment.options.setdefault("setup", Bunch())
setup_section.update(kw)
def _error(message, *args):
"""Displays an error message to the user."""
tasks.environment.error(message, *args)
def _info(message, *args):
"""Displays a message to the user. If the quiet option is specified, the
message will not be displayed."""
tasks.environment.info(message, *args)
def _debug(message, *args):
"""Displays a message to the user, but only if the verbose flag is
set."""
tasks.environment.debug(message, *args)
def _base_log(level, message, *args):
"""Displays a message at the given log level"""
tasks.environment._log(level, message, args)
# monkeypatch the distutils logging to go through Paver's logging
log.log = _base_log
log.debug = _debug
log.info = _info
log.warn = _error
log.error = _error
log.fatal = _error
if has_setuptools:
__ALL__.extend(["find_packages"])
from setuptools import find_packages
else:
import distutils.core
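# Illustrative usage sketch (not part of the original module): collect package
# data files under a hypothetical "src" tree, roughly as one would pass them to
# setup(package_data=...) from a pavement.py.
if __name__ == '__main__':
    print_(find_package_data(where='src', package='example', show_ignored=True))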
| {
"content_hash": "5efe709055c0916ec2b332147de71b63",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 87,
"avg_line_length": 34.84782608695652,
"alnum_prop": 0.6048034934497817,
"repo_name": "ArcherSys/ArcherSys",
"id": "5d744467b9d308d29d933959049a7a3a5f36713d",
"size": "9618",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Lib/site-packages/paver/setuputils.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
__author__ = 'Shamal Faily, Robin Quetin'
from .Borg import Borg
import os
import logging
import json
from cairis.tools.GraphicsGenerator import GraphicsGenerator
from .MySQLDatabaseProxy import MySQLDatabaseProxy
from .ARM import ARMException
def testUploadDirectory(uploadDir,logger):
image_upload_dir = os.path.join(uploadDir, 'images')
if os.path.exists(image_upload_dir):
try:
test_file = os.path.join(image_upload_dir, 'test.txt')
fs_test = open(test_file, 'wb')
fs_test.write('test'.encode('utf-8'))
fs_test.close()
os.remove(test_file)
except IOError:
err_msg = 'The upload directory for images is not writable. Image uploading will probably not work.'
logger.warning(err_msg)
else:
try:
os.mkdir(image_upload_dir, 0o775)
except IOError:
err_msg = 'Unable to create directory to store images into. Image uploading will probably not work.'
logger.warning(err_msg)
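# The file pointed to by CAIRIS_CFG is a flat list of key=value pairs, one per
# line; lines that do not contain exactly one '=' are skipped by the parser
# below. For example (values are illustrative only):
#   dbhost=127.0.0.1
#   dbport=3306
#   tmp_dir=/tmp
#   root=/home/cairisuser/cairis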
def parseConfigFile():
b = Borg()
cfgFileName = ''
try:
cfgFileName = os.environ['CAIRIS_CFG']
except KeyError:
raise ARMException('CAIRIS_CFG environment variable has not been set. Please set this to the correct location of your CAIRIS configuration file, e.g. export CAIRIS_CFG=/home/cairisuser/cairis.cnf')
if not os.path.exists(cfgFileName):
raise ARMException('Unable to locate configuration file at the following location:' + cfgFileName)
cfgDict = {}
cfgFile = open(cfgFileName)
for cfgLine in cfgFile.readlines():
cfgTuple = cfgLine.split('=')
if len(cfgTuple) != 2:
pass
else:
cfgDict[cfgTuple[0].strip()] = cfgTuple[1].strip()
cfgFile.close()
return cfgDict
def initialiseCairisDbSettings(cfgDict):
b = Borg()
b.dbHost = cfgDict['dbhost']
b.dbPort = int(cfgDict['dbport'])
b.dbUser = 'cairis_test'
b.dbPasswd = 'cairis_test'
b.dbName = 'cairis_test_default'
b.tmpDir = cfgDict['tmp_dir']
b.cairisRoot = cfgDict['root']
b.imageDir = os.path.abspath(cfgDict['default_image_dir'])
b.rPasswd = ''
b.docker = True if 'docker' in cfgDict else False
try:
b.rPasswd = cfgDict['rpasswd']
except KeyError:
pass
def setupDocBookConfig():
b = Borg()
b.docBookDir = 'http://www.docbook.org/sgml/4.5'
if os.path.exists('/usr/share/sgml/docbook/dtd/4.5') or b.docker:
b.docBookDir = '/usr/share/sgml/docbook/dtd/4.5'
else:
b.logger.warning('Unable to find DocBook schemes. Check if DocBook is correctly installed.')
tf = open(b.configDir + '/sizes.json')
b.objtSizes = json.load(tf)
tf.close()
def initialiseDesktopSettings():
b = Borg()
pSettings = b.dbProxy.getProjectSettings()
b.fontSize = pSettings['Font Size']
b.apFontSize = pSettings['AP Font Size']
b.fontName = pSettings['Font Name']
b.mainFrame = None
def initialise(user='cairis_test',db='cairis_test_default'):
cfgDict = parseConfigFile()
initialiseCairisDbSettings(cfgDict)
b = Borg()
b.runmode = 'desktop'
logging.basicConfig()
b.logger = logging.getLogger('cairis_gui')
b.iconDir = b.cairisRoot + '/images'
b.configDir = b.cairisRoot + '/config'
setupDocBookConfig()
from cairis.gui.GUIDatabaseProxy import GUIDatabaseProxy
dbPasswd = ''
if (user == 'cairis_test'):
dbPasswd = 'cairis_test'
db='cairis_test_default'
else:
b.dbUser = user
b.dbPasswd = ''
b.dbName = db
b.dbProxy = GUIDatabaseProxy(user=user,passwd=dbPasswd,db=db)
initialiseDesktopSettings()
def dInitialise(withTest = True):
cfgDict = parseConfigFile()
initialiseCairisDbSettings(cfgDict)
b = Borg()
b.runmode = 'web'
logging.basicConfig()
b.logger = logging.getLogger('cairisd')
b.configDir = os.path.join(b.cairisRoot,'config')
b.uploadDir = cfgDict['upload_dir']
b.secretKey = cfgDict['secret_key']
b.passwordHash = cfgDict['password_hash']
b.passwordSalt = cfgDict['password_salt']
try:
b.webPort = int(cfgDict['web_port'])
except TypeError as ex:
b.logger.error(str(ex))
if cfgDict['log_level'].lower() == 'debug': b.logLevel = logging.DEBUG
elif cfgDict['log_level'].lower() == 'none': b.logLevel = logging.FATAL
elif cfgDict['log_level'].lower() == 'info': b.logLevel = logging.INFO
elif cfgDict['log_level'].lower() == 'error': b.logLevel = logging.ERROR
else:
b.logLevel = logging.WARNING
b.staticDir = cfgDict['web_static_dir']
if ('web_asset_dir' not in cfgDict):
b.assetDir = b.staticDir
else:
b.assetDir = cfgDict['web_asset_dir']
b.templateDir = os.path.join(b.cairisRoot,'templates')
if not hasattr(b, 'uploadDir'): b.uploadDir = os.path.join(b.cairisRoot,'cairis/static')
paths = {
'root': b.cairisRoot,
'image': b.imageDir,
'configuration files': b.configDir,
'template files': b.templateDir,
'upload': b.uploadDir
}
for key, path in list(paths.items()):
if not os.path.exists(path):
err_msg = 'The {0} directory of CAIRIS is inaccessible or does not exist.{1}Path: {2}'.format(key, os.linesep, path)
b.logger.error(err_msg)
exit(6)
testUploadDirectory(b.uploadDir,b.logger)
b.model_generator = GraphicsGenerator('svg')
b.settings = dict()
b.settings['test'] = {
'session_id': 'test',
'fontSize': '13',
'fontName': 'Times New Roman',
'jsonPrettyPrint': True,
'apFontSize': '7.5',
'dbUser': 'cairis_test',
'userName' : 'CAIRIS test user account',
'dbPasswd' : 'cairis_test',
'dbName' : 'cairis_test_default',
'dbHost': b.dbHost,
'dbPort': b.dbPort,
'rPasswd': b.rPasswd
}
db_proxy = MySQLDatabaseProxy(
host = b.settings['test']['dbHost'],
port = b.settings['test']['dbPort'],
user = b.settings['test']['dbUser'],
passwd = b.settings['test']['dbPasswd'],
db = b.settings['test']['dbName'])
if db_proxy.conn is not None:
db_proxy.close()
b.settings['test']['dbProxy'] = db_proxy
b.dbProxy = db_proxy
b.fontSize = '13'
b.apFontSize = '7.5'
b.fontName = 'Times New Roman'
setupDocBookConfig()
| {
"content_hash": "a918d2e1ddb5f17573ab40e4c15c9d98",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 203,
"avg_line_length": 30.231155778894472,
"alnum_prop": 0.6712101063829787,
"repo_name": "nathanbjenx/cairis",
"id": "d5d29a3c80e419067675e7aac86a559291bd4a91",
"size": "6814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cairis/core/BorgFactory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588306"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "Gherkin",
"bytes": "1615"
},
{
"name": "HTML",
"bytes": "1664076"
},
{
"name": "JavaScript",
"bytes": "416319"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "PLpgSQL",
"bytes": "1494775"
},
{
"name": "Python",
"bytes": "4006311"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.timezone import now
from django_esutils.models import ESManager
class User(AbstractUser):
language = models.CharField(max_length=10, default='fr')
class Category(models.Model):
name = models.CharField(max_length=128)
class Library(models.Model):
name = models.CharField(max_length=128)
number_of_books = models.IntegerField(default=0)
ARTICLE_STATUSES = (
(0, 'draft'),
(1, 'new'),
(2, 'online'),
(3, 'api')
)
class Article(models.Model):
author = models.ForeignKey(User, related_name='author')
contributors = models.ManyToManyField(User, related_name='contributors')
created_at = models.DateTimeField(default=now)
updated_at = models.DateTimeField(auto_now=True)
category = models.ForeignKey(Category, blank=True, null=True)
library = models.ForeignKey(Library, blank=True, null=True)
subject = models.CharField(max_length=256)
content = models.TextField(blank=True, default='')
status = models.IntegerField(choices=ARTICLE_STATUSES,
default=0)
objects = ESManager()
| {
"content_hash": "d71cd30ba16dc926a1ec44d6ed7325f7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 24.3265306122449,
"alnum_prop": 0.6946308724832215,
"repo_name": "novafloss/django-esutils",
"id": "9c60606e04889064d4b1104a6059715ec20aeb6a",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo_esutils/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "307"
},
{
"name": "Python",
"bytes": "59265"
},
{
"name": "Shell",
"bytes": "720"
}
],
"symlink_target": ""
} |
"""A simple reader for file segs produced by GCS output writer."""
from google.appengine.ext.mapreduce import output_writers
try:
from google.appengine._internal import cloudstorage
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
pass
class _GCSFileSegReader(object):
"""A simple reader for file segs produced by GCS output writer.
Internal use only.
This reader conforms to Python stream interface.
"""
def __init__(self, seg_prefix, last_seg_index):
"""Init.
Instances are pickle safe.
Args:
seg_prefix: filename prefix for all segs. It is expected
seg_prefix + index = seg filename.
last_seg_index: the last index of all segs. int.
"""
self._EOF = False
self._offset = 0
self._seg_prefix = seg_prefix
self._last_seg_index = last_seg_index
self._seg_index = -1
self._seg_valid_length = None
self._seg = None
self._next_seg()
def read(self, n):
"""Read data from file segs.
Args:
n: max bytes to read. Must be positive.
Returns:
some bytes. May be smaller than n bytes. "" when no more data is left.
"""
if self._EOF:
return ""
while self._seg_index <= self._last_seg_index:
result = self._read_from_seg(n)
if result != "":
return result
else:
self._next_seg()
self._EOF = True
return ""
def close(self):
if self._seg:
self._seg.close()
def tell(self):
"""Returns the next offset to read."""
return self._offset
def _next_seg(self):
"""Get next seg."""
if self._seg:
self._seg.close()
self._seg_index += 1
if self._seg_index > self._last_seg_index:
self._seg = None
return
filename = self._seg_prefix + str(self._seg_index)
stat = cloudstorage.stat(filename)
writer = output_writers._GoogleCloudStorageOutputWriter
if writer._VALID_LENGTH not in stat.metadata:
raise ValueError(
"Expect %s in metadata for file %s." %
(writer._VALID_LENGTH, filename))
self._seg_valid_length = int(stat.metadata[writer._VALID_LENGTH])
if self._seg_valid_length > stat.st_size:
raise ValueError(
"Valid length %s is too big for file %s of length %s" %
(self._seg_valid_length, filename, stat.st_size))
self._seg = cloudstorage.open(filename)
def _read_from_seg(self, n):
"""Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left.
"""
result = self._seg.read(size=n)
if result == "":
return result
offset = self._seg.tell()
if offset > self._seg_valid_length:
extra = offset - self._seg_valid_length
result = result[:-1*extra]
self._offset += len(result)
return result
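# Illustrative usage sketch (not part of the original module): seg filenames are
# expected to be <seg_prefix> + index, so a reader over four segs written under a
# made-up prefix can be driven like any other read-only stream.
if __name__ == "__main__":
    reader = _GCSFileSegReader("/bucket/output/seg-", last_seg_index=3)
    while True:
        chunk = reader.read(4096)
        if chunk == "":
            break
        # ... process chunk ...
    reader.close()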
| {
"content_hash": "88115c3563845dcdc48da681ccc4c587",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 76,
"avg_line_length": 24.25423728813559,
"alnum_prop": 0.6160027952480782,
"repo_name": "GoogleCloudPlatform/python-compat-runtime",
"id": "a8ebd93809e1bdac58a5587af3884cd7aaccc111",
"size": "3463",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "appengine-compat/exported_appengine_sdk/google/appengine/ext/mapreduce/tools/gcs_file_seg_reader.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30211"
},
{
"name": "HTML",
"bytes": "171272"
},
{
"name": "JavaScript",
"bytes": "414229"
},
{
"name": "Makefile",
"bytes": "2138"
},
{
"name": "PHP",
"bytes": "3132250"
},
{
"name": "Python",
"bytes": "11709249"
},
{
"name": "Shell",
"bytes": "1787"
}
],
"symlink_target": ""
} |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
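# PolynomialFeatures(degree=4) expands each position level x into the feature
# vector [1, x, x^2, x^3, x^4], so the LinearRegression below fits a quartic curve.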
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
# Visualizing the Polymonial Regression results
def viz_polymonial():
plt.scatter(X, y, color="red")
plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
plt.title("Truth or Bluff (Linear Regression)")
plt.xlabel("Position level")
plt.ylabel("Salary")
plt.show()
return
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| {
"content_hash": "6c028485a80bd0ca4c5919597e68429d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 88,
"avg_line_length": 28.155555555555555,
"alnum_prop": 0.7166535122336227,
"repo_name": "TheAlgorithms/Python",
"id": "374c35f7f905b2aa6f15b437bc3eb81f4fba5095",
"size": "1267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "machine_learning/polymonial_regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2601694"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Clean unpayed invoces older than one day."
def handle_noargs(self, **options):
from webmoney.models import Invoice
Invoice.objects.filter(
created_on__lt=datetime.utcnow()-timedelta(days=1),
payment__isnull=True).delete()
| {
"content_hash": "7ac4a9889676dbad207e1945ac39f41c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 31.615384615384617,
"alnum_prop": 0.6909975669099757,
"repo_name": "gotlium/django-payment-webmoney",
"id": "63b1cc82fd08cae6eb0acbd48e8371b732405b0b",
"size": "411",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webmoney/management/commands/clean_webmoney.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19495"
}
],
"symlink_target": ""
} |
from time import sleep
import logging
from flaky import flaky
import pytest
from swimpy.routes import ROUTES
from swimpy.model.message import Ping, Ack, PingReq, Alive
from swimpy.model.node import Node
from swimpy.runtime import Runtime
from swimpy.util import send_message
LOGGER = logging.getLogger(__name__)
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_ping():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=1338)
r = Runtime(routes=ROUTES, node=n1)
try:
r.start()
sleep(1)
assert r.is_alive()
ping = Ping(seqno=55, node=n1)
ack = send_message(n1.addr, n1.port, ping, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == ping.seqno
finally:
r.stop()
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_pingreq():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9000)
r1 = Runtime(routes=ROUTES, node=n1)
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9001)
r2 = Runtime(routes=ROUTES, node=n2)
try:
LOGGER.info('Starting node1')
r1.start()
LOGGER.info('Starting node2')
r2.start()
sleep(1)
assert r1.is_alive()
assert r2.is_alive()
LOGGER.info('node1 and node2 are alive')
# Send a ping-req to node-1 for node-2 and wait for an ack
pingreq = PingReq(seqno=101, node=n1, target_node=n2)
ack = send_message(n1.addr, n1.port, pingreq, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == pingreq.seqno
finally:
r1.stop()
r2.stop()
@flaky
@pytest.mark.timeout(15)
@pytest.mark.parametrize('num_nodes,deadline', [
(3, 1),
(12, 7),
])
@pytest.mark.integration()
def test_join(num_nodes, deadline):
"""
Test that we're able to join <num_nodes> into a cluster within <deadline> secs
This *usually* passes, but the flaky decorator will retry in the improbable
case it does fail
"""
nodes = {}
runtimes = {}
port = 10090
for i in xrange(num_nodes):
node_id = 'node-{}'.format(i)
nodes[node_id] = Node(node_id=node_id, addr='127.0.0.1', port=port + i)
runtimes[node_id] = Runtime(routes=ROUTES, node=nodes[node_id])
try:
for runtime in runtimes.values():
runtime.start()
sleep(1)
for node_id, runtime in runtimes.iteritems():
assert runtime.is_alive()
LOGGER.info('{} is alive'.format(node_id))
node_ids = nodes.keys()
for i, node_id in enumerate(node_ids[:-1]):
next_node_id = node_ids[i + 1]
alive = Alive(node=nodes[next_node_id], sender=nodes[next_node_id])
node = nodes[node_id]
send_message(node.addr, node.port, alive)
LOGGER.info('Sleeping for {} seconds'.format(deadline))
sleep(deadline)
for node_id in nodes:
for runtime in runtimes.values():
LOGGER.info('checking if {} is in runtime {}'.format(node_id, runtime.nodes.keys()))
assert node_id in runtime.nodes.keys() # .keys() gives us better debug output
finally:
LOGGER.info('Shutting down runtimes')
for runtime in runtimes.values():
runtime.stop()
@pytest.mark.timeout(15)
@pytest.mark.integration()
def test_join_with_seed_nodes():
# Create three swimpy Runtime objects
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9900)
r1 = Runtime(routes=ROUTES, node=n1)
# Configure a list of seed nodes to send JOINs to on startup
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9901)
r2 = Runtime(routes=ROUTES, node=n2, seed_nodes=[('127.0.0.1', 9900)])
n3 = Node(node_id='node-3', addr='127.0.0.1', port=9902)
r3 = Runtime(routes=ROUTES, node=n3, seed_nodes=[('127.0.0.1', 9901)])
try:
r1.start()
sleep(1)
r2.start()
sleep(1)
r3.start()
sleep(1)
for runtime in [r1, r2, r3]:
nodes_dict = runtime.nodes
LOGGER.info('Checking {} for all three nodes'.format(runtime))
assert sorted(nodes_dict) == ['node-1', 'node-2', 'node-3']
except Exception as e:
LOGGER.exception(e)
finally:
try:
r1.stop()
r2.stop()
r3.stop()
except Exception as e:
LOGGER.exception(e)
raise
| {
"content_hash": "cf8e29d4e683951fdaa11ad0d5a87c5f",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 100,
"avg_line_length": 27.919753086419753,
"alnum_prop": 0.5931903603802786,
"repo_name": "jefffm/swimpy",
"id": "f2a0a8aa3ee83a3c66bcdf84a679dabfc7d21859",
"size": "4523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_integ.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39913"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotAllowed, JsonResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from .frontend.models import Post, Attachment, Newsgroup, ModPriv
from .frontend import util
from . import thumbnail
import email
import traceback
from datetime import datetime
import mimetypes
import os
def frontpage(request):
"""
frontpage for entire webapp
"""
return render(request, 'frontpage.html')
@csrf_exempt
def webhook(request):
"""
endpoint for nntpchan daemon webhook
"""
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
try:
msg = email.message_from_bytes(request.body)
process_message(msg)
except Exception as ex:
traceback.print_exc()
return JsonResponse({ 'error': '{}'.format(ex) })
else:
return JsonResponse({'posted': True})
def process_message(msg):
newsgroup = msg.get('Newsgroups')
if newsgroup is None:
raise Exception("no newsgroup specified")
if not util.newsgroup_valid(newsgroup):
raise Exception("invalid newsgroup name")
bump = True
group, created = Newsgroup.objects.get_or_create(name=newsgroup)
if created:
group.save()
if group.banned:
raise Exception("newsgroup is banned")
msgid = None
for h in ('Message-ID', 'Message-Id', 'MessageId', 'MessageID'):
if h in msg:
msgid = msg[h]
break
# check for sage
if 'X-Sage' in msg and msg['X-Sage'] == '1':
bump = False
if msgid is None:
raise Exception("no message id specified")
elif not util.msgid_valid(msgid):
raise Exception("invalid message id format: {}".format(msgid))
opmsgid = msgid
h = util.hashid(msgid)
atts = list()
ref = msg['References'] or ''
posted = util.time_int(email.utils.parsedate_to_datetime(msg['Date']))
if len(ref) > 0:
opmsgid = ref
f = msg['From'] or 'anon <anon@anon>'
name = email.utils.parseaddr(f)[0]
post, created = Post.objects.get_or_create(defaults={
'posthash': h,
'reference': ref,
'posted': posted,
'last_bumped': 0,
'name': name,
'subject': msg["Subject"] or '',
'newsgroup': group}, msgid=msgid)
if not created:
post.subject = msg["Subject"] or ''
post.name = name
post.posted = posted
m = ''
for part in msg.walk():
ctype = part.get_content_type()
print (ctype)
if ctype.startswith("text/plain"):
m += '{} '.format(part.get_payload(decode=True).decode('utf-8'))
elif ctype.startswith("message/rfc822"):
# signed message
payload = part.get_payload()
if payload is None:
raise Exception('invalid signed message, no body')
for inner in payload:
if not util.verify_message(msg["X-Pubkey-Ed25519"], msg['X-Signature-Ed25519-Sha512'], inner.as_bytes()):
raise Exception('invalid signed message, signature failed')
process_message(inner)
print('processed inner')
else:
payload = part.get_payload(decode=True)
if payload is None:
continue
filename = part.get_filename()
mtype = part.get_content_type()
ext = filename.split('.')[-1].lower()
fh = util.hashfile(bytes(payload))
fn = fh + '.' + ext
fname = os.path.join(settings.MEDIA_ROOT, fn)
if not os.path.exists(fname):
with open(fname, 'wb') as f:
f.write(payload)
tname = os.path.join(settings.MEDIA_ROOT, 'thumb-{}.jpg'.format(fn))
placeholder = os.path.join(settings.ASSETS_ROOT, 'placeholder.jpg')
if not os.path.exists(tname):
thumbnail.generate(fname, tname, placeholder)
att = Attachment(filehash=fh)
att.mimetype = mtype
att.filename = filename
att.save()
atts.append(att)
post.message = m
post.save()
for att in atts:
if post.has_attachment(att.filehash):
continue
post.attachments.add(att)
op, _ = Post.objects.get_or_create(defaults={
'posthash': util.hashid(opmsgid),
'reference': '',
'posted': 0,
'last_bumped': 0,
'name': 'OP',
'subject': 'OP Not Found',
'newsgroup': group}, msgid=opmsgid)
if bump:
op.bump(post.posted)
op.save()
| {
"content_hash": "327f8ef55e1498c1f9e2085d17962413",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 121,
"avg_line_length": 30.3625,
"alnum_prop": 0.5617538081515027,
"repo_name": "majestrate/nntpchan",
"id": "46e09b19eee86e10741cdf6e7463fbe35115877b",
"size": "4858",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/frontends/django/nntpchan/nntpchan/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "85402"
},
{
"name": "CSS",
"bytes": "45454"
},
{
"name": "Emacs Lisp",
"bytes": "506"
},
{
"name": "Go",
"bytes": "732796"
},
{
"name": "HTML",
"bytes": "231849"
},
{
"name": "JavaScript",
"bytes": "124038"
},
{
"name": "Lua",
"bytes": "1641"
},
{
"name": "Makefile",
"bytes": "5325"
},
{
"name": "PHP",
"bytes": "3674"
},
{
"name": "Python",
"bytes": "48997"
},
{
"name": "Shell",
"bytes": "737"
},
{
"name": "VCL",
"bytes": "2010"
}
],
"symlink_target": ""
} |
from ..constants import OK
class Resource(object):
"""
Represents a Resource with a status.
"status" may be one of:
- "ok", the resource is available
- "warning", the resource may not be available
" "error", the resource is unavailable
"""
def __init__(self, name, description, **kwargs):
self.name = name
self.description = description
def check(self):
return {'description': self.description, 'status': OK}
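# Illustrative sketch (not part of the original module): a concrete resource
# would subclass Resource and override check() to probe a real dependency and
# return one of the statuses described in the class docstring.
if __name__ == '__main__':
    class _AlwaysHealthy(Resource):
        def check(self):
            return {'description': self.description, 'status': OK}
    print(_AlwaysHealthy('example', 'an always-healthy example resource').check())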
| {
"content_hash": "9ff4cda7a4968156926a8eb0b683541c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 24.94736842105263,
"alnum_prop": 0.6244725738396625,
"repo_name": "TabbedOut/django_canary_endpoint",
"id": "86e99ce03305d3098add04dd02002d4b6c6d6621",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canary_endpoint/resources/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "543"
},
{
"name": "Python",
"bytes": "33171"
}
],
"symlink_target": ""
} |
import boto3
import json
import os
import re
import requests
import six
import threading
from six.moves import range
from girder import events
from girder.models.assetstore import Assetstore
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.models.user import User
from girder.utility import assetstore_utilities
from .. import base
from .. import mongo_replicaset
Chunk1, Chunk2 = ('hello ', 'world')
def setUpModule():
base.startServer(mockS3=True)
def tearDownModule():
base.stopServer()
def _send_s3_request(req, data=None):
req = requests.request(
method=req['method'], url=req['url'], headers=req.get('headers', {}), data=data)
if req.status_code != 200:
raise Exception('Moto S3 request error %d: %s' % (req.status_code, req.text))
return req
class UploadTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
admin = {
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'Admin',
'lastName': 'Admin',
'password': 'adminpassword',
'admin': True
}
self.admin = User().createUser(**admin)
user = {
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword',
'admin': False
}
self.user = User().createUser(**user)
folders = Folder().childFolders(parent=self.user, parentType='user', user=self.user)
for folder in folders:
if folder['public'] is True:
self.folder = folder
def _uploadFile(self, name, partial=False, largeFile=False):
"""
Upload a file either completely or partially.
:param name: the name of the file to upload.
:param partial: the number of steps to complete in the uploads: 0
initializes the upload, 1 uploads 1 chunk, etc. False
to complete the upload.
:param largeFile: if True, upload a file that is > 32Mb
:returns: the upload record which includes the upload id.
"""
if largeFile:
chunk1 = '-' * (1024 * 1024 * 32)
chunk2 = '-' * (1024 * 1024 * 1)
else:
chunk1 = Chunk1
chunk2 = Chunk2
resp = self.request(
path='/file', method='POST', user=self.user, params={
'parentType': 'folder',
'parentId': self.folder['_id'],
'name': name,
'size': len(chunk1) + len(chunk2),
'mimeType': 'text/plain'
})
self.assertStatusOk(resp)
upload = resp.json
if partial is not False and partial == 0:
return upload
if 's3' not in upload:
fields = [('offset', 0), ('uploadId', upload['_id'])]
files = [('chunk', 'helloWorld.txt', chunk1)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
if partial is not False:
return resp.json
fields = [('offset', len(chunk1)), ('uploadId', upload['_id'])]
files = [('chunk', 'helloWorld.txt', chunk2)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
return upload
# s3 uses a different method for uploading chunks
# This has no error checking at all
if not upload['s3']['chunked']:
_send_s3_request(upload['s3']['request'], chunk1+chunk2)
if partial is not False:
return
else:
chunk1 = chunk1+chunk2
s3resp = _send_s3_request(upload['s3']['request'])
matches = re.search('<UploadId>(.*)</UploadId>', s3resp.text)
s3uploadId = matches.groups()[0]
offset = 0
chunkN = 1
etags = []
while len(chunk1):
params = {'offset': offset, 'uploadId': upload['_id']}
params['chunk'] = json.dumps({'s3UploadId': s3uploadId,
'partNumber': chunkN})
resp = self.request(
path='/file/chunk', method='POST', user=self.user, params=params)
self.assertStatusOk(resp)
upload = resp.json
if len(chunk1) > upload['s3']['chunkLength']:
chunk2 = chunk1[upload['s3']['chunkLength']:]
chunk1 = chunk1[:upload['s3']['chunkLength']]
else:
chunk2 = ""
resp = _send_s3_request(upload['s3']['request'], chunk1)
etags.append(resp.headers['ETag'])
chunk1 = chunk2
if partial is not False:
partial -= 1
chunkN += 1
if partial is not False and not partial:
return upload
resp = self.request(
path='/file/completion', method='POST', user=self.user,
params={'uploadId': upload['_id']})
self.assertStatusOk(resp)
if 's3FinalizeRequest' in resp.json:
xml = '<CompleteMultipartUpload>'
for i, tag in enumerate(etags, 1):
xml += '<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>' % (i, tag)
xml += '</CompleteMultipartUpload>'
_send_s3_request(resp.json['s3FinalizeRequest'], data=xml)
return upload
def _uploadFileWithInitialChunk(self, name, partial=False, largeFile=False, oneChunk=False):
"""
Upload a file either completely or partially, sending the first chunk
with the initial POST.
:param name: the name of the file to upload.
:param partial: the number of steps to complete in the uploads: 1
uploads 1 chunk. False to complete the upload.
:param largeFile: if True, upload a file that is > 32Mb
:param oneChunk: if True, upload everything as one chunk. Otherwise,
upload one chunk when creating the upload and one via the
file/chunk endpoint.
:returns: the upload record which includes the upload id.
"""
if not largeFile:
chunk1 = Chunk1
chunk2 = Chunk2
else:
chunk1 = '-' * (1024 * 1024 * 32)
chunk2 = '-' * (1024 * 1024 * 1)
if oneChunk:
chunk1 += chunk2
chunk2 = ''
params = {
'parentType': 'folder',
'parentId': str(self.folder['_id']),
'name': name,
'size': len(chunk1) + len(chunk2),
'mimeType': 'text/plain',
}
resp = self.request(
path='/file', method='POST', user=self.user,
params=params, body=chunk1, type='text/plain')
self.assertStatusOk(resp)
if partial is not False:
return resp.json
if not oneChunk:
upload = resp.json
params = {'offset': len(chunk1), 'uploadId': upload['_id']}
resp = self.request(
path='/file/chunk', method='POST', user=self.user,
params=params, body=chunk2, type='text/plain')
self.assertStatusOk(resp)
else:
upload = None
self.assertEqual(resp.json['_modelType'], 'file')
return upload
def _testUpload(self):
"""
Upload a file to the server and several partial files. Test that we
can delete a partial upload but not a completed upload. Test that we
can delete partial uploads that are older than a certain date.
"""
completeUpload = self._uploadFile('complete_upload')
# test uploading large files and one-chunk files
self._uploadFile('complete_large_upload', largeFile=True)
self._uploadFileWithInitialChunk('one_chunk_upload', oneChunk=True)
# test partial uploads
partialUploads = []
for largeFile in (False, True):
for partial in range(3):
partialUploads.append(self._uploadFile(
'partial_upload_%d_%s' % (partial, str(largeFile)),
partial, largeFile))
# The admin user should see all of the partial uploads, but not the
# complete uploads
resp = self.request(path='/system/uploads', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# We shouldn't be able to delete a completed upload
resp = self.request(
path='/system/uploads', method='DELETE', user=self.admin,
params={'uploadId': completeUpload['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request(path='/system/uploads', user=self.admin)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to ask for a partial upload by id
resp = self.request(
path='/system/uploads', user=self.admin,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
# The admin should be able to ask for a partial upload by assetstore id
resp = self.request(
path='/system/uploads', user=self.admin,
params={'assetstoreId': self.assetstore['_id']})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to ask for a partial upload by age.
# Everything should be more than 0 days old
resp = self.request(
path='/system/uploads', user=self.admin, params={'minimumAge': 0})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to delete an upload
resp = self.request(
path='/system/uploads', method='DELETE', user=self.admin,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
# We should now have one less partial upload
resp = self.request(path='/system/uploads', user=self.admin)
self.assertEqual(len(resp.json), len(partialUploads)-1)
# If we ask to delete everything more than one day old, nothing should
# be deleted.
resp = self.request(
path='/system/uploads', method='DELETE', user=self.admin, params={'minimumAge': 1})
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Delete all partial uploads
resp = self.request(path='/system/uploads', method='DELETE', user=self.admin)
self.assertStatusOk(resp)
resp = self.request(path='/system/uploads', user=self.admin)
self.assertEqual(resp.json, [])
def testUploadWithInitialChunk(self):
"""
Upload a file to the server and several partial files. Test that we
can delete a partial upload but not a completed upload. Test that we
can delete partial uploads that are older than a certain date.
"""
self._uploadFileWithInitialChunk('upload1')
self._uploadFileWithInitialChunk('upload2', oneChunk=True)
# test uploading large files
self._uploadFileWithInitialChunk('upload3', largeFile=True)
partialUploads = []
for largeFile in (False, True):
for partial in range(1, 3):
partialUploads.append(self._uploadFileWithInitialChunk(
'partial_upload_%d_%s' % (partial, str(largeFile)),
partial, largeFile))
# check that a user cannot list partial uploads
resp = self.request(path='/system/uploads', method='GET',
user=self.user)
self.assertStatus(resp, 403)
# The admin user should see all of the partial uploads, but not the
# complete upload
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertStatusOk(resp)
foundUploads = resp.json
self.assertEqual(len(foundUploads), len(partialUploads))
# Check that the upload model is saved when we are using one chunk
self._uploadWasSaved = 0
def trackUploads(*args, **kwargs):
self._uploadWasSaved += 1
events.bind('model.upload.save', 'uploadWithInitialChunk', trackUploads)
self._uploadFileWithInitialChunk('upload4', oneChunk=True)
# This can be changed to assertEqual if one-chunk uploads aren't saved
self.assertGreater(self._uploadWasSaved, 0)
self._uploadWasSaved = 0
# But that it is saved when using multiple chunks
self._uploadFileWithInitialChunk('upload5')
self.assertGreater(self._uploadWasSaved, 0)
events.unbind('model.upload.save', 'uploadWithInitialChunk')
def testFilesystemAssetstoreUpload(self):
self._testUpload()
# Test that a delete during an upload still results in one file
adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
size = 101
data = six.BytesIO(b' ' * size)
files = []
files.append(Upload().uploadFromFile(
data, size, 'progress', parentType='folder', parent=self.folder,
assetstore=self.assetstore))
fullPath0 = adapter.fullPath(files[0])
conditionRemoveDone = threading.Condition()
conditionInEvent = threading.Condition()
def waitForCondition(*args, **kwargs):
# Signal that we are in the event and then wait to be told that
# the delete has occurred before returning.
with conditionInEvent:
conditionInEvent.notify()
with conditionRemoveDone:
conditionRemoveDone.wait()
def uploadFileWithWait():
size = 101
data = six.BytesIO(b' ' * size)
files.append(Upload().uploadFromFile(
data, size, 'progress', parentType='folder', parent=self.folder,
assetstore=self.assetstore))
events.bind('model.file.finalizeUpload.before', 'waitForCondition',
waitForCondition)
# We create an upload that is bound to an event that waits during the
# finalizeUpload.before event so that the remove will be executed
# during this time.
with conditionInEvent:
t = threading.Thread(target=uploadFileWithWait)
t.start()
conditionInEvent.wait()
self.assertTrue(os.path.exists(fullPath0))
File().remove(files[0])
# We shouldn't actually remove the file here
self.assertTrue(os.path.exists(fullPath0))
with conditionRemoveDone:
conditionRemoveDone.notify()
t.join()
events.unbind('model.file.finalizeUpload.before', 'waitForCondition')
fullPath1 = adapter.fullPath(files[0])
self.assertEqual(fullPath0, fullPath1)
self.assertTrue(os.path.exists(fullPath1))
def testGridFSAssetstoreUpload(self):
# Clear any old DB data
base.dropGridFSDatabase('girder_test_upload_assetstore')
# Clear the assetstore database and create a GridFS assetstore
Assetstore().remove(Assetstore().getCurrent())
assetstore = Assetstore().createGridFsAssetstore(
name='Test', db='girder_test_upload_assetstore')
self.assetstore = assetstore
self._testUpload()
def testGridFSReplicaSetAssetstoreUpload(self):
verbose = 0
if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
verbose = 2
# Starting the replica sets takes time (~25 seconds)
rscfg = mongo_replicaset.makeConfig()
mongo_replicaset.startMongoReplicaSet(rscfg, verbose=verbose)
# Clear the assetstore database and create a GridFS assetstore
Assetstore().remove(Assetstore().getCurrent())
# When the mongo connection to one of the replica sets goes down, it
# takes twice the socket timeout for us to reconnect and get on with
# an upload. We can override the default timeout by passing it as a
# mongodb uri parameter.
assetstore = Assetstore().createGridFsAssetstore(
name='Test', db='girder_assetstore_rs_upload_test',
mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
'127.0.0.1:27072/?socketTimeoutMS=5000&connectTimeoutMS=2500',
replicaset='replicaset')
self.assetstore = assetstore
self._testUpload()
# Test having the primary replica set going offline and then uploading
# again. If the current primary goes offline, it seems to take mongo
# 30 seconds to elect a new primary. If we step down the current
# primary before pausing it, then the new election will happen in 20
# seconds.
mongo_replicaset.stepDownMongoReplicaSet(rscfg, 0)
mongo_replicaset.waitForRSStatus(
rscfg,
mongo_replicaset.getMongoClient(rscfg, 0),
status=[2, (1, 2), (1, 2)],
verbose=verbose)
mongo_replicaset.pauseMongoReplicaSet(rscfg, [True], verbose=verbose)
self._uploadFile('rs_upload_1')
# Have a different member of the replica set go offline and the first
# come back. This takes a long time, so I am disabling it
# mongo_replicaset.pauseMongoReplicaSet(rscfg, [False, True], verbose=verbose)
# self._uploadFile('rs_upload_2')
# Have the set come back online and upload once more
mongo_replicaset.pauseMongoReplicaSet(rscfg, [False, False], verbose=verbose)
self._uploadFile('rs_upload_3')
mongo_replicaset.stopMongoReplicaSet(rscfg)
def testGridFSShardingAssetstoreUpload(self):
verbose = 0
if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
verbose = 2
# Starting the sharding service takes time
rscfg = mongo_replicaset.makeConfig(port=27073, shard=True, sharddb=None)
mongo_replicaset.startMongoReplicaSet(rscfg, verbose=verbose)
# Clear the assetstore database and create a GridFS assetstore
Assetstore().remove(Assetstore().getCurrent())
self.assetstore = Assetstore().createGridFsAssetstore(
name='Test', db='girder_assetstore_shard_upload_test',
mongohost='mongodb://127.0.0.1:27073', shard='auto')
self._testUpload()
# Verify that we have successfully sharded the collection
adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
stat = adapter.chunkColl.database.command('collstats', adapter.chunkColl.name)
self.assertTrue(bool(stat['sharded']))
# Although we have asked for multiple shards, the chunks may all be on
# one shard. Make sure at least one shard is reported.
self.assertGreaterEqual(len(stat['shards']), 1)
# Asking for the same database again should also report sharding. Use
        # a slightly different URI to ensure that the sharding is checked anew.
assetstore = Assetstore().createGridFsAssetstore(
name='Test 2', db='girder_assetstore_shard_upload_test',
mongohost='mongodb://127.0.0.1:27073/?', shard='auto')
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
stat = adapter.chunkColl.database.command('collstats', adapter.chunkColl.name)
self.assertTrue(bool(stat['sharded']))
mongo_replicaset.stopMongoReplicaSet(rscfg)
def testS3AssetstoreUpload(self):
# Clear the assetstore database and create an S3 assetstore
Assetstore().remove(self.assetstore)
params = {
'name': 'S3 Assetstore',
'bucket': 'bucketname',
'prefix': 'testprefix',
'accessKeyId': 'abc',
'secret': '123',
'service': base.mockS3Server.service
}
assetstore = Assetstore().createS3Assetstore(**params)
self.assetstore = assetstore
self._testUpload()
# make an untracked upload to test that we can find and clear it
client = boto3.client(
's3', endpoint_url=base.mockS3Server.service, aws_access_key_id='abc',
aws_secret_access_key='123')
client.create_multipart_upload(Bucket='bucketname', Key='testprefix/abandoned_upload')
resp = self.request(path='/system/uploads', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
# Ask to delete it
resp = self.request(path='/system/uploads', method='DELETE', user=self.admin)
self.assertStatusOk(resp)
# Check that it is gone
resp = self.request(path='/system/uploads', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
| {
"content_hash": "f63c13fc8399cb29ba0984acd8fc3527",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 96,
"avg_line_length": 44.46153846153846,
"alnum_prop": 0.6009071355092116,
"repo_name": "data-exp-lab/girder",
"id": "715227f06e119417bfd0cea44739088dd69ab493",
"size": "22180",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/cases/upload_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "42365"
},
{
"name": "CSS",
"bytes": "61237"
},
{
"name": "Dockerfile",
"bytes": "2416"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "170299"
},
{
"name": "JavaScript",
"bytes": "1399182"
},
{
"name": "Mako",
"bytes": "8756"
},
{
"name": "Python",
"bytes": "2388013"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "10593"
},
{
"name": "Shell",
"bytes": "7661"
}
],
"symlink_target": ""
} |
from datetime import date
from collections import defaultdict
# Imports from other SPC apps
from scipy_central.utils import get_IP_address
import models
static_items = {'spc-main-page': 1,
'spc-about-page': 2,
'spc-about-licenses': 3,
'spc-markup-help': 4,
'haystack_search': 5,
}
def create_hit(request, item, extra_info=None):
"""
Given a Django ``request`` object, create an entry in the DB for the hit.
If the ``item`` is a string, then we assume it is a static item and use
the dictionary above to look up its "primary key".
"""
ip_address = get_IP_address(request)
ua_string = request.META.get('HTTP_USER_AGENT', '')
if extra_info is None:
extra_info = request.META.get('HTTP_REFERER', None)
try:
page_hit = models.PageHit(ip_address=ip_address, ua_string=ua_string,
item=item._meta.module_name, item_pk=item.pk,
extra_info=extra_info)
except AttributeError:
page_hit = models.PageHit(ip_address=ip_address, ua_string=ua_string,
item=item, item_pk=static_items.get(item, 0),
extra_info=extra_info)
page_hit.save()
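# Illustrative sketch only (not part of the original module): how a caller
# might record hits. ``Submission`` and the view functions are hypothetical
# names used purely for demonstration; any model instance exposing ``_meta``
# and ``pk`` works, as does one of the string keys from ``static_items`` above.
#
#   def view_submission(request, pk):
#       submission = Submission.objects.get(pk=pk)
#       create_hit(request, submission)        # model-based hit
#
#   def main_page(request):
#       create_hit(request, 'spc-main-page')   # static-item hit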
# TODO(KGD): cache this result for NN hours
def get_pagehits(item, start_date=None, end_date=None, item_pk=None):
"""
    Returns a list of tuples of the form: [(n_hits, Submission.pk), ...]
    Putting the number of hits first allows the caller to use the builtin
    ``list.sort()`` directly, since Python orders tuples by their first entry.
However, if ``item_pk`` is provided, then it simply returns the total
number of page views for that item, as an integer.
"""
if start_date is None:
start_date = date.min
if end_date is None:
end_date = date.max
# extra_info=None to avoid counting download hits
if item_pk is None:
page_hits = models.PageHit.objects.filter(item=item).\
filter(datetime__gte=start_date).\
filter(datetime__lte=end_date).\
filter(extra_info=None)
else:
page_hits = models.PageHit.objects.filter(item=item).\
filter(datetime__gte=start_date).\
filter(datetime__lte=end_date).\
filter(item_pk=item_pk).\
filter(extra_info=None)
return len(page_hits)
hits_by_pk = defaultdict(int)
for hit in page_hits:
hits_by_pk[hit.item_pk] += 1
hit_counts = []
for key, val in hits_by_pk.iteritems():
hit_counts.append((val, key))
return hit_counts
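# Illustrative sketch only (not part of the original module): because the hit
# count is the first element of each tuple, callers can sort the result
# directly. The item name 'submission' is an assumption for demonstration.
#
#   counts = get_pagehits('submission')
#   counts.sort(reverse=True)   # most-viewed items first
#   top_ten = counts[:10]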
| {
"content_hash": "70646a3b9babbb37fe3e575fc82705ee",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 37.8125,
"alnum_prop": 0.5646280991735537,
"repo_name": "ksurya/SciPyCentral",
"id": "f6496a9eca7db6fa0e7d648970dd0766b8e923ca",
"size": "3044",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scipy_central/pagehit/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156551"
},
{
"name": "HTML",
"bytes": "77205"
},
{
"name": "JavaScript",
"bytes": "663466"
},
{
"name": "Python",
"bytes": "280829"
}
],
"symlink_target": ""
} |
"""Base test cases for all neutron tests.
"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import random
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
import six
import testtools
from neutron.agent.linux import external_process
from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg
from neutron.callbacks import manager as registry_manager
from neutron.callbacks import registry
from neutron.common import config
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron import policy
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
from neutron.tests import tools
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def fake_consume_in_threads(self):
return []
def get_rand_name(max_length=None, prefix='test'):
"""Return a random string.
The string will start with 'prefix' and will be exactly 'max_length'.
If 'max_length' is None, then exactly 8 random characters, each
    hexadecimal, will be added. If len(prefix) >= max_length,
    ValueError will be raised to indicate the problem.
"""
if max_length:
length = max_length - len(prefix)
if length <= 0:
raise ValueError("'max_length' must be bigger than 'len(prefix)'.")
suffix = ''.join(str(random.randint(0, 9)) for i in range(length))
else:
suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:]
return prefix + suffix
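# Illustrative examples only (not part of the original module), matching the
# behaviour documented above; the concrete random suffixes are made up.
#
#   get_rand_name()                            # e.g. 'test4f2a9c1b' (prefix + 8 hex chars)
#   get_rand_name(max_length=8, prefix='tap')  # e.g. 'tap51372' (prefix + 5 random digits)
#   get_rand_name(max_length=3, prefix='tap')  # raises ValueError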
def get_rand_device_name(prefix='test'):
return get_rand_name(
max_length=constants.DEVICE_NAME_MAX_LEN, prefix=prefix)
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
def get_test_timeout(default=0):
    return int(os.environ.get('OS_TEST_TIMEOUT', default))
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
raise AttributeError(_("Unknown attribute '%s'.") % name)
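# Minimal usage sketch (not part of the original module); the example data is
# made up. Dictionary values can be read through attribute access.
#
#   port = AttributeDict({'id': 'port-1', 'admin_state_up': True})
#   port.id                 # -> 'port-1'
#   port.missing_attribute  # -> raises AttributeError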
class DietTestCase(testtools.TestCase):
"""Same great taste, less filling.
BaseTestCase is responsible for doing lots of plugin-centric setup
that not all tests require (or can tolerate). This class provides
only functionality that is common across all tests.
"""
def setUp(self):
super(DietTestCase, self).setUp()
# FIXME(amuller): this must be called in the Neutron unit tests base
# class to initialize the DB connection string. Moving this may cause
# non-deterministic failures. Bug #1489098 for more info.
config.set_db_defaults()
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
# Make sure we see all relevant deprecation warnings when running tests
self.useFixture(tools.WarningsFixture())
if bool_from_env('OS_DEBUG'):
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = bool_from_env('OS_LOG_CAPTURE')
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
test_timeout = get_test_timeout()
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(mock.patch.stopall)
if bool_from_env('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if bool_from_env('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.addOnException(self.check_for_systemexit)
self.orig_pid = os.getpid()
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
if os.getpid() != self.orig_pid:
# Subprocess - let it just exit
raise
# This makes sys.exit(0) still a failure
self.force_failure = True
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in six.iteritems(dic):
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
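    # Minimal usage sketch (not part of the original module); the values are
    # made up. The expected dict only needs to be a subset of the actual dict.
    #
    #   self.assertDictSupersetOf(
    #       {'name': 'net1'},
    #       {'name': 'net1', 'id': 'abc123', 'shared': False})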
class ProcessMonitorFixture(fixtures.Fixture):
"""Test fixture to capture and cleanup any spawn process monitor."""
def _setUp(self):
self.old_callable = (
external_process.ProcessMonitor._spawn_checking_thread)
p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
"_spawn_checking_thread",
new=lambda x: self.record_calls(x))
p.start()
self.instances = []
self.addCleanup(self.stop)
def stop(self):
for instance in self.instances:
instance.stop()
def record_calls(self, instance):
self.old_callable(instance)
self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# suppress all but errors here
capture_logs = bool_from_env('OS_LOG_CAPTURE')
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
self.useFixture(lockutils.ExternalLockFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.useFixture(fixtures.MonkeyPatch(
'oslo_config.cfg.find_config_files',
lambda project=None, prog=None, extension=None: []))
self.setup_rpc_mocks()
self.setup_config()
self.setup_test_registry_instance()
policy.init()
self.addCleanup(policy.reset)
self.addCleanup(rpc_consumer_reg.clear)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
If root is None, the file is created in default temporary directory. It
also creates the directory if it's not initialized yet.
If root is not None, the file is created inside the directory passed as
root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_test_registry_instance(self):
"""Give a private copy of the registry to each test."""
self._callback_manager = registry_manager.CallbacksManager()
mock.patch.object(registry, '_get_callback_manager',
return_value=self._callback_manager).start()
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
CONF.set_override(k, v, group)
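    # Illustrative usage sketch (not part of the original module); the option
    # names below are assumptions chosen purely for demonstration.
    #
    #   self.config(debug=True)
    #   self.config(lock_path='/tmp/locks', group='oslo_concurrency')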
def setup_coreplugin(self, core_plugin=None):
cp = PluginFixture(core_plugin)
self.useFixture(cp)
self.patched_dhcp_periodic = cp.patched_dhcp_periodic
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
class PluginFixture(fixtures.Fixture):
def __init__(self, core_plugin=None):
super(PluginFixture, self).__init__()
self.core_plugin = core_plugin
def _setUp(self):
self.dhcp_periodic_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'start_periodic_dhcp_agent_status_check')
self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if self.core_plugin is not None:
cfg.CONF.set_override('core_plugin', self.core_plugin)
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
raise AssertionError(
'The plugin for this test was not deallocated.')
| {
"content_hash": "12582b8a0e2ae76f506cfb74f07774e0",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 79,
"avg_line_length": 34.822695035460995,
"alnum_prop": 0.639511201629328,
"repo_name": "mmnelemane/neutron",
"id": "cb5fb3ee66f9077f3b31044ed57ac2612277688f",
"size": "15344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7831412"
},
{
"name": "Shell",
"bytes": "13830"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import csv
import os
import sys
import tabulate
from fontTools import ttLib
args = argparse.ArgumentParser(
description='Print out usWidthClass of the fonts')
args.add_argument('font', nargs="+")
args.add_argument('--csv', default=False, action='store_true')
args.add_argument('--set', type=int, default=0)
args.add_argument('--autofix', default=False, action='store_true')
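# Example invocations (illustrative only; the font file names are made up):
#
#   python fontbakery-fix-widthclass.py MyFont-Regular.ttf MyFont-Condensed.ttf
#   python fontbakery-fix-widthclass.py --csv MyFont-Regular.ttf
#   python fontbakery-fix-widthclass.py --autofix MyFont-Condensed.ttf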
def print_info(fonts, print_csv=False):
headers = ['filename', 'usWidthClass']
rows = []
warnings = []
for font in fonts:
ttfont = ttLib.TTFont(font)
usWidthClass = ttfont['OS/2'].usWidthClass
rows.append([os.path.basename(font), usWidthClass])
if usWidthClass != 5:
warning = "WARNING: {} is {}, expected 5"
warnings.append(warning.format(font, usWidthClass))
def as_csv(rows):
writer = csv.writer(sys.stdout)
writer.writerows([headers])
writer.writerows(rows)
sys.exit(0)
if print_csv:
as_csv(rows)
print(tabulate.tabulate(rows, headers, tablefmt="pipe"))
for warn in warnings:
print(warn, file=sys.stderr)
def getFromFilename(filename):
if "UltraCondensed-" in filename:
usWidthClass = 1
elif "ExtraCondensed-" in filename:
usWidthClass = 2
elif "SemiCondensed-" in filename:
usWidthClass = 4
elif "Condensed-" in filename:
usWidthClass = 3
elif "SemiExpanded-" in filename:
usWidthClass = 6
elif "ExtraExpanded-" in filename:
usWidthClass = 8
elif "UltraExpanded-" in filename:
usWidthClass = 9
elif "Expanded-" in filename:
usWidthClass = 7
else:
usWidthClass = 5
return usWidthClass
def fix(fonts, value=None):
rows = []
headers = ['filename', 'usWidthClass was', 'usWidthClass now']
for font in fonts:
row = [font]
ttfont = ttLib.TTFont(font)
if not value:
usWidthClass = getFromFilename(font)
else:
usWidthClass = value
row.append(ttfont['OS/2'].usWidthClass)
ttfont['OS/2'].usWidthClass = usWidthClass
row.append(ttfont['OS/2'].usWidthClass)
ttfont.save(font + '.fix')
rows.append(row)
if rows:
print(tabulate.tabulate(rows, headers, tablefmt="pipe"))
if __name__ == '__main__':
arg = args.parse_args()
if arg.autofix:
fix(arg.font)
sys.exit(0)
if arg.set:
fix(arg.font, value=int(arg.set))
sys.exit(0)
print_info(arg.font, print_csv=arg.csv)
| {
"content_hash": "48bb02714da7a15d2d9d55a669df3b8d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 66,
"avg_line_length": 27.526315789473685,
"alnum_prop": 0.6175908221797323,
"repo_name": "davelab6/fontbakery",
"id": "d801549157f8ecdae1ada30c652759f915ec0870",
"size": "2637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/fontbakery-fix-widthclass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "471353"
}
],
"symlink_target": ""
} |
import logging
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from troveclient import common
INDEX_URL = reverse('horizon:project:databases:index')
LAUNCH_URL = reverse('horizon:project:databases:launch')
DETAILS_URL = reverse('horizon:project:databases:detail', args=['id'])
class DatabaseTests(test.TestCase):
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index(self):
# Mock database instances
databases = common.Paginated(self.databases.list())
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(databases)
# Mock flavors
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
# Check the Host column displaying ip or hostname
self.assertContains(res, '10.0.0.3')
self.assertContains(res, 'trove.instance-2.com')
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index_flavor_exception(self):
# Mock database instances
databases = common.Paginated(self.databases.list())
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(databases)
# Mock flavors
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs(
{api.trove: ('instance_list',)})
def test_index_list_exception(self):
# Mock database instances
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index_pagination(self):
# Mock database instances
databases = self.databases.list()
last_record = databases[1]
databases = common.Paginated(databases, next_marker="foo")
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(databases)
# Mock flavors
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertContains(
res, 'marker=' + last_record.id)
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index_flavor_list_exception(self):
# Mocking instances.
databases = common.Paginated(self.databases.list())
api.trove.instance_list(
IsA(http.HttpRequest),
marker=None,
).AndReturn(databases)
        # Mock the flavor list so that it raises an exception.
api.trove.flavor_list(
IsA(http.HttpRequest),
).AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({
api.trove: ('flavor_list', 'backup_list',
'datastore_list', 'datastore_version_list')})
def test_launch_instance(self):
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
api.trove.backup_list(IsA(http.HttpRequest))\
.AndReturn(self.database_backups.list())
# Mock datastores
api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
# Mock datastore versions
api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str)).AndReturn(self.datastore_versions.list())
self.mox.ReplayAll()
res = self.client.get(LAUNCH_URL)
self.assertTemplateUsed(res, 'project/databases/launch.html')
@test.create_stubs({api.trove: ('flavor_list',)})
def test_launch_instance_exception_on_flavors(self):
trove_exception = self.exceptions.nova
api.trove.flavor_list(IsA(http.HttpRequest)).AndRaise(trove_exception)
self.mox.ReplayAll()
toSuppress = ["openstack_dashboard.dashboards.project.databases."
"workflows.create_instance",
"horizon.workflows.base"]
# Suppress expected log messages in the test output
loggers = []
for cls in toSuppress:
logger = logging.getLogger(cls)
loggers.append((logger, logger.getEffectiveLevel()))
logger.setLevel(logging.CRITICAL)
try:
with self.assertRaises(exceptions.Http302):
self.client.get(LAUNCH_URL)
finally:
# Restore the previous log levels
for (log, level) in loggers:
log.setLevel(level)
@test.create_stubs({
api.trove: ('flavor_list', 'backup_list', 'instance_create',
'datastore_list', 'datastore_version_list'),
api.neutron: ('network_list',)})
def test_create_simple_instance(self):
api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
self.flavors.list())
api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
self.database_backups.list())
# Mock datastores
api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
# Mock datastore versions
api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str)).AndReturn(self.datastore_versions.list())
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(
self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True).AndReturn(
self.networks.list()[1:])
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
# Actual create database call
api.trove.instance_create(
IsA(http.HttpRequest),
IsA(unicode),
IsA(int),
IsA(unicode),
databases=None,
datastore=IsA(unicode),
datastore_version=IsA(unicode),
restore_point=None,
users=None,
nics=nics).AndReturn(self.databases.first())
self.mox.ReplayAll()
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'datastore': 'mysql,5.5',
}
res = self.client.post(LAUNCH_URL, post)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({
api.trove: ('flavor_list', 'backup_list', 'instance_create',
'datastore_list', 'datastore_version_list'),
api.neutron: ('network_list',)})
def test_create_simple_instance_exception(self):
trove_exception = self.exceptions.nova
api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
self.flavors.list())
api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
self.database_backups.list())
# Mock datastores
api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
# Mock datastore versions
api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str)).AndReturn(self.datastore_versions.list())
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(
self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True).AndReturn(
self.networks.list()[1:])
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
# Actual create database call
api.trove.instance_create(
IsA(http.HttpRequest),
IsA(unicode),
IsA(int),
IsA(unicode),
databases=None,
datastore=IsA(unicode),
datastore_version=IsA(unicode),
restore_point=None,
users=None,
nics=nics).AndRaise(trove_exception)
self.mox.ReplayAll()
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'datastore': 'mysql,5.5',
}
res = self.client.post(LAUNCH_URL, post)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs(
{api.trove: ('instance_get', 'flavor_get',)})
def _test_details(self, database, with_designate=False):
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.flavors.first())
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(res, 'project/databases/detail.html')
if with_designate:
self.assertContains(res, database.hostname)
else:
self.assertContains(res, database.ip[0])
def test_details_with_ip(self):
database = self.databases.first()
self._test_details(database, with_designate=False)
def test_details_with_hostname(self):
database = self.databases.list()[1]
self._test_details(database, with_designate=True)
@test.create_stubs(
{api.trove: ('instance_get', 'flavor_get', 'users_list',
'user_list_access', 'user_delete')})
def test_user_delete(self):
database = self.databases.first()
user = self.database_users.first()
user_db = self.database_user_dbs.first()
database_id = database.id
# Instead of using the user's ID, the api uses the user's name. BOOO!
user_id = user.name
# views.py: DetailView.get_data
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.flavors.first())
# tabs.py: UserTab.get_user_data
api.trove.users_list(IsA(http.HttpRequest),
database_id).AndReturn([user])
api.trove.user_list_access(IsA(http.HttpRequest),
database_id,
user_id).AndReturn([user_db])
# tables.py: DeleteUser.delete
api.trove.user_delete(IsA(http.HttpRequest),
database_id,
user_id).AndReturn(None)
self.mox.ReplayAll()
details_url = reverse('horizon:project:databases:detail',
args=[database_id])
url = details_url + '?tab=instance_details__users_tab'
action_string = u"users__delete__%s" % user_id
form_data = {'action': action_string}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({
api.trove: ('instance_get', 'instance_resize_volume'),
})
def test_resize_volume(self):
database = self.databases.first()
database_id = database.id
database_size = database.volume.get('size')
# views.py: DetailView.get_data
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
# forms.py: ResizeVolumeForm.handle
api.trove.instance_resize_volume(IsA(http.HttpRequest),
database_id,
IsA(int)).AndReturn(None)
self.mox.ReplayAll()
url = reverse('horizon:project:databases:resize_volume',
args=[database_id])
post = {
'instance_id': database_id,
'orig_size': database_size,
'new_size': database_size + 1,
}
res = self.client.post(url, post)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({
api.trove: ('instance_get', 'instance_resize_volume'),
})
def test_resize_volume_bad_value(self):
database = self.databases.first()
database_id = database.id
database_size = database.volume.get('size')
# views.py: DetailView.get_data
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
self.mox.ReplayAll()
url = reverse('horizon:project:databases:resize_volume',
args=[database_id])
post = {
'instance_id': database_id,
'orig_size': database_size,
'new_size': database_size,
}
res = self.client.post(url, post)
self.assertContains(res,
"New size for volume must be greater than current size.")
| {
"content_hash": "bbe346f854c0051f5d4aa82a3db56dd8",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 78,
"avg_line_length": 37.09814323607427,
"alnum_prop": 0.5835835835835835,
"repo_name": "zouyapeng/horizon_change",
"id": "2a090dd86d2fc51b159622ffa810227c4dd9015f",
"size": "14628",
"binary": false,
"copies": "7",
"ref": "refs/heads/juno",
"path": "openstack_dashboard/dashboards/project/databases/tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2270222"
},
{
"name": "HTML",
"bytes": "427249"
},
{
"name": "JavaScript",
"bytes": "270670"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4048852"
},
{
"name": "Shell",
"bytes": "17483"
}
],
"symlink_target": ""
} |
"""Component builders for non-recurrent networks in DRAGNN."""
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
from dragnn.python import component
from dragnn.python import dragnn_ops
from dragnn.python import network_units
from syntaxnet.util import check
def fetch_linked_embedding(comp, network_states, feature_spec):
"""Looks up linked embeddings in other components.
Args:
comp: ComponentBuilder object with respect to which the feature is to be
fetched
network_states: dictionary of NetworkState objects
feature_spec: FeatureSpec proto for the linked feature to be looked up
Returns:
NamedTensor containing the linked feature tensor
Raises:
NotImplementedError: if a linked feature with source translator other than
'identity' is configured.
RuntimeError: if a recurrent linked feature is configured.
"""
if feature_spec.source_translator != 'identity':
raise NotImplementedError(feature_spec.source_translator)
if feature_spec.source_component == comp.name:
raise RuntimeError(
'Recurrent linked features are not supported in bulk extraction.')
tf.logging.info('[%s] Adding linked feature "%s"', comp.name,
feature_spec.name)
source = comp.master.lookup_component[feature_spec.source_component]
return network_units.NamedTensor(
network_states[source.name].activations[
feature_spec.source_layer].bulk_tensor,
feature_spec.name)
def _validate_embedded_fixed_features(comp):
"""Checks that the embedded fixed features of |comp| are set up properly."""
for feature in comp.spec.fixed_feature:
check.Gt(feature.embedding_dim, 0,
'Embeddings requested for non-embedded feature: %s' % feature)
if feature.is_constant:
check.IsTrue(feature.HasField('pretrained_embedding_matrix'),
'Constant embeddings must be pretrained: %s' % feature)
def fetch_differentiable_fixed_embeddings(comp, state, stride):
"""Looks up fixed features with separate, differentiable, embedding lookup.
Args:
comp: Component whose fixed features we wish to look up.
state: live MasterState object for the component.
stride: Tensor containing current batch * beam size.
Returns:
state handle: updated state handle to be used after this call
fixed_embeddings: list of NamedTensor objects
"""
_validate_embedded_fixed_features(comp)
num_channels = len(comp.spec.fixed_feature)
if not num_channels:
return state.handle, []
state.handle, indices, ids, weights, num_steps = (
dragnn_ops.bulk_fixed_features(
state.handle, component=comp.name, num_channels=num_channels))
fixed_embeddings = []
for channel, feature_spec in enumerate(comp.spec.fixed_feature):
differentiable_or_constant = ('constant' if feature_spec.is_constant else
'differentiable')
tf.logging.info('[%s] Adding %s fixed feature "%s"', comp.name,
differentiable_or_constant, feature_spec.name)
size = stride * num_steps * feature_spec.size
fixed_embedding = network_units.embedding_lookup(
comp.get_variable(network_units.fixed_embeddings_name(channel)),
indices[channel], ids[channel], weights[channel], size)
if feature_spec.is_constant:
fixed_embedding = tf.stop_gradient(fixed_embedding)
fixed_embeddings.append(
network_units.NamedTensor(fixed_embedding, feature_spec.name))
return state.handle, fixed_embeddings
def fetch_fast_fixed_embeddings(comp, state):
"""Looks up fixed features with fast, non-differentiable, op.
Since BulkFixedEmbeddings is non-differentiable with respect to the
embeddings, the idea is to call this function only when the graph is
not being used for training.
Args:
comp: Component whose fixed features we wish to look up.
state: live MasterState object for the component.
Returns:
state handle: updated state handle to be used after this call
fixed_embeddings: list of NamedTensor objects
"""
_validate_embedded_fixed_features(comp)
num_channels = len(comp.spec.fixed_feature)
if not num_channels:
return state.handle, []
tf.logging.info('[%s] Adding %d fast fixed features', comp.name, num_channels)
state.handle, bulk_embeddings, _ = dragnn_ops.bulk_fixed_embeddings(
state.handle, [
comp.get_variable(network_units.fixed_embeddings_name(c))
for c in range(num_channels)
],
component=comp.name)
bulk_embeddings = network_units.NamedTensor(bulk_embeddings,
'bulk-%s-fixed-features' %
comp.name)
return state.handle, [bulk_embeddings]
def extract_fixed_feature_ids(comp, state, stride):
"""Extracts fixed feature IDs.
Args:
comp: Component whose fixed feature IDs we wish to extract.
state: Live MasterState object for the component.
stride: Tensor containing current batch * beam size.
Returns:
state handle: Updated state handle to be used after this call.
ids: List of [stride * num_steps, 1] feature IDs per channel. Missing IDs
(e.g., due to batch padding) are set to -1.
"""
num_channels = len(comp.spec.fixed_feature)
if not num_channels:
return state.handle, []
for feature_spec in comp.spec.fixed_feature:
check.Eq(feature_spec.size, 1, 'All features must have size=1')
check.Lt(feature_spec.embedding_dim, 0, 'All features must be non-embedded')
state.handle, indices, ids, _, num_steps = dragnn_ops.bulk_fixed_features(
state.handle, component=comp.name, num_channels=num_channels)
size = stride * num_steps
fixed_ids = []
for channel, feature_spec in enumerate(comp.spec.fixed_feature):
tf.logging.info('[%s] Adding fixed feature IDs "%s"', comp.name,
feature_spec.name)
# The +1 and -1 increments ensure that missing IDs default to -1.
#
# TODO(googleuser): This formula breaks if multiple IDs are extracted at some
# step. Try using tf.unique() to enforce the unique-IDS precondition.
sums = tf.unsorted_segment_sum(ids[channel] + 1, indices[channel], size) - 1
sums = tf.expand_dims(sums, axis=1)
fixed_ids.append(network_units.NamedTensor(sums, feature_spec.name, dim=1))
return state.handle, fixed_ids
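# Worked illustration (not part of the original module) of the +1/-1 trick in
# extract_fixed_feature_ids above, with made-up values: suppose size=4 and one
# channel produced ids=[3, 7] at indices=[0, 2]. Then
#   tf.unsorted_segment_sum(ids + 1, indices, 4) == [4, 0, 8, 0]
# and subtracting 1 yields [3, -1, 7, -1], so steps without an extracted ID
# default to -1.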
def update_network_states(comp, tensors, network_states, stride):
"""Stores Tensor objects corresponding to layer outputs.
For use in subsequent tasks.
Args:
comp: Component for which the tensor handles are being stored.
tensors: list of Tensors to store
network_states: dictionary of component NetworkState objects
stride: stride of the stored tensor.
"""
network_state = network_states[comp.name]
with tf.name_scope(comp.name + '/stored_act'):
for index, network_tensor in enumerate(tensors):
network_state.activations[comp.network.layers[index].name] = (
network_units.StoredActivations(tensor=network_tensor, stride=stride,
dim=comp.network.layers[index].dim))
def build_cross_entropy_loss(logits, gold):
"""Constructs a cross entropy from logits and one-hot encoded gold labels.
Supports skipping rows where the gold label is the magic -1 value.
Args:
logits: float Tensor of scores.
gold: int Tensor of one-hot labels.
Returns:
cost, correct, total: the total cost, the total number of correctly
predicted labels, and the total number of valid labels.
"""
valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
gold = tf.gather(gold, valid)
logits = tf.gather(logits, valid)
correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
total = tf.size(gold)
cost = tf.reduce_sum(
tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits(
logits, tf.cast(gold, tf.int64))) / tf.cast(total, tf.float32)
return cost, correct, total
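# Shape sketch (not part of the original module), using made-up values: with
# logits of shape [4, num_actions] and gold = [2, -1, 0, -1], only rows 0 and 2
# contribute to the loss; `total` is 2 and `correct` counts how many of those
# rows have their gold label ranked first by the logits.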
class BulkFeatureExtractorComponentBuilder(component.ComponentBuilderBase):
"""A component builder to bulk extract features.
Both fixed and linked features are supported, with some restrictions:
1. Fixed features may not be recurrent. Fixed features are extracted along the
gold path, which does not work during inference.
2. Linked features may not be recurrent and are 'untranslated'. For now,
linked features are extracted without passing them through any transition
system or source translator.
"""
def build_greedy_training(self, state, network_states):
"""Extracts features and advances a batch using the oracle path.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects
Returns:
state handle: final state after advancing
cost: regularization cost, possibly associated with embedding matrices
correct: since no gold path is available, 0.
total: since no gold path is available, 0.
"""
logging.info('Building component: %s', self.spec.name)
stride = state.current_batch_size * self.training_beam_size
with tf.variable_scope(self.name, reuse=True):
state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings(
self, state, stride)
linked_embeddings = [
fetch_linked_embedding(self, network_states, spec)
for spec in self.spec.linked_feature
]
with tf.variable_scope(self.name, reuse=True):
tensors = self.network.create(
fixed_embeddings, linked_embeddings, None, None, True, stride=stride)
update_network_states(self, tensors, network_states, stride)
cost = self.add_regularizer(tf.constant(0.))
correct, total = tf.constant(0), tf.constant(0)
return state.handle, cost, correct, total
def build_greedy_inference(self, state, network_states,
during_training=False):
"""Extracts features and advances a batch using the oracle path.
NOTE(danielandor) For now this method cannot be called during training.
That is to say, unroll_using_oracle for this component must be set to true.
This will be fixed by separating train_with_oracle and train_with_inference.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects
during_training: whether the graph is being constructed during training
Returns:
state handle: final state after advancing
"""
logging.info('Building component: %s', self.spec.name)
if during_training:
stride = state.current_batch_size * self.training_beam_size
else:
stride = state.current_batch_size * self.inference_beam_size
with tf.variable_scope(self.name, reuse=True):
if during_training:
state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings(
self, state, stride)
else:
state.handle, fixed_embeddings = fetch_fast_fixed_embeddings(self,
state)
linked_embeddings = [
fetch_linked_embedding(self, network_states, spec)
for spec in self.spec.linked_feature
]
with tf.variable_scope(self.name, reuse=True):
tensors = self.network.create(
fixed_embeddings,
linked_embeddings,
None,
None,
during_training=during_training,
stride=stride)
update_network_states(self, tensors, network_states, stride)
return state.handle
class BulkFeatureIdExtractorComponentBuilder(component.ComponentBuilderBase):
"""A component builder to bulk extract feature IDs.
This is a variant of BulkFeatureExtractorComponentBuilder that only supports
fixed features, and extracts raw feature IDs instead of feature embeddings.
Since the extracted feature IDs are integers, the results produced by this
component are in general not differentiable.
"""
def __init__(self, master, component_spec):
"""Initializes the feature ID extractor component.
Args:
master: dragnn.MasterBuilder object.
component_spec: dragnn.ComponentSpec proto to be built.
"""
super(BulkFeatureIdExtractorComponentBuilder, self).__init__(
master, component_spec)
check.Eq(len(self.spec.linked_feature), 0, 'Linked features are forbidden')
for feature_spec in self.spec.fixed_feature:
check.Lt(feature_spec.embedding_dim, 0,
'Features must be non-embedded: %s' % feature_spec)
def build_greedy_training(self, state, network_states):
"""See base class."""
state.handle = self._extract_feature_ids(state, network_states, True)
cost = self.add_regularizer(tf.constant(0.))
correct, total = tf.constant(0), tf.constant(0)
return state.handle, cost, correct, total
def build_greedy_inference(self, state, network_states,
during_training=False):
"""See base class."""
return self._extract_feature_ids(state, network_states, during_training)
def _extract_feature_ids(self, state, network_states, during_training):
"""Extracts feature IDs and advances a batch using the oracle path.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: Dictionary of component NetworkState objects.
during_training: Whether the graph is being constructed during training.
Returns:
state handle: Final state after advancing.
"""
logging.info('Building component: %s', self.spec.name)
if during_training:
stride = state.current_batch_size * self.training_beam_size
else:
stride = state.current_batch_size * self.inference_beam_size
with tf.variable_scope(self.name, reuse=True):
state.handle, ids = extract_fixed_feature_ids(self, state, stride)
with tf.variable_scope(self.name, reuse=True):
tensors = self.network.create(
ids, [], None, None, during_training, stride=stride)
update_network_states(self, tensors, network_states, stride)
return state.handle
class BulkAnnotatorComponentBuilder(component.ComponentBuilderBase):
"""A component builder to bulk annotate or compute the cost of a gold path.
This component can be used with features that don't depend on the
transition system state.
Since no feature extraction is performed, only non-recurrent
'identity' linked features are supported.
If a FeedForwardNetwork is configured with no hidden units, this component
acts as a 'bulk softmax' component.
"""
def build_greedy_training(self, state, network_states):
"""Advances a batch using oracle paths, returning the overall CE cost.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects
Returns:
(state handle, cost, correct, total): TF ops corresponding to the final
state after unrolling, the total cost, the total number of correctly
predicted actions, and the total number of actions.
Raises:
RuntimeError: if fixed features are configured.
"""
logging.info('Building component: %s', self.spec.name)
if self.spec.fixed_feature:
raise RuntimeError(
'Fixed features are not compatible with bulk annotation. '
'Use the "bulk-features" component instead.')
linked_embeddings = [
fetch_linked_embedding(self, network_states, spec)
for spec in self.spec.linked_feature
]
stride = state.current_batch_size * self.training_beam_size
with tf.variable_scope(self.name, reuse=True):
network_tensors = self.network.create([], linked_embeddings, None, None,
True, stride)
update_network_states(self, network_tensors, network_states, stride)
logits = self.network.get_logits(network_tensors)
state.handle, gold = dragnn_ops.bulk_advance_from_oracle(
state.handle, component=self.name)
cost, correct, total = build_cross_entropy_loss(logits, gold)
cost = self.add_regularizer(cost)
return state.handle, cost, correct, total
def build_greedy_inference(self, state, network_states,
during_training=False):
"""Annotates a batch of documents using network scores.
Args:
state: MasterState from the 'AdvanceMaster' op that advances the
underlying master to this component.
network_states: dictionary of component NetworkState objects
during_training: whether the graph is being constructed during training
Returns:
Handle to the state once inference is complete for this Component.
Raises:
RuntimeError: if fixed features are configured
"""
logging.info('Building component: %s', self.spec.name)
if self.spec.fixed_feature:
raise RuntimeError(
'Fixed features are not compatible with bulk annotation. '
'Use the "bulk-features" component instead.')
linked_embeddings = [
fetch_linked_embedding(self, network_states, spec)
for spec in self.spec.linked_feature
]
if during_training:
stride = state.current_batch_size * self.training_beam_size
else:
stride = state.current_batch_size * self.inference_beam_size
with tf.variable_scope(self.name, reuse=True):
network_tensors = self.network.create(
[], linked_embeddings, None, None, during_training, stride)
update_network_states(self, network_tensors, network_states, stride)
logits = self.network.get_logits(network_tensors)
return dragnn_ops.bulk_advance_from_prediction(
state.handle, logits, component=self.name)
| {
"content_hash": "234ff9481fe8b8704a4437ca3add03bc",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 81,
"avg_line_length": 38.995670995671,
"alnum_prop": 0.6951043516873889,
"repo_name": "wangyang59/tf_models",
"id": "f00ac92fed7a2b914d2dcbf4071c30c1b9d92ffc",
"size": "18694",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "syntaxnet/dragnn/python/bulk_component.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1216671"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "61098"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "24249"
},
{
"name": "Python",
"bytes": "3406328"
},
{
"name": "Shell",
"bytes": "48362"
}
],
"symlink_target": ""
} |
"""
This is the Python implementation for the Java package "java.util.concurrent.atomic", compiled by Pava.
"""
import pava
from pava import nan, inf
pava_classes = {}
pava.module(__name__)
import atomic
| {
"content_hash": "b5f53e6129564cfdab692c176557b012",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 103,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.7259615384615384,
"repo_name": "laffra/pava",
"id": "368488f3704e51df9455ed8196963c85898861a0",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pava/implementation/natives/java/util/concurrent/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "144"
},
{
"name": "Python",
"bytes": "369288"
}
],
"symlink_target": ""
} |
class AI:
"""Player defined AI
Attributes:
name (str): AI name
source (str): source code or .PER file
type (int): AI type
unknown1 (int): unknown1
unknown2 (int): unknown2
"""
def __init__(self, name="", source="", type=0,
unknown1=0, unknown2=0):
"""Create player defined AI
Args:
            name (str, optional): name of AI, defaults to ""
            source (str, optional): source code of AI, defaults to ""
            type (int, optional): AI type, defaults to 0
            unknown1 (int, optional): unknown, defaults to 0
            unknown2 (int, optional): unknown, defaults to 0
"""
self.name = name
self.type = type
self.source = source
self.unknown1 = unknown1
self.unknown2 = unknown2
def __repr__(self):
name = "AI:\n"
info = "\tname: {}\n\tsource length: {}\n".format(
self.name, len(self.source))
t = ""
if self.type == 0:
t = "0 (Custom)"
elif self.type == 1:
t = "1 (Standard)"
elif self.type == 2:
t = "2 (None)"
info2 = "\ttype: {}\n".format(t)
info3 = "\tunknown1: {}\n\tunknown2: {}\n".format(
self.unknown1, self.unknown2)
return name + info + info2 + info3
def toJSON(self):
"""return JSON"""
data = dict()
data['name'] = self.name
data['type'] = self.type
data['source'] = self.source
return data
def readSource(self, filename):
"""Load source code from file
Args:
            filename (str): file from which the AI source will be loaded
        Return:
            (int): number of characters read
"""
f = open(filename, 'r', encoding='utf-8')
self.source = f.read()
f.close()
return len(self.source)
def saveSource(self, filename):
"""Save source code from AI to a file
Args:
            filename (str): file to which the AI source will be written
"""
f = open(filename, 'w')
f.write(self.source)
f.close()
def clear(self):
"""Remove AI
Todo:
not sure with TYPE
"""
self.name = ""
self.source = ""
self.type = 0
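# Minimal usage sketch (not part of the original module); the file names are
# made up for demonstration.
#
#   ai = AI(name="MyBot", type=0)
#   ai.readSource("mybot.per")   # returns the number of characters read
#   print(ai)                    # human-readable summary
#   data = ai.toJSON()           # dict with 'name', 'type' and 'source'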
| {
"content_hash": "8e68cd39f3cffb2df7e74e83b5a3c5d5",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 62,
"avg_line_length": 25.322222222222223,
"alnum_prop": 0.4853005704256253,
"repo_name": "dderevjanik/agescx",
"id": "3e8b78dac584db17a7db078ba1fa1f62f35b8037",
"size": "2279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agescx/models/ai.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "23"
},
{
"name": "Python",
"bytes": "121852"
}
],
"symlink_target": ""
} |
class ScryfallSet:
def __init__(self, data):
self.name = data["name"]
self.code = data["code"]
self.released_at = data.get("released_at", None)
self.block = data.get("block", None)
self.block_code = data.get("block_code", None)
self.parent_set_code = data.get("parent_set_code", None)
def __str__(self):
out = []
out.append("{} ({})".format(self.name, self.code))
if self.released_at:
out.append("Released: {}".format(self.released_at))
if self.block and self.block_code:
out.append("Block: {} ({})".format(self.block, self.block_code))
if self.parent_set_code:
out.append("Parent Set: {}".format(self.parent_set_code)) # TODO: add parent set name
return "\n".join(out) | {
"content_hash": "80afc8a01c0b37ce796f042e8b4b774f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 98,
"avg_line_length": 40.6,
"alnum_prop": 0.562807881773399,
"repo_name": "PolarPayne/scrycli",
"id": "e260b532c7eab9212117166d721324b6dd433f3c",
"size": "812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrycli/scryfall/set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14652"
}
],
"symlink_target": ""
} |
class Persona:
    # Init is the constructor; here we receive the initial values of the attributes as parameters
    # Remember to always pass self as the first parameter
def __init__(self, nombre, apellido, dni, fechaNacimiento):
        # Assign each attribute the value of the corresponding parameter
self.nombre = nombre
self.apellido = apellido
self.dni = dni
self.fechaNacimiento = fechaNacimiento
    # __str__ returns a string that informally indicates what this entity is
    # Again, self must always be the first parameter
    # It is invoked by calling str(instance)
def __str__(self):
        # String formatting: each %s is replaced by the content at that position of the tuple
        # For example: "This %s an %s" % ("is", "example") == "This is an example"
return "%s %s, DNI %s, %s" % (self.nombre, self.apellido, self.dni, self.fechaNacimiento)
    # __repr__ should return a valid Python expression that recreates this instance
    # Once again, self always goes as the first parameter
    # It is invoked by calling repr(instance), and it will be called whenever the value of the current instance needs to be shown, for example inside a list
def __repr__(self):
return "Persona(%s, %s, %s, %s)" % (self.nombre, self.apellido, self.dni, self.fechaNacimiento)
# Function to ask for a person's attributes via the console and create a new instance
def pedir_persona():
    # raw_input() reads what the user typed, similar to C++'s cin, but it always returns a string
print "Ingrese nombre:"
nombre = raw_input()
print "Apellido:"
apellido = raw_input()
    # Since we want the DNI to be an integer, we convert it from string to int using int()
print "DNI:"
dni = int(raw_input())
    # Same for the year of birth
print "Anio nacimiento:"
year = int(raw_input())
    # Create a new instance of the Persona class with the values requested from the user
nuevaPersona = Persona(nombre, apellido, dni, year)
    # And return it
return nuevaPersona
def main():
    # Ask the user for the new person
persona = pedir_persona()
print
    # Show what str(persona) produces
print "str(persona)"
print str(persona)
print
    # Same for repr
print "repr(persona)"
print repr(persona)
print
    # We can access a particular attribute and display it
print "persona.apellido"
print persona.apellido
print
    # We can modify it and show the new value
print "persona.apellido = 'Nuevoapellido'"
persona.apellido = 'Nuevoapellido'
print "persona.apellido"
print persona.apellido
print
    # And we display the representation again after modifying the person; note that str() is not needed
print "str(persona)"
print persona
print
if __name__ == '__main__':
main()
| {
"content_hash": "3c5c473dea808da249bb29430cbe1ceb",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 159,
"avg_line_length": 36.40243902439025,
"alnum_prop": 0.6760469011725293,
"repo_name": "spalladino/algoritmos-gcba",
"id": "e6b9e497fad012212e262e010b7d729c8ba9ab7f",
"size": "3068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codigo/python-clases/clases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22481"
},
{
"name": "Shell",
"bytes": "254"
},
{
"name": "TeX",
"bytes": "153920"
}
],
"symlink_target": ""
} |
import socket
import requests
import urllib2
import json
import msgpack
import collections
import time, thread, threading
from flask import request, Request, jsonify
from flask import Flask
from flask import make_response
app = Flask(__name__)
projects_map = {}
xos_tenant_info_map = {}
xos_instances_info_map = {}
use_kafka = True
if use_kafka:
import kafka
from kafka import TopicPartition
else:
UDP_IP = "0.0.0.0"
UDP_PORT = 12346
@app.route('/autoscaledata',methods=['GET'])
def autoscaledata():
response = app.make_response(json.dumps(projects_map.values()))
response.mimetype="application/json"
return response
def acquire_xos_monitoring_channel():
url = "http://ctl:9999/xoslib/monitoringchannel/"
admin_auth=("padmin@vicci.org", "letmein") # use your XOS username and password
monitoring_channels = requests.get(url, auth=admin_auth).json()
ceilometer_url = None
if not monitoring_channels:
print 'SRIKANTH: No monitoring channels for this user...'
return None
else:
monitoring_channel = monitoring_channels[0]
while not monitoring_channel['ceilometer_url']:
print 'SRIKANTH: Waiting for monitoring channel create'
            time.sleep(0.5)
monitoring_channel = requests.get(url, auth=admin_auth).json()[0]
#TODO: Wait until URL is completely UP
while True:
print 'SRIKANTH: Waiting for ceilometer proxy URL %s is available' % monitoring_channel['ceilometer_url']
try:
response = urllib2.urlopen(monitoring_channel['ceilometer_url'],timeout=1)
break
except urllib2.HTTPError, e:
print 'SRIKANTH: HTTP error %s' % e.reason
break
except urllib2.URLError, e:
print 'SRIKANTH: URL error %s' % e.reason
pass
return monitoring_channel
def print_samples():
print ""
print ""
for project in projects_map.keys():
print "service=%s slice=%s, alarm_state=%s" % (projects_map[project]['service'], projects_map[project]['slice'] if projects_map[project]['slice'] else project, projects_map[project]['alarm'])
for resource in projects_map[project]['resources'].keys():
print "resource=%s" % (projects_map[project]['resources'][resource]['xos_instance_info']['instance_name'] if projects_map[project]['resources'][resource]['xos_instance_info'] else resource)
for i in projects_map[project]['resources'][resource]['queue']:
print " time=%s val=%s" % ( i['timestamp'],i['counter_volume'])
def periodic_print():
print_samples()
#Print every 1minute
threading.Timer(20, periodic_print).start()
CPU_UPPER_THRESHOLD = 80 #80%
CPU_LOWER_THRESHOLD = 30 #30%
CPU_THRESHOLD_REPEAT = 3
INITIAL_STATE = 'normal_config'
SCALE_UP_EVALUATION = 'scale_up_eval'
SCALE_DOWN_EVALUATION = 'scale_down_eval'
SCALE_UP_ALARM = 'scale_up'
SCALE_DOWN_ALARM = 'scale_down'
def loadAllXosTenantInfo():
print "SRIKANTH: Loading all XOS tenant info"
url = "http://ctl:9999/xos/controllerslices/"
admin_auth=("padmin@vicci.org", "letmein") # use your XOS username and password
controller_slices = requests.get(url, auth=admin_auth).json()
for cslice in controller_slices:
slice = requests.get(cslice['slice'], auth=admin_auth).json()
slice_name = slice['humanReadableName']
if slice['service']:
service = requests.get(slice['service'], auth=admin_auth).json()
service_name = service['humanReadableName']
else:
service_name = None
xos_tenant_info_map[cslice['tenant_id']] = {'service':service_name, 'slice':slice_name}
print "SRIKANTH: Project: %s Service:%s Slice:%s" % (cslice['tenant_id'],service_name,slice_name)
def loadAllXosInstanceInfo():
print "SRIKANTH: Loading all XOS instance info"
url = "http://ctl:9999/xos/instances/"
admin_auth=("padmin@vicci.org", "letmein") # use your XOS username and password
xos_instances = requests.get(url, auth=admin_auth).json()
for instance in xos_instances:
xos_instances_info_map[instance['instance_uuid']] = {'instance_name':instance['instance_name']}
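# The two lookup helpers below behave as lazy caches: on a miss they reload the
# full map from XOS once and retry before giving up.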
def getXosTenantInfo(project):
xos_tenant_info = xos_tenant_info_map.get(project, None)
if xos_tenant_info:
return xos_tenant_info
else:
loadAllXosTenantInfo()
xos_tenant_info = xos_tenant_info_map.get(project, None)
if not xos_tenant_info:
print "SRIKANTH: Project %s has no associated XOS slice" % project
return xos_tenant_info
def getXosInstanceInfo(resource):
xos_instance_info = xos_instances_info_map.get(resource, None)
if xos_instance_info:
return xos_instance_info
else:
loadAllXosInstanceInfo()
xos_instance_info = xos_instances_info_map.get(resource, None)
if not xos_instance_info:
print "SRIKANTH: Resource %s has no associated XOS instance" % project
return xos_instance_info
def handle_adjust_scale(project, adjust):
if (adjust != 'up') and (adjust != 'down'):
print "SRIKANTH: Invalid adjust value %s " % adjust
return
current_instances = len(projects_map[project]['resources'].keys())
if (current_instances <=1 and adjust == 'down'):
print "SRIKANTH: %s is running with already minimum instances and can not scale down further " % project
return
if (current_instances >=2 and adjust == 'up'):
print "SRIKANTH: %s is running with already maximum instances and can not scale up further " % project
return
#xos_tenant = getXosTenantInfo(project)
xos_service = projects_map[project]['service']
xos_slice = projects_map[project]['slice']
if not xos_service or not xos_slice:
print "SRIKANTH: Can not handle adjust_scale for Project %s because not associated with any service or slice" % project
return
print "SRIKANTH: SCALE %s for Project %s, Slice=%s, Service=%s from current=%d to new=%d" % (adjust, project, xos_slice, xos_service, current_instances, current_instances+1 if (adjust=='up') else current_instances-1)
query_params = {'service':xos_service, 'slice_hint':xos_slice, 'scale':current_instances+1 if (adjust=='up') else current_instances-1}
url = "http://ctl:9999/xoslib/serviceadjustscale/"
admin_auth=("padmin@vicci.org", "letmein") # use your XOS username and password
response = requests.get(url, params=query_params, auth=admin_auth).json()
print "SRIKANTH: XOS adjust_scale response: %s" % response
def periodic_cpu_threshold_evaluator():
for project in projects_map.keys():
aggregate_cpu_util = sum([resource['queue'][-1]['counter_volume'] \
for resource in projects_map[project]['resources'].values()]) \
/len(projects_map[project]['resources'].keys())
if (projects_map[project]['alarm'] == INITIAL_STATE or
projects_map[project]['alarm'] == SCALE_UP_ALARM or
projects_map[project]['alarm'] == SCALE_DOWN_ALARM):
if aggregate_cpu_util > CPU_UPPER_THRESHOLD:
projects_map[project]['uthreadshold_count'] = 1
projects_map[project]['alarm'] = SCALE_UP_EVALUATION
if projects_map[project]['uthreadshold_count'] >= CPU_THRESHOLD_REPEAT:
projects_map[project]['alarm'] = SCALE_UP_ALARM
handle_adjust_scale(project, 'up')
elif aggregate_cpu_util < CPU_LOWER_THRESHOLD:
projects_map[project]['lthreadshold_count'] = 1
projects_map[project]['alarm'] = SCALE_DOWN_EVALUATION
if projects_map[project]['lthreadshold_count'] >= CPU_THRESHOLD_REPEAT:
projects_map[project]['alarm'] = SCALE_DOWN_ALARM
handle_adjust_scale(project, 'down')
else:
projects_map[project]['uthreadshold_count'] = 0
projects_map[project]['lthreadshold_count'] = 0
projects_map[project]['alarm'] = INITIAL_STATE
elif projects_map[project]['alarm'] == SCALE_UP_EVALUATION:
if aggregate_cpu_util > CPU_UPPER_THRESHOLD:
projects_map[project]['uthreadshold_count'] += 1
if projects_map[project]['uthreadshold_count'] >= CPU_THRESHOLD_REPEAT:
projects_map[project]['alarm'] = SCALE_UP_ALARM
handle_adjust_scale(project, 'up')
elif aggregate_cpu_util < CPU_LOWER_THRESHOLD:
projects_map[project]['lthreadshold_count'] += 1
projects_map[project]['alarm'] = SCALE_DOWN_EVALUATION
else:
projects_map[project]['uthreadshold_count'] = 0
projects_map[project]['alarm'] = INITIAL_STATE
elif projects_map[project]['alarm'] == SCALE_DOWN_EVALUATION:
if aggregate_cpu_util < CPU_LOWER_THRESHOLD:
projects_map[project]['lthreadshold_count'] += 1
if projects_map[project]['lthreadshold_count'] >= CPU_THRESHOLD_REPEAT:
projects_map[project]['alarm'] = SCALE_DOWN_ALARM
handle_adjust_scale(project, 'down')
elif aggregate_cpu_util > CPU_UPPER_THRESHOLD:
projects_map[project]['uthreadshold_count'] += 1
projects_map[project]['alarm'] = SCALE_UP_EVALUATION
else:
projects_map[project]['lthreadshold_count'] = 0
projects_map[project]['alarm'] = INITIAL_STATE
threading.Timer(20, periodic_cpu_threshold_evaluator).start()
def read_notification_from_ceilometer_over_kafka(host,port,topic):
print "Kafka target" , host, port, topic
try :
consumer=kafka.KafkaConsumer(bootstrap_servers=["%s:%s" % (host,port)])
consumer.assign([TopicPartition(topic,0)])
consumer.seek_to_end()
for message in consumer:
#print message.value
#logging.debug("%s",message.value)
process_notification_from_ceilometer(json.loads(message.value))
#status = process_ceilometer_message(json.loads(message.value),message.value)
#print status
except Exception as e:
print "AUTO_SCALE Exception:",e
def read_notification_from_ceilometer(host,port):
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
udp.bind((host, port))
while True:
data, source = udp.recvfrom(64000)
try:
sample = msgpack.loads(data, encoding='utf-8')
process_notification_from_ceilometer(sample)
except Exception as e:
print e
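# The handler below keeps, per project and per instance, a bounded queue of the
# most recent cpu_util samples (last 10), and drops an instance's state when
# ceilometer reports that the instance was deleted.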
def process_notification_from_ceilometer(sample):
if sample['counter_name'] == 'instance':
if 'delete' in sample['resource_metadata']['event_type']:
xosTenantInfo = getXosTenantInfo(sample['project_id'])
xosResourceInfo = getXosInstanceInfo(sample['resource_id'])
print "SRIKANTH: Project %s Instance %s is getting deleted" % (xosTenantInfo['slice'] if xosTenantInfo['slice'] else sample['project_id'],xosResourceInfo)
if sample['project_id'] not in projects_map.keys():
return
if sample['resource_id'] not in projects_map[sample['project_id']]['resources'].keys():
return
projects_map[sample['project_id']]['resources'].pop(sample['resource_id'], None)
return
elif sample['counter_name'] != 'cpu_util':
return
if sample['project_id'] not in projects_map.keys():
projects_map[sample['project_id']] = {}
xosTenantInfo = getXosTenantInfo(sample['project_id'])
projects_map[sample['project_id']]['project_id'] = sample['project_id']
projects_map[sample['project_id']]['slice'] = xosTenantInfo['slice']
projects_map[sample['project_id']]['service'] = xosTenantInfo['service']
projects_map[sample['project_id']]['resources'] = {}
projects_map[sample['project_id']]['uthreadshold_count'] = 0
projects_map[sample['project_id']]['lthreadshold_count'] = 0
projects_map[sample['project_id']]['alarm'] = INITIAL_STATE
resource_map = projects_map[sample['project_id']]['resources']
if sample['resource_id'] not in resource_map.keys():
resource_map[sample['resource_id']] = {}
resource_map[sample['resource_id']]['xos_instance_info'] = getXosInstanceInfo(sample['resource_id'])
resource_map[sample['resource_id']]['queue'] = []
samples_queue = resource_map[sample['resource_id']]['queue']
sample = {'counter_name':sample['counter_name'],
'project_id':sample['project_id'],
'resource_id':sample['resource_id'],
'timestamp':sample['timestamp'],
'counter_unit':sample['counter_unit'],
'counter_volume':sample['counter_volume']}
deque = collections.deque(samples_queue, maxlen=10)
deque.append(sample)
resource_map[sample['resource_id']]['queue'] = list(deque)
def setup_webserver():
try:
#config = ConfigParser.ConfigParser()
#config.read('pub_sub.conf')
#webserver_host = config.get('WEB_SERVER','webserver_host')
#webserver_port = int (config.get('WEB_SERVER','webserver_port'))
#client_host = config.get('CLIENT','client_host')
#client_port = int (config.get('CLIENT','client_port'))
#log_level = config.get('LOGGING','level')
#log_file = config.get('LOGGING','filename')
#level = LEVELS.get(log_level, logging.NOTSET)
#logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',\
# datefmt=_DEFAULT_LOG_DATE_FORMAT,level=level)
webserver_host = '0.0.0.0'
webserver_port = 9991
except Exception as e:
print("* Error in config file:",e.__str__())
logging.error("* Error in confing file:%s",e.__str__())
else:
app.run(host=webserver_host,port=webserver_port,debug=True, use_reloader=False)
def main():
monitoring_channel = acquire_xos_monitoring_channel()
if not monitoring_channel:
print 'SRIKANTH: XOS monitoring_channel is not created... Create it before using this app'
return
loadAllXosTenantInfo()
loadAllXosInstanceInfo()
ceilometer_url = monitoring_channel['ceilometer_url']
if use_kafka:
thread.start_new(read_notification_from_ceilometer_over_kafka, ("10.11.10.1","9092","auto-scale",))
subscribe_data = {"sub_info":"cpu_util","app_id":"xos_auto_scale","target":"kafka://10.11.10.1:9092?topic=auto-scale"}
else:
thread.start_new(read_notification_from_ceilometer,(UDP_IP,UDP_PORT,))
subscribe_data = {"sub_info":"cpu_util","app_id":"xos_auto_scale","target":"udp://10.11.10.1:12346"}
subscribe_url = ceilometer_url + 'v2/subscribe'
response = requests.post(subscribe_url, data=json.dumps(subscribe_data))
print 'SRIKANTH: Ceilometer meter "cpu_util" Subscription status:%s' % response.text
#TODO: Fix the typo in 'sucess'
if (not 'sucess' in response.text) and (not 'already exists' in response.text):
print 'SRIKANTH: Ceilometer meter "cpu_util" Subscription unsuccessful...Exiting'
return
subscribe_data = {"sub_info":"instance","app_id":"xos_auto_scale2","target":"udp://10.11.10.1:12346"}
subscribe_url = ceilometer_url + 'v2/subscribe'
response = requests.post(subscribe_url, data=json.dumps(subscribe_data))
print 'SRIKANTH: Ceilometer meter "instance" Subscription status:%s' % response.text
#TODO: Fix the typo in 'sucess'
if (not 'sucess' in response.text) and (not 'already exists' in response.text):
print 'SRIKANTH: Ceilometer meter "instance"Subscription unsuccessful...Exiting'
return
periodic_cpu_threshold_evaluator()
periodic_print()
setup_webserver()
if __name__ == "__main__":
main()
| {
"content_hash": "9c6da4cd158d05da556f015cd7caa53b",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 220,
"avg_line_length": 48.93993993993994,
"alnum_prop": 0.6272933668773394,
"repo_name": "xmaruto/mcord",
"id": "848ccc062b28a60785a2df434ae5f575c5f41f68",
"size": "16297",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "applications/auto-scale/xos_auto_scaling_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "847306"
},
{
"name": "HTML",
"bytes": "732024"
},
{
"name": "JavaScript",
"bytes": "5293940"
},
{
"name": "Makefile",
"bytes": "13901"
},
{
"name": "Python",
"bytes": "1937152"
},
{
"name": "Shell",
"bytes": "49250"
}
],
"symlink_target": ""
} |
"""Support for Z-Wave cover devices."""
import logging
from typing import Any, Callable, List, Optional
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.model.value import Value as ZwaveValue
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_GARAGE,
DOMAIN as COVER_DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_CLIENT, DATA_UNSUBSCRIBE, DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .entity import ZWaveBaseEntity
LOGGER = logging.getLogger(__name__)
BARRIER_TARGET_CLOSE = 0
BARRIER_TARGET_OPEN = 255
BARRIER_STATE_CLOSED = 0
BARRIER_STATE_CLOSING = 252
BARRIER_STATE_STOPPED = 253
BARRIER_STATE_OPENING = 254
BARRIER_STATE_OPEN = 255
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable
) -> None:
"""Set up Z-Wave Cover from Config Entry."""
client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT]
@callback
def async_add_cover(info: ZwaveDiscoveryInfo) -> None:
"""Add Z-Wave cover."""
entities: List[ZWaveBaseEntity] = []
if info.platform_hint == "motorized_barrier":
entities.append(ZwaveMotorizedBarrier(config_entry, client, info))
else:
entities.append(ZWaveCover(config_entry, client, info))
async_add_entities(entities)
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(
hass,
f"{DOMAIN}_{config_entry.entry_id}_add_{COVER_DOMAIN}",
async_add_cover,
)
)
def percent_to_zwave_position(value: int) -> int:
"""Convert position in 0-100 scale to 0-99 scale.
`value` -- (int) Position byte value from 0-100.
"""
if value > 0:
return max(1, round((value / 100) * 99))
return 0
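# Illustrative values (not part of the original module):
#   percent_to_zwave_position(0) == 0, percent_to_zwave_position(1) == 1,
#   percent_to_zwave_position(100) == 99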
class ZWaveCover(ZWaveBaseEntity, CoverEntity):
"""Representation of a Z-Wave Cover device."""
@property
def is_closed(self) -> Optional[bool]:
"""Return true if cover is closed."""
if self.info.primary_value.value is None:
# guard missing value
return None
return bool(self.info.primary_value.value == 0)
@property
def current_cover_position(self) -> Optional[int]:
"""Return the current position of cover where 0 means closed and 100 is fully open."""
if self.info.primary_value.value is None:
# guard missing value
return None
return round((self.info.primary_value.value / 99) * 100)
async def async_set_cover_position(self, **kwargs: Any) -> None:
"""Move the cover to a specific position."""
target_value = self.get_zwave_value("targetValue")
await self.info.node.async_set_value(
target_value, percent_to_zwave_position(kwargs[ATTR_POSITION])
)
async def async_open_cover(self, **kwargs: Any) -> None:
"""Open the cover."""
target_value = self.get_zwave_value("targetValue")
await self.info.node.async_set_value(target_value, 99)
async def async_close_cover(self, **kwargs: Any) -> None:
"""Close cover."""
target_value = self.get_zwave_value("targetValue")
await self.info.node.async_set_value(target_value, 0)
async def async_stop_cover(self, **kwargs: Any) -> None:
"""Stop cover."""
target_value = self.get_zwave_value("Open") or self.get_zwave_value("Up")
if target_value:
await self.info.node.async_set_value(target_value, False)
target_value = self.get_zwave_value("Close") or self.get_zwave_value("Down")
if target_value:
await self.info.node.async_set_value(target_value, False)
class ZwaveMotorizedBarrier(ZWaveBaseEntity, CoverEntity):
"""Representation of a Z-Wave motorized barrier device."""
def __init__(
self,
config_entry: ConfigEntry,
client: ZwaveClient,
info: ZwaveDiscoveryInfo,
) -> None:
"""Initialize a ZwaveMotorizedBarrier entity."""
super().__init__(config_entry, client, info)
self._target_state: ZwaveValue = self.get_zwave_value(
"targetState", add_to_watched_value_ids=False
)
@property
def supported_features(self) -> Optional[int]:
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_GARAGE
@property
def is_opening(self) -> Optional[bool]:
"""Return if the cover is opening or not."""
if self.info.primary_value.value is None:
return None
return bool(self.info.primary_value.value == BARRIER_STATE_OPENING)
@property
def is_closing(self) -> Optional[bool]:
"""Return if the cover is closing or not."""
if self.info.primary_value.value is None:
return None
return bool(self.info.primary_value.value == BARRIER_STATE_CLOSING)
@property
def is_closed(self) -> Optional[bool]:
"""Return if the cover is closed or not."""
if self.info.primary_value.value is None:
return None
# If a barrier is in the stopped state, the only way to proceed is by
# issuing an open cover command. Return None in this case which
# produces an unknown state and allows it to be resolved with an open
# command.
if self.info.primary_value.value == BARRIER_STATE_STOPPED:
return None
return bool(self.info.primary_value.value == BARRIER_STATE_CLOSED)
async def async_open_cover(self, **kwargs: Any) -> None:
"""Open the garage door."""
await self.info.node.async_set_value(self._target_state, BARRIER_TARGET_OPEN)
async def async_close_cover(self, **kwargs: Any) -> None:
"""Close the garage door."""
await self.info.node.async_set_value(self._target_state, BARRIER_TARGET_CLOSE)
| {
"content_hash": "6dfcee27d86f88af6a7e5ea9c373aacd",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 94,
"avg_line_length": 35.74431818181818,
"alnum_prop": 0.6502940708949293,
"repo_name": "partofthething/home-assistant",
"id": "ff77bdb408dbc1f238d1a843ac0894ef17f695be",
"size": "6291",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zwave_js/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
SECURETRACK = "securetrack"
SECURECHANGE = "securechange" | {
"content_hash": "551f5bb299e5a5b6c4786677ccb83b2d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 28.5,
"alnum_prop": 0.8070175438596491,
"repo_name": "Tufin/pytos",
"id": "2f592e0b473ef974fa4b334aa73051b7de3f9dd2",
"size": "57",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytos/common/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11180"
},
{
"name": "Python",
"bytes": "1073816"
}
],
"symlink_target": ""
} |
"""This code example updates a user by adding " Sr." to the end of their name.
To determine which users exist, run get_all_users.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
USER_ID = 'INSERT_USER_ID_TO_UPDATE_HERE'
def main(client, user_id):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v202211')
# Create query.
statement = (ad_manager.StatementBuilder(version='v202211')
.Where('id = :userId')
.WithBindVariable('userId', int(user_id)))
# Get users by statement.
response = user_service.getUsersByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
users = response['results']
for user in users:
user['name'] += ' Sr.'
# Update users on server.
users = user_service.updateUsers(users)
for user in users:
print('User with id "%s" and name "%s" was updated.'
% (user['id'], user['name']))
else:
print('No users found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, USER_ID)
| {
"content_hash": "38466cf9b7e94d7b6ed1971afb555167",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.6634146341463415,
"repo_name": "googleads/googleads-python-lib",
"id": "14f5a0fe0a5e63d19c1ab959d6f172477b18e783",
"size": "1852",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202211/user_service/update_users.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Servicios.usuario'
db.add_column(u'servicios_servicios', 'usuario',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Servicios.usuario'
db.delete_column(u'servicios_servicios', 'usuario_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'servicios.evaluacionservicio': {
'Meta': {'object_name': 'EvaluacionServicio'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'servicios.fotosservicios': {
'Meta': {'object_name': 'FotosServicios'},
'foto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'servicio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servicios.Servicios']"})
},
u'servicios.organizacionbenefician': {
'Meta': {'object_name': 'OrganizacionBenefician'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'servicios.organizacionsolicita': {
'Meta': {'object_name': 'OrganizacionSolicita'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'servicios.resultadoevaluacion': {
'Meta': {'object_name': 'ResultadoEvaluacion'},
'escala': ('django.db.models.fields.IntegerField', [], {}),
'evaluacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servicios.EvaluacionServicio']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'servicio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servicios.Servicios']"})
},
u'servicios.servicios': {
'Meta': {'object_name': 'Servicios'},
'benefician_servicio': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['servicios.OrganizacionBenefician']", 'symmetrical': 'False'}),
'conclusiones': ('django.db.models.fields.TextField', [], {}),
'fecha': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fecha_finalizacion': ('django.db.models.fields.DateField', [], {}),
'fecha_inicio': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monto': ('django.db.models.fields.FloatField', [], {}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'objetivos': ('django.db.models.fields.TextField', [], {}),
'org_benefician': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['servicios.TiposOrganizacionBenefician']", 'symmetrical': 'False'}),
'solicita_servicio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servicios.OrganizacionSolicita']"}),
'temas_abordan': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['servicios.TemasAbordan']", 'symmetrical': 'False'}),
'tipos_servicios': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['servicios.TiposServicio']", 'symmetrical': 'False'}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'servicios.temasabordan': {
'Meta': {'object_name': 'TemasAbordan'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'servicios.tiposorganizacionbenefician': {
'Meta': {'object_name': 'TiposOrganizacionBenefician'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'servicios.tiposservicio': {
'Meta': {'object_name': 'TiposServicio'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['servicios'] | {
"content_hash": "559a4640e099a1ad4c56dc9080d829d7",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 187,
"avg_line_length": 67.37190082644628,
"alnum_prop": 0.5619479882237488,
"repo_name": "CARocha/simasinnovacion",
"id": "86ee0e8504880d07f68ae7b3da930c39ac973fd6",
"size": "8176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servicios/migrations/0003_auto__add_field_servicios_usuario.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28801"
},
{
"name": "HTML",
"bytes": "285013"
},
{
"name": "JavaScript",
"bytes": "246003"
},
{
"name": "Python",
"bytes": "457836"
}
],
"symlink_target": ""
} |
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import pgettext_lazy
from ..core.utils.translations import TranslationProxy
from ..core.weight import WeightUnits
from . import AuthenticationBackends
from .patch_sites import patch_contrib_sites
patch_contrib_sites()
class SiteSettings(models.Model):
site = models.OneToOneField(
Site, related_name='settings', on_delete=models.CASCADE)
header_text = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=500, blank=True)
top_menu = models.ForeignKey(
'menu.Menu', on_delete=models.SET_NULL, related_name='+', blank=True,
null=True)
bottom_menu = models.ForeignKey(
'menu.Menu', on_delete=models.SET_NULL, related_name='+', blank=True,
null=True)
include_taxes_in_prices = models.BooleanField(default=True)
display_gross_prices = models.BooleanField(default=True)
charge_taxes_on_shipping = models.BooleanField(default=True)
track_inventory_by_default = models.BooleanField(default=True)
homepage_collection = models.ForeignKey(
'product.Collection', on_delete=models.SET_NULL, related_name='+',
blank=True, null=True)
default_weight_unit = models.CharField(
max_length=10, choices=WeightUnits.CHOICES,
default=WeightUnits.KILOGRAM)
automatic_fulfillment_digital_products = models.BooleanField(default=False)
default_digital_max_downloads = models.IntegerField(blank=True, null=True)
default_digital_url_valid_days = models.IntegerField(blank=True, null=True)
translated = TranslationProxy()
class Meta:
permissions = (
('manage_settings', pgettext_lazy(
'Permission description', 'Manage settings.')),
('manage_translations', pgettext_lazy(
'Permission description', 'Manage translations.')),)
def __str__(self):
return self.site.name
def available_backends(self):
return self.authorizationkey_set.values_list('name', flat=True)
class SiteSettingsTranslation(models.Model):
language_code = models.CharField(max_length=10)
site_settings = models.ForeignKey(
SiteSettings, related_name='translations', on_delete=models.CASCADE)
header_text = models.CharField(max_length=200, blank=True)
description = models.CharField(max_length=500, blank=True)
class Meta:
unique_together = (('language_code', 'site_settings'),)
def __repr__(self):
class_ = type(self)
return '%s(pk=%r, site_settings_pk=%r)' % (
class_.__name__, self.pk, self.site_settings_id)
def __str__(self):
return self.site_settings.site.name
class AuthorizationKey(models.Model):
site_settings = models.ForeignKey(SiteSettings, on_delete=models.CASCADE)
name = models.CharField(
max_length=20, choices=AuthenticationBackends.BACKENDS)
key = models.TextField()
password = models.TextField()
class Meta:
unique_together = (('site_settings', 'name'),)
def __str__(self):
return self.name
def key_and_secret(self):
return self.key, self.password
| {
"content_hash": "756077858e64c90ec80dfefe670a37cd",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 37.44186046511628,
"alnum_prop": 0.6863354037267081,
"repo_name": "UITools/saleor",
"id": "c9460921c12432c7c7ba0b9490cd64350dbd0ea5",
"size": "3220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/site/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
} |
"""
================================
Recognizing hand-written digits
================================
This example shows how scikit-learn can be used to recognize images of
hand-written digits, from 0-9.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
from sklearn.model_selection import train_test_split
###############################################################################
# Digits dataset
# --------------
#
# The digits dataset consists of 8x8
# pixel images of digits. The ``images`` attribute of the dataset stores
# 8x8 arrays of grayscale values for each image. We will use these arrays to
# visualize the first 4 images. The ``target`` attribute of the dataset stores
# the digit each image represents and this is included in the title of the 4
# plots below.
#
# Note: if we were working from image files (e.g., 'png' files), we would load
# them using :func:`matplotlib.pyplot.imread`.
digits = datasets.load_digits()
_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))
for ax, image, label in zip(axes, digits.images, digits.target):
ax.set_axis_off()
ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest")
ax.set_title("Training: %i" % label)
###############################################################################
# Classification
# --------------
#
# To apply a classifier on this data, we need to flatten the images, turning
# each 2-D array of grayscale values from shape ``(8, 8)`` into shape
# ``(64,)``. Subsequently, the entire dataset will be of shape
# ``(n_samples, n_features)``, where ``n_samples`` is the number of images and
# ``n_features`` is the total number of pixels in each image.
#
# We can then split the data into train and test subsets and fit a support
# vector classifier on the train samples. The fitted classifier can
# subsequently be used to predict the value of the digit for the samples
# in the test subset.
# flatten the images
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
clf = svm.SVC(gamma=0.001)
# Split data into 50% train and 50% test subsets
X_train, X_test, y_train, y_test = train_test_split(
data, digits.target, test_size=0.5, shuffle=False
)
# Learn the digits on the train subset
clf.fit(X_train, y_train)
# Predict the value of the digit on the test subset
predicted = clf.predict(X_test)
###############################################################################
# Below we visualize the first 4 test samples and show their predicted
# digit value in the title.
_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))
for ax, image, prediction in zip(axes, X_test, predicted):
ax.set_axis_off()
image = image.reshape(8, 8)
ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest")
ax.set_title(f"Prediction: {prediction}")
###############################################################################
# :func:`~sklearn.metrics.classification_report` builds a text report showing
# the main classification metrics.
print(
f"Classification report for classifier {clf}:\n"
f"{metrics.classification_report(y_test, predicted)}\n"
)
###############################################################################
# We can also plot a :ref:`confusion matrix <confusion_matrix>` of the
# true digit values and the predicted digit values.
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, predicted)
disp.figure_.suptitle("Confusion Matrix")
print(f"Confusion matrix:\n{disp.confusion_matrix}")
plt.show()
###############################################################################
# If the results from evaluating a classifier are stored in the form of a
# :ref:`confusion matrix <confusion_matrix>` and not in terms of `y_true` and
# `y_pred`, one can still build a :func:`~sklearn.metrics.classification_report`
# as follows:
# The ground truth and predicted lists
y_true = []
y_pred = []
cm = disp.confusion_matrix
# For each cell in the confusion matrix, add the corresponding ground truths
# and predictions to the lists
for gt in range(len(cm)):
for pred in range(len(cm)):
y_true += [gt] * cm[gt][pred]
y_pred += [pred] * cm[gt][pred]
print(
"Classification report rebuilt from confusion matrix:\n"
f"{metrics.classification_report(y_true, y_pred)}\n"
)
| {
"content_hash": "7531d2894cba431fb2478ad062f17e37",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 80,
"avg_line_length": 35.703125,
"alnum_prop": 0.6339168490153173,
"repo_name": "vinayak-mehta/scikit-learn",
"id": "f760916d1f66e3862c113eedc65a393614bdc031",
"size": "4570",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/classification/plot_digits_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668672"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10468304"
},
{
"name": "Shell",
"bytes": "41758"
}
],
"symlink_target": ""
} |
import os
from .test_monitors import test_monitor_collection
def test_argument_converter(args):
'''
Takes an argparse namespace, and converts it into argument dictionaries.
Each argument dictionary is fed into a specific function or class in the
training script. e.g. admin_kwargs is the set of arguments to be fed
to the experiment Administrator.
'''
if args.debug:
args.email = None
args.verbose = True
args.batch_size = 3
args.epochs = 15
args.n_test = 50
args.lr = 0.1
args.period = 2
args.seed = 1
args.hidden = 1
args.iters = 2
args.lf = 1
return dict(
admin_kwargs=get_admin_kwargs(args),
data_loader_kwargs=get_data_loader_kwargs(args),
model_loading_kwargs=get_model_loading_kwargs(args),
)
def get_admin_kwargs(args):
return dict(
dataset=args.dataset,
debug=args.debug,
slurm_array_task_id=args.slurm_array_task_id,
slurm_array_job_id=args.slurm_array_job_id,
gpu=args.gpu,
seed=args.seed,
email_filename=args.email_filename,
silent=args.silent,
verbose=args.verbose,
cmd_line_args=args.cmd_line_args,
monitor_collection=test_monitor_collection(),
arg_string=args.arg_string,
root_dir=args.root_dir,
)
def get_data_loader_kwargs(args):
#data_dir = os.path.join(args.data_dir)
#leaves = args.model not in ['recs', 'recg']
leaves = True
return dict(
debug=args.debug,
data_dir=args.data_dir,
n_test=args.n_test,
batch_size=args.batch_size,
dataset=args.dataset,
preprocess=args.pp,
leaves=leaves
)
def get_model_loading_kwargs(args):
arg_list = [
'models_dir',
'model',
'single_model',
#'inventory'
]
return {k: v for k, v in vars(args).items() if k in arg_list}
| {
"content_hash": "3b758a5370a165555b554ba7f6af2bbb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 76,
"avg_line_length": 26.931506849315067,
"alnum_prop": 0.6012207527975585,
"repo_name": "isaachenrion/jets",
"id": "20cafd8e313f14d9eb2839256bb73b399e21aa6f",
"size": "1966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jets/test/test_argument_converter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11751"
},
{
"name": "Python",
"bytes": "258548"
},
{
"name": "Shell",
"bytes": "6358"
}
],
"symlink_target": ""
} |
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Module under test
import bokeh.protocol.message as message # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_create_header(monkeypatch) -> None:
message.Message.msgtype = "msgtype"
monkeypatch.setattr("bokeh.util.serialization.make_id", lambda: "msgid")
header = message.Message.create_header(request_id="bar")
assert set(header.keys()) == {'msgid', 'msgtype', 'reqid'}
assert header['msgtype'] == 'msgtype'
assert header['msgid'] == 'msgid'
assert header['reqid'] == 'bar'
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "2838552a9b68bdd035045af55d251179",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 41.027027027027025,
"alnum_prop": 0.25428194993412384,
"repo_name": "ericmjl/bokeh",
"id": "0a8e4d6b692d7a45bd9c629f0ae09ea169673cb4",
"size": "2022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/bokeh/protocol/test_message.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from bigdl.chronos.utils import deprecated
from bigdl.chronos.autots.deprecated.preprocessing.impute.abstract import BaseImpute
@deprecated('Please use `bigdl.chronos.data.TSDataset` instead.')
class LastFill(BaseImpute):
"""
Impute missing data with last seen value
"""
def __init__(self):
"""
Construct model for last filling method
"""
pass
def impute(self, df):
"""
impute data
:params df: input dataframe
:return: imputed dataframe
"""
df.iloc[0] = df.iloc[0].fillna(0)
return df.fillna(method='pad')
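# Illustrative usage (assumed, not part of the original module):
#   import pandas as pd
#   df = pd.DataFrame({"value": [None, 1.0, None, 3.0]})
#   LastFill().impute(df)   # row 0 is filled with 0, the later gap carries forward 1.0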
| {
"content_hash": "50b4d6c6c1eb08ffa0a0bf282996b59a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 25.59259259259259,
"alnum_prop": 0.6396526772793053,
"repo_name": "intel-analytics/BigDL",
"id": "8fc129b06e225d3bd17d1cb4c9fc82fd4c4f5490",
"size": "1278",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/chronos/src/bigdl/chronos/autots/deprecated/preprocessing/impute/LastFill.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "139304"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54112822"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8825782"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216148"
},
{
"name": "Shell",
"bytes": "848241"
}
],
"symlink_target": ""
} |
def orderedSequentialSearch(alist, item):
pos = 0
found = False
stop = False
while pos < len(alist) and not found and not stop:
if alist[pos] == item:
found = True
else:
if alist[pos] > item:
stop = True
else:
pos = pos+1
return found
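# Illustrative usage (not part of the original listing):
#   testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42]
#   orderedSequentialSearch(testlist, 3)    # False - the scan stops early once 8 > 3
#   orderedSequentialSearch(testlist, 13)   # True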
| {
"content_hash": "f84e8bc00a59d1b1de4b8ee0332245cd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.48823529411764705,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "2ac00c7a63e473cfe97734057b3e347af6418ba6",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code-from-author-book/Listings-for-Second-Edition/listing_5_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from .models import Poster
def index(request):
posters = Poster.objects.all()
return render(request, 'posters/index.jade', {'posters': posters})
| {
"content_hash": "85530bd9c91ac73a84dfef5b877cd724",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 68,
"avg_line_length": 34.166666666666664,
"alnum_prop": 0.751219512195122,
"repo_name": "CMU-Robotics-Club/roboticsclub.org",
"id": "c9de6667b8309dbaeb37c1d9ae2cc8fd78d64fdf",
"size": "205",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "posters/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4725"
},
{
"name": "HTML",
"bytes": "33977"
},
{
"name": "JavaScript",
"bytes": "5079"
},
{
"name": "Python",
"bytes": "249072"
}
],
"symlink_target": ""
} |
"""
update/__init__.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2012 Exa Networks. All rights reserved.
Modified by Orange - 2014
"""
from copy import deepcopy
from bagpipe.exabgp.structure.address import AFI,SAFI
from bagpipe.exabgp.message import Message,prefix
from bagpipe.exabgp.message.update.attribute.mprnlri import MPRNLRI
from bagpipe.exabgp.message.update.attribute.mpurnlri import MPURNLRI
# =================================================================== Update
#def bgp_mp (self):
# if AttributeID.NEXT_HOP in self:
# if self[AttributeID.NEXT_HOP].next_hop.afi != AFI.ipv4:
# return MPRNLRI(self).pack()
# return ''
#
#def bgp_resdraw (self):
# if AttributeID.NEXT_HOP in self:
# if self[AttributeID.NEXT_HOP].next_hop.afi != AFI.ipv4:
# return MPURNLRI(self.afi,self.safi,self).pack()
# return ''
from bagpipe.exabgp.message.update.attribute import AttributeID
class Update (Message):
TYPE = chr(0x02)
    # All the routes must be of the same family and have the same next-hop
def __init__ (self,routes):
self.routes = routes
self.afi = routes[0].nlri.afi
self.safi = routes[0].nlri.safi
# The routes MUST have the same attributes ...
def announce (self,asn4,local_asn,remote_asn):
if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]:
nlri = ''.join([route.nlri.pack() for route in self.routes])
mp = ''
else:
nlri = ''
mp = MPRNLRI(self.routes).pack()
# FIXME: needs same fix as below for next hop ?
attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn)
return self._message(prefix('') + prefix(attr + mp) + nlri)
def update (self,asn4,local_asn,remote_asn):
if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]:
nlri = ''.join([route.nlri.pack() for route in self.routes])
mp = ''
attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn)
else:
nlri = ''
#mp = MPURNLRI(self.routes).pack() + MPRNLRI(self.routes).pack()
mp = MPRNLRI(self.routes).pack()
# remove NEXT_HOP from attributes, because it's already been encoded in the MPNLRI
if AttributeID.NEXT_HOP not in self.routes[0].attributes:
raise Exception("Routes advertised need a NEXT_HOP attribute")
attributes = deepcopy(self.routes[0].attributes)
del attributes[AttributeID.NEXT_HOP]
attr = attributes.bgp_announce(asn4,local_asn,remote_asn)
return self._message(prefix(nlri) + prefix(attr + mp) + nlri)
def withdraw (self,asn4=False,local_asn=None,remote_asn=None):
if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]:
nlri = ''.join([route.nlri.pack() for route in self.routes])
mp = ''
attr = ''
else:
nlri = ''
mp = MPURNLRI(self.routes).pack()
attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn)
return self._message(prefix(nlri) + prefix(attr + mp))
| {
"content_hash": "0cb1f50d239860ee75b57416b236ee19",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 85,
"avg_line_length": 34.55952380952381,
"alnum_prop": 0.6830864622803996,
"repo_name": "murat1985/bagpipe-bgp",
"id": "5984e09ffaaa17d5e278f882a7d8494a3ca61465",
"size": "2921",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bagpipe/exabgp/message/update/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "619974"
},
{
"name": "Shell",
"bytes": "12755"
}
],
"symlink_target": ""
} |
import operator
from ..sql import operators
class UnevaluatableError(Exception):
pass
_straight_ops = set(getattr(operators, op)
for op in ('add', 'mul', 'sub',
# Py2K
'div',
# end Py2K
'mod', 'truediv',
'lt', 'le', 'ne', 'gt', 'ge', 'eq'))
_notimplemented_ops = set(getattr(operators, op)
for op in ('like_op', 'notlike_op', 'ilike_op',
'notilike_op', 'between_op', 'in_op',
'notin_op', 'endswith_op', 'concat_op'))
class EvaluatorCompiler(object):
def process(self, clause):
meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
if not meth:
raise UnevaluatableError(
"Cannot evaluate %s" % type(clause).__name__)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_column(self, clause):
if 'parentmapper' in clause._annotations:
key = clause._annotations['parentmapper'].\
_columntoproperty[clause].key
else:
key = clause.key
get_corresponding_attr = operator.attrgetter(key)
return lambda obj: get_corresponding_attr(obj)
def visit_clauselist(self, clause):
evaluators = map(self.process, clause.clauses)
if clause.operator is operators.or_:
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
elif clause.operator is operators.and_:
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if not value:
if value is None:
return None
return False
return True
else:
raise UnevaluatableError(
"Cannot evaluate clauselist with operator %s" %
clause.operator)
return evaluate
def visit_binary(self, clause):
eval_left, eval_right = map(self.process,
[clause.left, clause.right])
operator = clause.operator
if operator is operators.is_:
def evaluate(obj):
return eval_left(obj) == eval_right(obj)
elif operator is operators.isnot:
def evaluate(obj):
return eval_left(obj) != eval_right(obj)
elif operator in _straight_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
return operator(eval_left(obj), eval_right(obj))
else:
raise UnevaluatableError(
"Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
return evaluate
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
"Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
def visit_bindparam(self, clause):
val = clause.value
return lambda obj: val
| {
"content_hash": "c51222d79886c88ca183fd2c5bf77795",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 73,
"avg_line_length": 35.78761061946903,
"alnum_prop": 0.49727992087042533,
"repo_name": "denny820909/builder",
"id": "894ac139e791f901ff4a0b8aeb03c74c684b4d51",
"size": "4279",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/SQLAlchemy-0.8.0b2-py2.7-linux-x86_64.egg/sqlalchemy/orm/evaluator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import linebreaks, escape, striptags
from django.utils.translation import ugettext_lazy as _
from notification.models import Notice
from notification.atomformat import Feed
ITEMS_PER_FEED = getattr(settings, 'ITEMS_PER_FEED', 20)
DEFAULT_HTTP_PROTOCOL = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
class BaseNoticeFeed(Feed):
def item_id(self, notification):
return "%s://%s%s" % (
DEFAULT_HTTP_PROTOCOL,
Site.objects.get_current().domain,
notification.get_absolute_url(),
)
def item_title(self, notification):
return striptags(notification.message)
def item_updated(self, notification):
return notification.added
def item_published(self, notification):
return notification.added
def item_content(self, notification):
return {"type" : "html", }, linebreaks(escape(notification.message))
def item_links(self, notification):
return [{"href" : self.item_id(notification)}]
def item_authors(self, notification):
return [{"name" : notification.user.username}]
class NoticeUserFeed(BaseNoticeFeed):
def get_object(self, params):
return get_object_or_404(User, username=params[0].lower())
def feed_id(self, user):
return "%s://%s%s" % (
DEFAULT_HTTP_PROTOCOL,
Site.objects.get_current().domain,
reverse('notification_feed_for_user'),
)
def feed_title(self, user):
return _('Notices Feed')
def feed_updated(self, user):
qs = Notice.objects.filter(user=user)
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest('added').added
def feed_links(self, user):
complete_url = "%s://%s%s" % (
DEFAULT_HTTP_PROTOCOL,
Site.objects.get_current().domain,
reverse('notification_notices'),
)
return ({'href': complete_url},)
def items(self, user):
return Notice.objects.notices_for(user).order_by("-added")[:ITEMS_PER_FEED]
| {
"content_hash": "46d714220e4778258753784eab9e5345",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 83,
"avg_line_length": 33.56410256410256,
"alnum_prop": 0.6504965622612682,
"repo_name": "edisonlz/fruit",
"id": "b05f51c61fac8d99e6cbd3b0d29f821af3d4d8df",
"size": "2618",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "web_project/base/site-packages/notification/feeds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1482"
},
{
"name": "Batchfile",
"bytes": "6714"
},
{
"name": "C",
"bytes": "3085"
},
{
"name": "C++",
"bytes": "4823"
},
{
"name": "CSS",
"bytes": "660927"
},
{
"name": "DIGITAL Command Language",
"bytes": "27853"
},
{
"name": "GAP",
"bytes": "6045"
},
{
"name": "Go",
"bytes": "13616"
},
{
"name": "Groff",
"bytes": "7199"
},
{
"name": "HTML",
"bytes": "7678961"
},
{
"name": "Java",
"bytes": "208173"
},
{
"name": "JavaScript",
"bytes": "2626051"
},
{
"name": "Makefile",
"bytes": "16810"
},
{
"name": "Nginx",
"bytes": "19215"
},
{
"name": "PHP",
"bytes": "205978"
},
{
"name": "Perl",
"bytes": "27627"
},
{
"name": "Python",
"bytes": "15609476"
},
{
"name": "Shell",
"bytes": "13663"
},
{
"name": "TeX",
"bytes": "60714"
}
],
"symlink_target": ""
} |
import os
import re
import IECore
import Gaffer
## Creates a custom Gaffer extension by exporting one or more Boxes
# as new node types defined in a python module. An associated startup
# file is generated to add the nodes to the node menu. If `directory`
# is placed on the GAFFER_EXTENSION_PATHS then the extension will be
# loaded automatically by Gaffer.
def exportExtension( name, boxes, directory ) :
pythonDir = os.path.join( directory, "python", name )
os.makedirs( pythonDir )
with open( os.path.join( pythonDir, "__init__.py" ), "w" ) as initFile :
for box in boxes :
with open( os.path.join( pythonDir, box.getName() + ".py" ), "w" ) as nodeFile :
nodeFile.write( __nodeDefinition( box, name ) )
initFile.write( "from {name} import {name}\n".format( name = box.getName() ) )
uiDir = os.path.join( directory, "python", name + "UI" )
os.makedirs( uiDir )
with open( os.path.join( uiDir, "__init__.py" ), "w" ) as initFile :
for box in boxes :
with open( os.path.join( uiDir, box.getName() + "UI.py" ), "w" ) as uiFile :
uiFile.write( __uiDefinition( box, name ) )
initFile.write( "import {name}UI\n".format( name = box.getName() ) )
startupDir = os.path.join( directory, "startup", "gui" )
os.makedirs( startupDir )
with open( os.path.join( startupDir, name + ".py" ), "w" ) as startupFile :
nodeMenuDefinition = []
for box in boxes :
nodeMenuPath = Gaffer.Metadata.value( box, "extension:nodeMenuItem" )
if not nodeMenuPath :
nodeMenuPath = "/{name}/{node}".format( name = name, node = box.getName() )
nodeMenuDefinition.append(
"nodeMenu.append( \"{nodeMenuPath}\", {name}.{node} )\n".format(
nodeMenuPath = nodeMenuPath,
name = name,
node = box.getName()
)
)
startupFile.write(
__startupTemplate.format(
name = name,
nodeMenuDefinition = "\n".join( nodeMenuDefinition )
)
)
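# Illustrative usage sketch (not part of the original module).  Assuming a
# script containing a Box named "Wireframe" that should become a new node
# type, the extension could be exported with something like :
#
#     import Gaffer
#     Gaffer.ExtensionAlgo.exportExtension( "MyTools", [ script["Wireframe"] ], "/path/to/MyTools" )
#
# where ``script`` is the hypothetical current ScriptNode.  Placing
# "/path/to/MyTools" on GAFFER_EXTENSION_PATHS then loads the extension
# automatically and adds MyTools.Wireframe to the node menu, as described in
# the comment above exportExtension().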
__startupTemplate = """\
import GafferUI
import {name}
import {name}UI
nodeMenu = GafferUI.NodeMenu.acquire( application )
{nodeMenuDefinition}
"""
def __indent( text, n ) :
prefix = "\t" * n
return "\n".join( prefix + l for l in text.split( "\n" ) )
__nodeTemplate = """\
{imports}
class {name}( Gaffer.SubGraph ) :
def __init__( self, name = "{name}" ) :
Gaffer.SubGraph.__init__( self, name )
{constructor}
IECore.registerRunTimeTyped( {name}, typeName = "{extension}::{name}" )
"""
def __nodeDefinition( box, extension ) :
invisiblePlug = re.compile( "^__.*$" )
children = Gaffer.StandardSet()
for child in box.children() :
if isinstance( child, Gaffer.Node ) :
children.add( child )
elif isinstance( child, Gaffer.Plug ) :
if not invisiblePlug.match( child.getName() ) and child != box["user"] :
children.add( child )
with Gaffer.Context() as context :
context["serialiser:includeVersionMetadata"] = IECore.BoolData( False )
context["serialiser:protectParentNamespace"] = IECore.BoolData( False )
context["valuePlugSerialiser:resetParentPlugDefaults"] = IECore.BoolData( True )
context["plugSerialiser:includeParentPlugMetadata"] = IECore.BoolData( False )
constructor = Gaffer.Serialisation( box, "self", children ).result()
imports = { "import IECore", "import Gaffer" }
constructorLines = []
for line in constructor.split( "\n" ) :
if line.startswith( "import" ) :
imports.add( line )
else :
constructorLines.append( line )
return __nodeTemplate.format(
imports = "\n".join( sorted( imports ) ),
name = box.getName(),
constructor = __indent( "\n".join( constructorLines ), 2 ),
extension = extension
)
__uiTemplate = """\
import imath
import IECore
import Gaffer
import {extension}
Gaffer.Metadata.registerNode(
{extension}.{name},
{metadata}
{plugMetadata}
)
"""
def __uiDefinition( box, extension ) :
return __uiTemplate.format(
extension = extension,
name = box.getName(),
metadata = __indent( __metadata( box ), 1 ),
plugMetadata = __indent( __plugMetadata( box ), 1 )
)
def __metadata( graphComponent ) :
items = []
for k in Gaffer.Metadata.registeredValues( graphComponent, instanceOnly = True, persistentOnly = True ) :
v = Gaffer.Metadata.value( graphComponent, k )
items.append(
"{k}, {v},".format( k = repr( k ), v = IECore.repr( v ) )
)
return "\n".join( items )
def __plugMetadata( box ) :
items = []
def walkPlugs( graphComponent ) :
if isinstance( graphComponent, Gaffer.Plug ) :
m = __metadata( graphComponent )
if m :
items.append(
"\"{name}\" : [\n{m}\n],\n".format(
name = graphComponent.relativeName( graphComponent.node() ),
m = __indent( m, 1 )
)
)
for plug in graphComponent.children( Gaffer.Plug ) :
walkPlugs( plug )
walkPlugs( box )
if items :
return "plugs = {\n\n" + __indent( "\n".join( items ), 1 ) + "\n}\n"
else :
return ""
| {
"content_hash": "69698cc6aa3458429d83b80255c18624",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 106,
"avg_line_length": 25.15625,
"alnum_prop": 0.6532091097308489,
"repo_name": "appleseedhq/gaffer",
"id": "911a10fefc91b01a49f31ef86daf6e6b3a2da7ea",
"size": "6621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/Gaffer/ExtensionAlgo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
from test import unit
import unittest
import tempfile
import os
import time
from shutil import rmtree
from hashlib import md5
from tempfile import mkdtemp
from test.unit import FakeLogger
from swift.obj import auditor
from swift.obj import server as object_server
from swift.obj.server import DiskFile, write_metadata, DATADIR
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
renamer, storage_directory
from swift.obj.replicator import invalidate_hash
from swift.common.exceptions import AuditException
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(os.path.join(self.devices, 'sdb'))
self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(
devices=self.devices,
mount_check='false')
self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
self.logger)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_object_audit_extra_data(self):
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
data = '0' * 1024
etag = md5()
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
pre_quarantines = self.auditor.quarantines
self.auditor.object_audit(
os.path.join(self.disk_file.datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(self.auditor.quarantines, pre_quarantines)
os.write(fd, 'extra_data')
self.auditor.object_audit(
os.path.join(self.disk_file.datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
data = '0' * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
pre_quarantines = self.auditor.quarantines
# remake so it will have metadata
self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
self.logger)
self.auditor.object_audit(
os.path.join(self.disk_file.datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(self.auditor.quarantines, pre_quarantines)
etag = md5()
etag.update('1' + '0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(fd, metadata)
self.auditor.object_audit(
os.path.join(self.disk_file.datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file.datadir, timestamp + '.data')
mkdirs(self.disk_file.datadir)
fp = open(path, 'w')
fp.write('0' * 1024)
fp.close()
invalidate_hash(os.path.dirname(self.disk_file.datadir))
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
pre_quarantines = self.auditor.quarantines
self.auditor.object_audit(
os.path.join(self.disk_file.datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_bad_args(self):
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
pre_errors = self.auditor.errors
self.auditor.object_audit(5, 'sda', '0')
self.assertEquals(self.auditor.errors, pre_errors + 1)
pre_errors = self.auditor.errors
self.auditor.object_audit('badpath', 'sda', '0')
self.assertEquals(self.auditor.errors, pre_errors) # just returns
def test_object_run_once_pass(self):
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
self.auditor.log_time = 0
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = self.auditor.quarantines
data = '0' * 1024
etag = md5()
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
self.disk_file.close()
self.auditor.audit_all_objects()
self.assertEquals(self.auditor.quarantines, pre_quarantines)
def test_object_run_once_no_sda(self):
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = self.auditor.quarantines
data = '0' * 1024
etag = md5()
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
self.disk_file.close()
os.write(fd, 'extra_data')
self.auditor.audit_all_objects()
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_run_once_multi_devices(self):
self.auditor = auditor.AuditorWorker(self.conf, self.logger)
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = self.auditor.quarantines
data = '0' * 10
etag = md5()
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
self.disk_file.close()
self.auditor.audit_all_objects()
self.disk_file = DiskFile(self.devices, 'sdb', '0', 'a', 'c',
'ob', self.logger)
data = '1' * 10
etag = md5()
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
self.disk_file.close()
os.write(fd, 'extra_data')
self.auditor.audit_all_objects()
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_run_fast_track_non_zero(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor.log_time = 0
data = '0' * 1024
etag = md5()
with self.disk_file.mkstemp() as fd:
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': str(normalize_timestamp(time.time())),
'Content-Length': str(os.fstat(fd).st_size),
}
self.disk_file.put(fd, metadata)
etag = md5()
etag.update('1' + '0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(fd, metadata)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.auditor.run_once(zero_byte_fps=50)
self.assertFalse(os.path.isdir(quarantine_path))
self.auditor.run_once()
self.assertTrue(os.path.isdir(quarantine_path))
def setup_bad_zero_byte(self, with_ts=False):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor.log_time = 0
ts_file_path = ''
if with_ts:
name_hash = hash_path('a', 'c', 'o')
dir_path = os.path.join(self.devices, 'sda',
storage_directory(DATADIR, '0', name_hash))
ts_file_path = os.path.join(dir_path, '99999.ts')
if not os.path.exists(dir_path):
mkdirs(dir_path)
fp = open(ts_file_path, 'w')
fp.close()
etag = md5()
with self.disk_file.mkstemp() as fd:
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': str(normalize_timestamp(time.time())),
'Content-Length': 10,
}
self.disk_file.put(fd, metadata)
etag = md5()
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(fd, metadata)
if self.disk_file.data_file:
return self.disk_file.data_file
return ts_file_path
def test_object_run_fast_track_all(self):
self.setup_bad_zero_byte()
self.auditor.run_once()
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
def test_object_run_fast_track_zero(self):
self.setup_bad_zero_byte()
self.auditor.run_once(zero_byte_fps=50)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
def test_with_tombstone(self):
ts_file_path = self.setup_bad_zero_byte(with_ts=True)
self.auditor.run_once()
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(ts_file_path.endswith('ts'))
self.assertTrue(os.path.exists(ts_file_path))
def test_sleeper(self):
auditor.SLEEP_BETWEEN_AUDITS = 0.10
my_auditor = auditor.ObjectAuditor(self.conf)
start = time.time()
my_auditor._sleep()
delta_t = time.time() - start
self.assert_(delta_t > 0.08)
self.assert_(delta_t < 0.12)
def test_object_run_fast_track_zero_check_closed(self):
rat = [False]
class FakeFile(DiskFile):
def close(self, verify_file=True):
rat[0] = True
DiskFile.close(self, verify_file=verify_file)
self.setup_bad_zero_byte()
was_df = object_server.DiskFile
try:
object_server.DiskFile = FakeFile
self.auditor.run_once(zero_byte_fps=50)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
self.assertTrue(rat[0])
finally:
object_server.DiskFile = was_df
def test_run_forever(self):
class StopForever(Exception):
pass
class ObjectAuditorMock(object):
check_args = ()
check_kwargs = {}
fork_called = 0
fork_res = 0
def mock_run(self, *args, **kwargs):
self.check_args = args
self.check_kwargs = kwargs
def mock_sleep(self):
raise StopForever('stop')
def mock_fork(self):
self.fork_called += 1
return self.fork_res
my_auditor = auditor.ObjectAuditor(dict(devices=self.devices,
mount_check='false',
zero_byte_files_per_second=89))
mocker = ObjectAuditorMock()
my_auditor.run_once = mocker.mock_run
my_auditor._sleep = mocker.mock_sleep
was_fork = os.fork
try:
os.fork = mocker.mock_fork
self.assertRaises(StopForever,
my_auditor.run_forever, zero_byte_fps=50)
self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 50)
self.assertEquals(mocker.fork_called, 0)
self.assertRaises(StopForever, my_auditor.run_forever)
self.assertEquals(mocker.fork_called, 1)
self.assertEquals(mocker.check_args, ())
mocker.fork_res = 1
self.assertRaises(StopForever, my_auditor.run_forever)
self.assertEquals(mocker.fork_called, 2)
self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 89)
finally:
os.fork = was_fork
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ba493acc574e36bb2add7158f3747f09",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 79,
"avg_line_length": 38.06149732620321,
"alnum_prop": 0.5474534597822269,
"repo_name": "Triv90/SwiftUml",
"id": "4cb61696bfde04270530ea8cc9d4e4fcb5f9a958",
"size": "14825",
"binary": false,
"copies": "2",
"ref": "refs/heads/AddUml",
"path": "test/unit/obj/test_auditor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2678359"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import os
import mozinfo
import shlex
import subprocess
import sys
# determine the platform-specific invocation of `ps`
if mozinfo.isMac:
psarg = '-Acj'
elif mozinfo.isLinux:
psarg = 'axwww'
else:
psarg = 'ax'
def ps(arg=psarg):
"""
python front-end to `ps`
http://en.wikipedia.org/wiki/Ps_%28Unix%29
returns a list of process dicts based on the `ps` header
"""
retval = []
process = subprocess.Popen(['ps', arg], stdout=subprocess.PIPE)
stdout, _ = process.communicate()
header = None
for line in stdout.splitlines():
line = line.strip()
if header is None:
# first line is the header
header = line.split()
continue
split = line.split(None, len(header)-1)
process_dict = dict(zip(header, split))
retval.append(process_dict)
return retval
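# Example of the kind of records ps() yields with the Linux `axwww` form
# (values are illustrative only):
#
#     [{'PID': '1', 'TTY': '?', 'STAT': 'Ss', 'TIME': '0:01',
#       'COMMAND': '/sbin/init'},
#      ...]
#
# Every value is a string taken verbatim from the `ps` output; callers such
# as running_processes() below convert PID to an int themselves.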
def running_processes(name, psarg=psarg, defunct=True):
"""
returns a list of
{'PID': PID of process (int)
'command': command line of process (list)}
with the executable named `name`.
- defunct: whether to return defunct processes
"""
retval = []
for process in ps(psarg):
command = process['COMMAND']
command = shlex.split(command)
if command[-1] == '<defunct>':
command = command[:-1]
            if not command or not defunct:
                continue
if 'STAT' in process and not defunct:
if process['STAT'] == 'Z+':
continue
prog = command[0]
basename = os.path.basename(prog)
if basename == name:
retval.append((int(process['PID']), command))
return retval
def get_pids(name):
"""Get all the pids matching name"""
if mozinfo.isWin:
# use the windows-specific implementation
import wpk
return wpk.get_pids(name)
else:
return [pid for pid,_ in running_processes(name)]
if __name__ == '__main__':
pids = set()
for i in sys.argv[1:]:
pids.update(get_pids(i))
for i in sorted(pids):
print i
| {
"content_hash": "a9a3e905a477bfbdb38f322544f226f1",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 67,
"avg_line_length": 27.513157894736842,
"alnum_prop": 0.5839311334289814,
"repo_name": "wilebeast/FireFox-OS",
"id": "ee34c081c2a2eb7efa23df09718c86dce0ae87c1",
"size": "2314",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "B2G/gecko/testing/mozbase/mozprocess/mozprocess/pid.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from django.apps.registry import Apps
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.db.migrations.exceptions import InvalidBasesError
from django.db.migrations.operations import (
AddField, AlterField, DeleteModel, RemoveField,
)
from django.db.migrations.state import (
ModelState, ProjectState, get_related_models_recursive,
)
from django.test import SimpleTestCase, override_settings
from django.test.utils import isolate_apps
from django.utils import six
from .models import (
FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager,
UnicodeModel,
)
class StateTests(SimpleTestCase):
"""
Tests state construction, rendering and modification by operations.
"""
def test_create(self):
"""
Tests making a ProjectState from an Apps
"""
new_apps = Apps(["migrations"])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = "migrations"
apps = new_apps
unique_together = ["name", "bio"]
index_together = ["bio", "age"]
class AuthorProxy(Author):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
ordering = ["name"]
class SubAuthor(Author):
width = models.FloatField(null=True)
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
title = models.CharField(max_length=1000)
author = models.ForeignKey(Author, models.CASCADE)
contributors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
verbose_name = "tome"
db_table = "test_tome"
class Food(models.Model):
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
food_no_mgr = NoMigrationFoodManager('x', 'y')
class Meta:
app_label = "migrations"
apps = new_apps
class FoodNoManagers(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class FoodNoDefaultManager(models.Model):
food_no_mgr = NoMigrationFoodManager('x', 'y')
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
class Meta:
app_label = "migrations"
apps = new_apps
mgr1 = FoodManager('a', 'b')
mgr2 = FoodManager('x', 'y', c=3, d=4)
class FoodOrderedManagers(models.Model):
# The managers on this model should be ordered by their creation
# counter and not by the order in model body
food_no_mgr = NoMigrationFoodManager('x', 'y')
food_mgr2 = mgr2
food_mgr1 = mgr1
class Meta:
app_label = "migrations"
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
author_proxy_state = project_state.models['migrations', 'authorproxy']
sub_author_state = project_state.models['migrations', 'subauthor']
book_state = project_state.models['migrations', 'book']
food_state = project_state.models['migrations', 'food']
food_no_managers_state = project_state.models['migrations', 'foodnomanagers']
food_no_default_manager_state = project_state.models['migrations', 'foodnodefaultmanager']
food_order_manager_state = project_state.models['migrations', 'foodorderedmanagers']
self.assertEqual(author_state.app_label, "migrations")
self.assertEqual(author_state.name, "Author")
self.assertEqual([x for x, y in author_state.fields], ["id", "name", "bio", "age"])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertEqual(author_state.fields[2][1].null, False)
self.assertEqual(author_state.fields[3][1].null, True)
self.assertEqual(
author_state.options,
{"unique_together": {("name", "bio")}, "index_together": {("bio", "age")}}
)
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(book_state.app_label, "migrations")
self.assertEqual(book_state.name, "Book")
self.assertEqual([x for x, y in book_state.fields], ["id", "title", "author", "contributors"])
self.assertEqual(book_state.fields[1][1].max_length, 1000)
self.assertEqual(book_state.fields[2][1].null, False)
self.assertEqual(book_state.fields[3][1].__class__.__name__, "ManyToManyField")
self.assertEqual(book_state.options, {"verbose_name": "tome", "db_table": "test_tome"})
self.assertEqual(book_state.bases, (models.Model, ))
self.assertEqual(author_proxy_state.app_label, "migrations")
self.assertEqual(author_proxy_state.name, "AuthorProxy")
self.assertEqual(author_proxy_state.fields, [])
self.assertEqual(author_proxy_state.options, {"proxy": True, "ordering": ["name"]})
self.assertEqual(author_proxy_state.bases, ("migrations.author", ))
self.assertEqual(sub_author_state.app_label, "migrations")
self.assertEqual(sub_author_state.name, "SubAuthor")
self.assertEqual(len(sub_author_state.fields), 2)
self.assertEqual(sub_author_state.bases, ("migrations.author", ))
# The default manager is used in migrations
self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
self.assertTrue(all(isinstance(name, six.text_type) for name, mgr in food_state.managers))
self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
# No explicit managers defined. Migrations will fall back to the default
self.assertEqual(food_no_managers_state.managers, [])
# food_mgr is used in migration but isn't the default mgr, hence add the
# default
self.assertEqual([name for name, mgr in food_no_default_manager_state.managers],
['food_no_mgr', 'food_mgr'])
self.assertTrue(all(isinstance(name, six.text_type) for name, mgr in food_no_default_manager_state.managers))
self.assertEqual(food_no_default_manager_state.managers[0][1].__class__, models.Manager)
self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager)
self.assertEqual([name for name, mgr in food_order_manager_state.managers],
['food_mgr1', 'food_mgr2'])
self.assertTrue(all(isinstance(name, six.text_type) for name, mgr in food_order_manager_state.managers))
self.assertEqual([mgr.args for name, mgr in food_order_manager_state.managers],
[('a', 'b', 1, 2), ('x', 'y', 3, 4)])
def test_custom_default_manager_added_to_the_model_state(self):
"""
When the default manager of the model is a custom manager,
it needs to be added to the model state.
"""
new_apps = Apps(['migrations'])
custom_manager = models.Manager()
class Author(models.Model):
objects = models.TextField()
authors = custom_manager
class Meta:
app_label = 'migrations'
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
self.assertEqual(author_state.managers, [('authors', custom_manager)])
def test_apps_bulk_update(self):
"""
StateApps.bulk_update() should update apps.ready to False and reset
the value afterwards.
"""
project_state = ProjectState()
apps = project_state.apps
with apps.bulk_update():
self.assertFalse(apps.ready)
self.assertTrue(apps.ready)
with self.assertRaises(ValueError):
with apps.bulk_update():
self.assertFalse(apps.ready)
raise ValueError()
self.assertTrue(apps.ready)
def test_render(self):
"""
Tests rendering a ProjectState into an Apps.
"""
project_state = ProjectState()
project_state.add_model(ModelState(
app_label="migrations",
name="Tag",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
))
project_state.add_model(ModelState(
app_label="migrations",
name="SubTag",
fields=[
('tag_ptr', models.OneToOneField(
'migrations.Tag',
models.CASCADE,
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
)),
("awesome", models.BooleanField()),
],
bases=("migrations.Tag",),
))
base_mgr = models.Manager()
mgr1 = FoodManager('a', 'b')
mgr2 = FoodManager('x', 'y', c=3, d=4)
project_state.add_model(ModelState(
app_label="migrations",
name="Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
# The ordering we really want is objects, mgr1, mgr2
('default', base_mgr),
('food_mgr2', mgr2),
(b'food_mgr1', mgr1),
]
))
new_apps = project_state.apps
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100)
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False)
self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2)
Food = new_apps.get_model("migrations", "Food")
self.assertEqual([mgr.name for mgr in Food._meta.managers],
['default', 'food_mgr1', 'food_mgr2'])
self.assertTrue(all(isinstance(mgr.name, six.text_type) for mgr in Food._meta.managers))
self.assertEqual([mgr.__class__ for mgr in Food._meta.managers],
[models.Manager, FoodManager, FoodManager])
def test_render_model_inheritance(self):
class Book(models.Model):
title = models.CharField(max_length=1000)
class Meta:
app_label = "migrations"
apps = Apps()
class Novel(Book):
class Meta:
app_label = "migrations"
apps = Apps()
# First, test rendering individually
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(Novel)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent model is in the app registry, it should be fine
ModelState.from_model(Book).render(apps)
ModelState.from_model(Novel).render(apps)
def test_render_model_with_multiple_inheritance(self):
class Foo(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class Bar(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class FooBar(Foo, Bar):
class Meta:
app_label = "migrations"
apps = Apps()
class AbstractSubFooBar(FooBar):
class Meta:
abstract = True
apps = Apps()
class SubFooBar(AbstractSubFooBar):
class Meta:
app_label = "migrations"
apps = Apps()
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(FooBar)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent models are in the app registry, it should be fine
ModelState.from_model(Foo).render(apps)
self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model])
ModelState.from_model(Bar).render(apps)
self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model])
ModelState.from_model(FooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar'])
ModelState.from_model(SubFooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar'])
def test_render_project_dependencies(self):
"""
Tests that the ProjectState render method correctly renders models
to account for inter-model base dependencies.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class B(A):
class Meta:
app_label = "migrations"
apps = new_apps
class C(B):
class Meta:
app_label = "migrations"
apps = new_apps
class D(A):
class Meta:
app_label = "migrations"
apps = new_apps
class E(B):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
class F(D):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.add_model(ModelState.from_model(D))
project_state.add_model(ModelState.from_model(E))
project_state.add_model(ModelState.from_model(F))
final_apps = project_state.apps
self.assertEqual(len(final_apps.get_models()), 6)
# Now make an invalid ProjectState and make sure it fails
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.add_model(ModelState.from_model(F))
with self.assertRaises(InvalidBasesError):
project_state.apps
def test_render_unique_app_labels(self):
"""
Tests that the ProjectState render method doesn't raise an
ImproperlyConfigured exception about unique labels if two dotted app
names have the same last part.
"""
class A(models.Model):
class Meta:
app_label = "django.contrib.auth"
class B(models.Model):
class Meta:
app_label = "vendor.auth"
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
self.assertEqual(len(project_state.apps.get_models()), 2)
def test_add_relations(self):
"""
#24573 - Adding relations to existing models should reload the
referenced models too.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = 'something'
apps = new_apps
class B(A):
class Meta:
app_label = 'something'
apps = new_apps
class C(models.Model):
class Meta:
app_label = 'something'
apps = new_apps
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.apps # We need to work with rendered models
old_state = project_state.clone()
model_a_old = old_state.apps.get_model('something', 'A')
model_b_old = old_state.apps.get_model('something', 'B')
model_c_old = old_state.apps.get_model('something', 'C')
# Check that the relations between the old models are correct
self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old)
self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old)
operation = AddField('c', 'to_a', models.OneToOneField(
'something.A',
models.CASCADE,
related_name='from_c',
))
operation.state_forwards('something', project_state)
model_a_new = project_state.apps.get_model('something', 'A')
model_b_new = project_state.apps.get_model('something', 'B')
model_c_new = project_state.apps.get_model('something', 'C')
# Check that all models have changed
self.assertIsNot(model_a_old, model_a_new)
self.assertIsNot(model_b_old, model_b_new)
self.assertIsNot(model_c_old, model_c_new)
# Check that the relations between the old models still hold
self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old)
self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old)
        # Check that the relations between the new models are correct
self.assertIs(model_a_new._meta.get_field('b').related_model, model_b_new)
self.assertIs(model_b_new._meta.get_field('a_ptr').related_model, model_a_new)
self.assertIs(model_a_new._meta.get_field('from_c').related_model, model_c_new)
self.assertIs(model_c_new._meta.get_field('to_a').related_model, model_a_new)
def test_remove_relations(self):
"""
        #24225 - Tests that relations between models are updated while the
        relations and references of models in an old state are preserved.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = "something"
apps = new_apps
class B(models.Model):
to_a = models.ForeignKey(A, models.CASCADE)
class Meta:
app_label = "something"
apps = new_apps
def get_model_a(state):
return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
old_state = project_state.clone()
operation = RemoveField("b", "to_a")
operation.state_forwards("something", project_state)
# Tests that model from old_state still has the relation
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
self.assertEqual(len(model_a_old._meta.related_objects), 1)
self.assertEqual(len(model_a_new._meta.related_objects), 0)
# Same test for deleted model
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
old_state = project_state.clone()
operation = DeleteModel("b")
operation.state_forwards("something", project_state)
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
self.assertEqual(len(model_a_old._meta.related_objects), 1)
self.assertEqual(len(model_a_new._meta.related_objects), 0)
def test_self_relation(self):
"""
        #24513 - Modifying an object pointing to itself would cause it to be
        rendered twice, breaking its related M2M through objects.
"""
class A(models.Model):
to_a = models.ManyToManyField('something.A', symmetrical=False)
class Meta:
app_label = "something"
def get_model_a(state):
return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]
project_state = ProjectState()
        project_state.add_model(ModelState.from_model(A))
self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
old_state = project_state.clone()
operation = AlterField(
model_name="a",
name="to_a",
field=models.ManyToManyField("something.A", symmetrical=False, blank=True)
)
# At this point the model would be rendered twice causing its related
# M2M through objects to point to an old copy and thus breaking their
# attribute lookup.
operation.state_forwards("something", project_state)
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
# Tests that the old model's _meta is still consistent
field_to_a_old = model_a_old._meta.get_field("to_a")
self.assertEqual(field_to_a_old.m2m_field_name(), "from_a")
self.assertEqual(field_to_a_old.m2m_reverse_field_name(), "to_a")
self.assertIs(field_to_a_old.related_model, model_a_old)
self.assertIs(field_to_a_old.remote_field.through._meta.get_field('to_a').related_model, model_a_old)
self.assertIs(field_to_a_old.remote_field.through._meta.get_field('from_a').related_model, model_a_old)
# Tests that the new model's _meta is still consistent
field_to_a_new = model_a_new._meta.get_field("to_a")
self.assertEqual(field_to_a_new.m2m_field_name(), "from_a")
self.assertEqual(field_to_a_new.m2m_reverse_field_name(), "to_a")
self.assertIs(field_to_a_new.related_model, model_a_new)
self.assertIs(field_to_a_new.remote_field.through._meta.get_field('to_a').related_model, model_a_new)
self.assertIs(field_to_a_new.remote_field.through._meta.get_field('from_a').related_model, model_a_new)
def test_equality(self):
"""
Tests that == and != are implemented correctly.
"""
# Test two things that should be equal
project_state = ProjectState()
project_state.add_model(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
{},
None,
))
project_state.apps # Fill the apps cached property
other_state = project_state.clone()
self.assertEqual(project_state, project_state)
self.assertEqual(project_state, other_state)
self.assertEqual(project_state != project_state, False)
self.assertEqual(project_state != other_state, False)
self.assertNotEqual(project_state.apps, other_state.apps)
# Make a very small change (max_len 99) and see if that affects it
project_state = ProjectState()
project_state.add_model(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=99)),
("hidden", models.BooleanField()),
],
{},
None,
))
self.assertNotEqual(project_state, other_state)
self.assertEqual(project_state == other_state, False)
def test_dangling_references_throw_error(self):
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Publisher(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
publisher = models.ForeignKey(Publisher, models.CASCADE)
class Meta:
app_label = "migrations"
apps = new_apps
class Magazine(models.Model):
authors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Author))
project_state.add_model(ModelState.from_model(Publisher))
project_state.add_model(ModelState.from_model(Book))
project_state.add_model(ModelState.from_model(Magazine))
self.assertEqual(len(project_state.apps.get_models()), 4)
# now make an invalid one with a ForeignKey
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Book))
msg = (
"The field migrations.Book.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Book.publisher was declared with a lazy reference "
"to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'."
)
with self.assertRaisesMessage(ValueError, msg):
project_state.apps
# And another with ManyToManyField.
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Magazine))
msg = (
"The field migrations.Magazine.authors was declared with a lazy reference "
"to 'migrations.author\', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Magazine_authors.author was declared with a lazy reference "
"to \'migrations.author\', but app 'migrations' doesn't provide model 'author'."
)
with self.assertRaisesMessage(ValueError, msg):
project_state.apps
# And now with multiple models and multiple fields.
project_state.add_model(ModelState.from_model(Book))
msg = (
"The field migrations.Book.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Book.publisher was declared with a lazy reference "
"to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'.\n"
"The field migrations.Magazine.authors was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Magazine_authors.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'."
)
with self.assertRaisesMessage(ValueError, msg):
project_state.apps
def test_real_apps(self):
"""
Tests that including real apps can resolve dangling FK errors.
This test relies on the fact that contenttypes is always loaded.
"""
new_apps = Apps()
class TestModel(models.Model):
ct = models.ForeignKey("contenttypes.ContentType", models.CASCADE)
class Meta:
app_label = "migrations"
apps = new_apps
# If we just stick it into an empty state it should fail
project_state = ProjectState()
project_state.add_model(ModelState.from_model(TestModel))
with self.assertRaises(ValueError):
project_state.apps
# If we include the real app it should succeed
project_state = ProjectState(real_apps=["contenttypes"])
project_state.add_model(ModelState.from_model(TestModel))
rendered_state = project_state.apps
self.assertEqual(
len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]),
1,
)
def test_ignore_order_wrt(self):
"""
        Makes sure ProjectState doesn't include OrderWrt fields when it is
        built from existing models.
"""
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
app_label = "migrations"
apps = new_apps
order_with_respect_to = "author"
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Author))
project_state.add_model(ModelState.from_model(Book))
self.assertEqual(
[name for name, field in project_state.models["migrations", "book"].fields],
["id", "author"],
)
def test_manager_refer_correct_model_version(self):
"""
#24147 - Tests that managers refer to the correct version of a
historical model
"""
project_state = ProjectState()
project_state.add_model(ModelState(
app_label="migrations",
name="Tag",
fields=[
("id", models.AutoField(primary_key=True)),
("hidden", models.BooleanField()),
],
managers=[
('food_mgr', FoodManager('a', 'b')),
('food_qs', FoodQuerySet.as_manager()),
]
))
old_model = project_state.apps.get_model('migrations', 'tag')
new_state = project_state.clone()
operation = RemoveField("tag", "hidden")
operation.state_forwards("migrations", new_state)
new_model = new_state.apps.get_model('migrations', 'tag')
self.assertIsNot(old_model, new_model)
self.assertIs(old_model, old_model.food_mgr.model)
self.assertIs(old_model, old_model.food_qs.model)
self.assertIs(new_model, new_model.food_mgr.model)
self.assertIs(new_model, new_model.food_qs.model)
self.assertIsNot(old_model.food_mgr, new_model.food_mgr)
self.assertIsNot(old_model.food_qs, new_model.food_qs)
self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model)
self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model)
def test_choices_iterator(self):
"""
#24483 - ProjectState.from_apps should not destructively consume
Field.choices iterators.
"""
new_apps = Apps(["migrations"])
choices = [('a', 'A'), ('b', 'B')]
class Author(models.Model):
name = models.CharField(max_length=255)
choice = models.CharField(max_length=255, choices=iter(choices))
class Meta:
app_label = "migrations"
apps = new_apps
ProjectState.from_apps(new_apps)
choices_field = Author._meta.get_field('choice')
self.assertEqual(list(choices_field.choices), choices)
class ModelStateTests(SimpleTestCase):
def test_custom_model_base(self):
state = ModelState.from_model(ModelWithCustomBase)
self.assertEqual(state.bases, (models.Model,))
def test_bound_field_sanity_check(self):
field = models.CharField(max_length=1)
field.model = models.Model
with self.assertRaisesMessage(ValueError, 'ModelState.fields cannot be bound to a model - "field" is.'):
ModelState('app', 'Model', [('field', field)])
def test_sanity_check_to(self):
field = models.ForeignKey(UnicodeModel, models.CASCADE)
with self.assertRaisesMessage(
ValueError,
'ModelState.fields cannot refer to a model class - "field.to" does. '
'Use a string reference instead.'
):
ModelState('app', 'Model', [('field', field)])
def test_sanity_check_through(self):
field = models.ManyToManyField('UnicodeModel')
field.remote_field.through = UnicodeModel
with self.assertRaisesMessage(
ValueError,
'ModelState.fields cannot refer to a model class - "field.through" does. '
'Use a string reference instead.'
):
ModelState('app', 'Model', [('field', field)])
def test_fields_immutability(self):
"""
Tests that rendering a model state doesn't alter its internal fields.
"""
apps = Apps()
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)])
Model = state.render(apps)
self.assertNotEqual(Model._meta.get_field('name'), field)
def test_repr(self):
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C'])
self.assertEqual(repr(state), "<ModelState: 'app.Model'>")
project_state = ProjectState()
project_state.add_model(state)
with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"):
project_state.apps
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_create_swappable(self):
"""
Tests making a ProjectState from an Apps with a swappable model
"""
new_apps = Apps(['migrations'])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = 'migrations'
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
author_state = ModelState.from_model(Author)
self.assertEqual(author_state.app_label, 'migrations')
self.assertEqual(author_state.name, 'Author')
self.assertEqual([x for x, y in author_state.fields], ['id', 'name', 'bio', 'age'])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertEqual(author_state.fields[2][1].null, False)
self.assertEqual(author_state.fields[3][1].null, True)
self.assertEqual(author_state.options, {'swappable': 'TEST_SWAPPABLE_MODEL'})
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(author_state.managers, [])
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_custom_manager_swappable(self):
"""
Tests making a ProjectState from unused models with custom managers
"""
new_apps = Apps(['migrations'])
class Food(models.Model):
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
food_no_mgr = NoMigrationFoodManager('x', 'y')
class Meta:
app_label = "migrations"
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
food_state = ModelState.from_model(Food)
# The default manager is used in migrations
self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
@isolate_apps('migrations', 'django.contrib.contenttypes')
def test_order_with_respect_to_private_field(self):
class PrivateFieldModel(models.Model):
content_type = models.ForeignKey('contenttypes.ContentType', models.CASCADE)
object_id = models.PositiveIntegerField()
private = GenericForeignKey()
class Meta:
order_with_respect_to = 'private'
state = ModelState.from_model(PrivateFieldModel)
self.assertNotIn('order_with_respect_to', state.options)
class RelatedModelsTests(SimpleTestCase):
def setUp(self):
self.apps = Apps(['migrations.related_models_app'])
def create_model(self, name, foreign_keys=[], bases=(), abstract=False, proxy=False):
test_name = 'related_models_app'
assert not (abstract and proxy)
meta_contents = {
'abstract': abstract,
'app_label': test_name,
'apps': self.apps,
'proxy': proxy,
}
meta = type(str("Meta"), tuple(), meta_contents)
if not bases:
bases = (models.Model,)
body = {
'Meta': meta,
'__module__': "__fake__",
}
fname_base = fname = '%s_%%d' % name.lower()
for i, fk in enumerate(foreign_keys, 1):
fname = fname_base % i
body[fname] = fk
return type(name, bases, body)
def assertRelated(self, model, needle):
self.assertEqual(
get_related_models_recursive(model),
{(n._meta.app_label, n._meta.model_name) for n in needle},
)
def test_unrelated(self):
A = self.create_model("A")
B = self.create_model("B")
self.assertRelated(A, [])
self.assertRelated(B, [])
def test_direct_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B")
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_direct_hidden_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE, related_name='+')])
B = self.create_model("B")
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_fk_through_proxy(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
C = self.create_model("C", bases=(B,), proxy=True)
D = self.create_model("D", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
self.assertRelated(A, [B, C, D])
self.assertRelated(B, [A, C, D])
self.assertRelated(C, [A, B, D])
self.assertRelated(D, [A, B, C])
def test_nested_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
C = self.create_model("C")
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_two_sided(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B", foreign_keys=[models.ForeignKey('A', models.CASCADE)])
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_circle(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
C = self.create_model("C", foreign_keys=[models.ForeignKey('A', models.CASCADE)])
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,))
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_nested_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,))
C = self.create_model("C", bases=(B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_multiple_bases(self):
A = self.create_model("A")
B = self.create_model("B")
C = self.create_model("C", bases=(A, B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_multiple_nested_bases(self):
A = self.create_model("A")
B = self.create_model("B")
C = self.create_model("C", bases=(A, B,))
D = self.create_model("D")
E = self.create_model("E", bases=(D,))
F = self.create_model("F", bases=(C, E,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, C, D, E, F])
self.assertRelated(B, [A, C, D, E, F])
self.assertRelated(C, [A, B, D, E, F])
self.assertRelated(D, [A, B, C, E, F])
self.assertRelated(E, [A, B, C, D, F])
self.assertRelated(F, [A, B, C, D, E])
self.assertRelated(Y, [Z])
self.assertRelated(Z, [Y])
def test_base_to_base_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('Y', models.CASCADE)])
B = self.create_model("B", bases=(A,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, Y, Z])
self.assertRelated(B, [A, Y, Z])
self.assertRelated(Y, [A, B, Z])
self.assertRelated(Z, [A, B, Y])
def test_base_to_subclass_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('Z', models.CASCADE)])
B = self.create_model("B", bases=(A,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, Y, Z])
self.assertRelated(B, [A, Y, Z])
self.assertRelated(Y, [A, B, Z])
self.assertRelated(Z, [A, B, Y])
def test_direct_m2m(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B')])
B = self.create_model("B")
self.assertRelated(A, [A.a_1.rel.through, B])
self.assertRelated(B, [A, A.a_1.rel.through])
def test_direct_m2m_self(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('A')])
self.assertRelated(A, [A.a_1.rel.through])
def test_intermediate_m2m_self(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('A', through='T')])
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('A', models.CASCADE),
])
self.assertRelated(A, [T])
self.assertRelated(T, [A])
def test_intermediate_m2m(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('B', models.CASCADE),
])
self.assertRelated(A, [B, T])
self.assertRelated(B, [A, T])
self.assertRelated(T, [A, B])
def test_intermediate_m2m_extern_fk(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
Z = self.create_model("Z")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('B', models.CASCADE),
models.ForeignKey('Z', models.CASCADE),
])
self.assertRelated(A, [B, T, Z])
self.assertRelated(B, [A, T, Z])
self.assertRelated(T, [A, B, Z])
self.assertRelated(Z, [A, B, T])
def test_intermediate_m2m_base(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
S = self.create_model("S")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('B', models.CASCADE),
], bases=(S,))
self.assertRelated(A, [B, S, T])
self.assertRelated(B, [A, S, T])
self.assertRelated(S, [A, B, T])
self.assertRelated(T, [A, B, S])
def test_generic_fk(self):
A = self.create_model("A", foreign_keys=[
models.ForeignKey('B', models.CASCADE),
GenericForeignKey(),
])
B = self.create_model("B", foreign_keys=[
models.ForeignKey('C', models.CASCADE),
])
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_abstract_base(self):
A = self.create_model("A", abstract=True)
B = self.create_model("B", bases=(A,))
self.assertRelated(A, [B])
self.assertRelated(B, [])
def test_nested_abstract_base(self):
A = self.create_model("A", abstract=True)
B = self.create_model("B", bases=(A,), abstract=True)
C = self.create_model("C", bases=(B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [C])
self.assertRelated(C, [])
def test_proxy_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
self.assertRelated(A, [B])
self.assertRelated(B, [])
def test_nested_proxy_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
C = self.create_model("C", bases=(B,), proxy=True)
self.assertRelated(A, [B, C])
self.assertRelated(B, [C])
self.assertRelated(C, [])
def test_multiple_mixed_bases(self):
A = self.create_model("A", abstract=True)
M = self.create_model("M")
P = self.create_model("P")
Q = self.create_model("Q", bases=(P,), proxy=True)
Z = self.create_model("Z", bases=(A, M, Q))
# Z has a pointer O2O field p_ptr to P (inherited through its proxy base Q), which is why M, P, Q and Z all end up related
self.assertRelated(A, [M, P, Q, Z])
self.assertRelated(M, [P, Q, Z])
self.assertRelated(P, [M, Q, Z])
self.assertRelated(Q, [M, P, Z])
self.assertRelated(Z, [M, P, Q])
| {
"content_hash": "0d1450604cdf0ae54cbb192b787b96fb",
"timestamp": "",
"source": "github",
"line_count": 1183,
"max_line_length": 117,
"avg_line_length": 39.385460693153,
"alnum_prop": 0.584272315583886,
"repo_name": "hobarrera/django",
"id": "2e53e4f82578266688a38e85498bce22b5fe74ab",
"size": "46593",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/migrations/test_state.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170510"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11461168"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import random
import math
class Chromosome(object):
class PerfectMatch(Exception):
def __init__(self, perfectSpecimen):
# keep a reference to the specimen that triggered the perfect match
self.perfectSpecimen = perfectSpecimen
Exception.__init__(self, "Perfect match found")
def __init__(self, chromosomeType, genes):
"""
:param chromosomeType: a template for the chromosome
:type chromosomeType: ChromosomeType
:param genes: a list of genes
:type genes: [Gene]
"""
self.chromosomeType = chromosomeType
self.genes = genes
self.fitness = None
self.perfectMatch = False
def copy(self):
"""
Create a deep copy of this chromosome
:rtype: Chromosome
"""
newGenes = []
for gene in self.genes:
newGenes.append(gene.copy())
return Chromosome(self.chromosomeType, newGenes)
def mutate(self, mutationRate, mutationSTDEV):
"""
Mutate genes with given probability
:param mutationRate: the average number of times that each gene should be mutated.
0.5 means each gene has a 50% of being mutated
2.0 means each gene will be mutated two times, on average
:type mutationRate: float
:param mutationSTDEV: genes are mutated a number of times depending on a normal distribution, This is the
standard deviation
"""
for gene in self.genes:
mutations = random.normalvariate(mutationRate, mutationSTDEV)
wholeMutations = int(math.floor(mutations))
# use the fractional part of the sampled value as the chance of one extra mutation
partialMutations = mutations - wholeMutations
if partialMutations > random.uniform(0, 1):
wholeMutations += 1
for mutation in xrange(wholeMutations):
gene.mutate(self)
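# Worked example (added for clarity; not in the original source): with mutationRate=1.3
# and mutationSTDEV=0.0 every gene samples mutations=1.3, so wholeMutations=1 and the
# remaining fraction of 0.3 gives a 30% chance of one extra mutation -- i.e. 1.3
# mutations per gene on average, matching the docstring above.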
def doFitnessTest(self):
self.fitness = self.chromosomeType.fitnessFunction(self)
#we have tested the fitness. If it is still None, then a perfect match must have been found
if self.fitness is None:
self.perfectMatch = True
raise self.PerfectMatch(self)
def getFitness(self):
"""
Measure the fitness of this individual
:return:
"""
if self.perfectMatch:
return "perfect match"
elif self.fitness is None:
self.doFitnessTest()
return self.fitness
else:
return self.fitness
def __str__(self):
result = "[\n"
for gene in self.genes:
result += "\t" + str(gene) + "\n"
result += "]"
if self.fitness is not None:
result += " fitness: " + str(self.fitness)
return result
def __setitem__(self, key, value):
"""
Set the value of a particular gene
"""
for gene in self.genes:
if gene.geneType.description == key:
gene.value = value
break
def __getitem__(self, item):
"""
Get the value of a particular gene
:return: the value of the gene whose geneType description matches item
"""
for gene in self.genes:
if gene.geneType.description == item:
return gene.value
def __add__(self, other):
"""
Takes two chromosomes and returns a child chromosome that is a mix of their genes
:type other: Chromosome
:rtype: Chromosome
"""
newGenes = []
for geneIndex in xrange(len(self.genes)):
chosen = None
if self.genes[geneIndex].geneType.combiner is not None:
chosen = self.genes[geneIndex].geneType.combiner(self.genes[geneIndex], other.genes[geneIndex])
else:
chosen = random.choice((self.genes[geneIndex], other.genes[geneIndex]))
newGenes.append(chosen.copy())
return Chromosome(self.chromosomeType, newGenes)
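# Illustrative note (added; not in the original source): crossover walks the parents'
# genes index by index -- if the gene type supplies a custom combiner it merges the two
# parent genes, otherwise one parent's gene is picked at random -- so the child always
# has the same number and ordering of genes as its parents.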
def __lt__(self, other):
return self.getFitness() < other.getFitness()
def __gt__(self, other):
return self.getFitness() > other.getFitness()
def __le__(self, other):
return self.getFitness() <= other.getFitness()
def __ge__(self, other):
return self.getFitness() >= other.getFitness()
def __eq__(self, other):
return self.getFitness() == other.getFitness()
def __ne__(self, other):
return self.getFitness() != other.getFitness()
def data(self):
"""
Convert to a form that can easily be incorporated into a yaml file
"""
data = {}
data["fitness"] = self.getFitness()
data["genes"] = {}
for gene in self.genes:
data["genes"][gene.geneType.description] = gene.value
return data | {
"content_hash": "02106a9211d3e539d48fd6aa5b12bb11",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 113,
"avg_line_length": 33.70945945945946,
"alnum_prop": 0.5748647023451593,
"repo_name": "littley/pyvolution",
"id": "7ed323273dcd0b703182da5c919c7527ff0377b8",
"size": "4989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "internal/Chromosome.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35299"
}
],
"symlink_target": ""
} |
"""
Created on Thu Oct 15 12:53:42 2015
@author: Reed
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
import patsy
'''
script for basic exploratory analysis and naively pooled regression across all stores
'''
enriched_path = '../data/enriched/'
raw_path = '../data/raw/'
trainset = pd.read_csv(enriched_path + "train.csv")
train_reg = trainset[['Sales','Store','DayOfWeek','Assortment','CompetitionDistance','Promo','DaysOpen','StoreType','LastSchoolHoliday','LastPromo','CurrentPromo']]
train_reg[['DayOfWeek','Store']] = train_reg[['DayOfWeek','Store']].astype(str)
train_reg_np = np.asarray(train_reg)
# run OLS regression
result_full = sm.ols(formula="Sales ~ Assortment + Store + DayOfWeek + CompetitionDistance + Promo + DaysOpen + StoreType + LastSchoolHoliday + LastPromo + CurrentPromo", data=train_reg).fit()
result_full.summary()
#look at store 1
store_select = trainset.groupby('Store')
store1_Data = store_select.get_group(1)
store1_Data = store1_Data[['Sales','Date','Assortment','CompetitionDistance','Promo','DaysOpen','StoreType','LastSchoolHoliday','LastPromo','CurrentPromo']]
store1_Data=store1_Data.reset_index()
plt.plot(store1_Data.index, store1_Data['Sales'])
plt.xlabel('time')
plt.ylabel('sales')
plt.title('store 1 sales over time')
#Add labels for month
#xmas 2013
store1_Data[290:310]
#xmas 2014
store1_Data[290:310]
#Easter 2014
store1_Data[385:405]
#Easter 2015
store1_Data[385:405]
#average sales by date
trainset_time_sales=trainset[['Sales','Date']]
trainset_time_sales=trainset_time_sales.groupby('Date').aggregate(np.mean).reset_index()
trainset_time_sales = trainset_time_sales.sort_values(by='Date')
plt.plot(trainset_time_sales.index, trainset_time_sales['Sales'])
plt.xlabel('time')
plt.ylabel('sales')
plt.title('average sales over time')
#mean absolute residual
result_full.resid.abs().mean()
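# Hedged addition (not part of the original script): a sketch of how the pooled-OLS fit
# could be summarized per store, reusing only the train_reg frame and result_full fit
# defined above; anything beyond those names is illustrative.
resid_by_store = train_reg.assign(abs_resid=result_full.resid.abs()).groupby('Store')['abs_resid'].mean()
print(resid_by_store.sort_values(ascending=False).head(10))  # stores the pooled model fits worst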
| {
"content_hash": "d67ee39559cf69c9ccb1de51d7fa131c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 212,
"avg_line_length": 30.47761194029851,
"alnum_prop": 0.718413320274241,
"repo_name": "bwpriest/rossmannsales",
"id": "f205576d050cc89c475872b09be956a9780857d0",
"size": "2067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/exploratory_regression.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25589"
}
],
"symlink_target": ""
} |
"""
Utility script used to run the flake8 linter over all the project Python
sources.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import subprocess
import sys
__all__ = [
'lint',
]
# -----------------------------------------------------------------------------
# Constants
_UTILS_DIR = os.path.abspath(os.path.dirname(__file__))
_PROJECT_DIR = os.path.dirname(_UTILS_DIR)
_REQUIRED_PACKAGES = [
'flake8',
'flake8-import-order',
]
_INSTALL_FLAKE8_MESSAGE = """
The flake8 and flake8-import-order Python packages are required for linting,
but these were not found on your system.
You can install these using:
python3 -m pip install flake8
python3 -m pip install flake8-import-order
For more help, see http://flake8.pycqa.org.
"""
# -----------------------------------------------------------------------------
# Helpers
def _is_package_installed(name):
"""Runs the pip command to check if a package is installed.
"""
command = [
sys.executable,
'-m', 'pip',
'show', '--quiet',
name,
]
with open(os.devnull, 'w') as devnull:
status = subprocess.call(command, stderr=devnull)
return not status
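# For reference (added; not part of the original script): the subprocess above is the
# programmatic equivalent of running e.g. `python3 -m pip show --quiet flake8` in a
# shell and treating a zero exit status as "installed".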
# -----------------------------------------------------------------------------
def lint(args, verbose=False):
all_packages_installed = all([
_is_package_installed(name)
for name in _REQUIRED_PACKAGES
])
if not all_packages_installed:
if verbose:
print(_INSTALL_FLAKE8_MESSAGE)
return 1
return subprocess.call(
[sys.executable, '-m', 'flake8'] + args,
cwd=_PROJECT_DIR,
universal_newlines=True)
if __name__ == '__main__':
sys.exit(lint(sys.argv[1:], verbose=True))
| {
"content_hash": "e9cb8e74c2b946f2bf0955bb37bf35f1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 21.392857142857142,
"alnum_prop": 0.5481357818586533,
"repo_name": "rudkx/swift",
"id": "260761a4bca2b6b366c8c2718023ff967060458b",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "utils/python_lint.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "45808"
},
{
"name": "C",
"bytes": "5375236"
},
{
"name": "C++",
"bytes": "46670890"
},
{
"name": "CMake",
"bytes": "676617"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2593"
},
{
"name": "Emacs Lisp",
"bytes": "57594"
},
{
"name": "LLVM",
"bytes": "74528"
},
{
"name": "Makefile",
"bytes": "2361"
},
{
"name": "Objective-C",
"bytes": "458866"
},
{
"name": "Objective-C++",
"bytes": "159669"
},
{
"name": "Python",
"bytes": "1956421"
},
{
"name": "Roff",
"bytes": "3683"
},
{
"name": "Ruby",
"bytes": "2132"
},
{
"name": "Shell",
"bytes": "211956"
},
{
"name": "Swift",
"bytes": "38081215"
},
{
"name": "Vim script",
"bytes": "20025"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
from pylib.gtest import gtest_test_instance
from pylib.instrumentation import instrumentation_test_instance
from pylib.junit import junit_test_instance
from pylib.linker import linker_test_instance
from pylib.monkey import monkey_test_instance
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_gtest_run
from pylib.local.device import local_device_instrumentation_test_run
from pylib.local.device import local_device_linker_test_run
from pylib.local.device import local_device_monkey_test_run
from pylib.local.device import local_device_perf_test_run
from pylib.local.machine import local_machine_environment
from pylib.local.machine import local_machine_junit_test_run
from pylib.perf import perf_test_instance
def _CreatePerfTestRun(args, env, test_instance):
if args.print_step:
return local_device_perf_test_run.PrintStep(
env, test_instance)
elif args.output_json_list:
return local_device_perf_test_run.OutputJsonList(
env, test_instance)
return local_device_perf_test_run.LocalDevicePerfTestRun(
env, test_instance)
def CreateTestRun(args, env, test_instance, error_func):
if isinstance(env, local_device_environment.LocalDeviceEnvironment):
if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
return local_device_gtest_run.LocalDeviceGtestRun(env, test_instance)
if isinstance(test_instance,
instrumentation_test_instance.InstrumentationTestInstance):
return (local_device_instrumentation_test_run
.LocalDeviceInstrumentationTestRun(env, test_instance))
if isinstance(test_instance, linker_test_instance.LinkerTestInstance):
return (local_device_linker_test_run
.LocalDeviceLinkerTestRun(env, test_instance))
if isinstance(test_instance, monkey_test_instance.MonkeyTestInstance):
return (local_device_monkey_test_run
.LocalDeviceMonkeyTestRun(env, test_instance))
if isinstance(test_instance,
perf_test_instance.PerfTestInstance):
return _CreatePerfTestRun(args, env, test_instance)
if isinstance(env, local_machine_environment.LocalMachineEnvironment):
if isinstance(test_instance, junit_test_instance.JunitTestInstance):
return (local_machine_junit_test_run
.LocalMachineJunitTestRun(env, test_instance))
error_func('Unable to create test run for %s tests in %s environment'
% (str(test_instance), str(env)))
| {
"content_hash": "205ed7f82eb380d41d3973b00ddae304",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 47.16981132075472,
"alnum_prop": 0.7592,
"repo_name": "chrisdickinson/nojs",
"id": "1a2872819d9c2fd09551f1017a320cb7a8e147e2",
"size": "2663",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "build/android/pylib/base/test_run_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "52243"
},
{
"name": "JavaScript",
"bytes": "55472"
},
{
"name": "Python",
"bytes": "16760"
}
],
"symlink_target": ""
} |
"""
-----------------------------------------------------------------------------
Copyright (c) 2009-2019, Shotgun Software Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Shotgun Software Inc nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Python 2/3 compatibility
from .lib import six
from .lib import sgsix
from .lib.six import BytesIO # used for attachment upload
from .lib.six.moves import map
import base64
from .lib.six.moves import http_cookiejar # used for attachment upload
import datetime
import logging
import uuid # used for attachment upload
import os
import re
import copy
import stat # used for attachment upload
import sys
import time
import json
from .lib.six.moves import urllib
import shutil # used for attachment download
from .lib.six.moves import http_client # Used for secure file upload.
from .lib.httplib2 import Http, ProxyInfo, socks, ssl_error_classes
from .lib.sgtimezone import SgTimezone
# Import Error and ResponseError (even though they're unused in this file) since they need
# to be exposed as part of the API.
from .lib.six.moves.xmlrpc_client import Error, ProtocolError, ResponseError # noqa
LOG = logging.getLogger("shotgun_api3")
"""
Logging instance for shotgun_api3
Provides a logging instance where log messages are sent during execution. This instance has no
handler associated with it.
.. seealso:: :ref:`logging`
"""
LOG.setLevel(logging.WARN)
def _is_mimetypes_broken():
"""
Checks if this version of Python ships with a broken version of mimetypes
:returns: True if the version of mimetypes is broken, False otherwise.
"""
# mimetypes is broken on Windows only and for Python 2.7.0 to 2.7.9 inclusively.
# We're bundling the version from 2.7.10.
# See bugs :
# http://bugs.python.org/issue9291 <- Fixed in 2.7.7
# http://bugs.python.org/issue21652 <- Fixed in 2.7.8
# http://bugs.python.org/issue22028 <- Fixed in 2.7.10
return (sys.platform == "win32" and
sys.version_info[0] == 2 and sys.version_info[1] == 7 and
sys.version_info[2] >= 0 and sys.version_info[2] <= 9)
if _is_mimetypes_broken():
from .lib import mimetypes as mimetypes
else:
import mimetypes
# mimetypes imported in version specific imports
mimetypes.add_type("video/webm", ".webm") # webm and mp4 seem to be missing
mimetypes.add_type("video/mp4", ".mp4") # from some OS/distros
SG_TIMEZONE = SgTimezone()
NO_SSL_VALIDATION = False
"""
Turns off hostname matching validation for SSL certificates
Sometimes there are cases where certificate validation should be disabled. For example, if you
have a self-signed internal certificate that isn't included in our certificate bundle, you may
not require the added security provided by enforcing this.
"""
try:
import ssl
except ImportError as e:
if "SHOTGUN_FORCE_CERTIFICATE_VALIDATION" in os.environ:
raise ImportError("%s. SHOTGUN_FORCE_CERTIFICATE_VALIDATION environment variable prevents "
"disabling SSL certificate validation." % e)
LOG.debug("ssl not found, disabling certificate validation")
NO_SSL_VALIDATION = True
# ----------------------------------------------------------------------------
# Version
__version__ = "3.3.1"
# ----------------------------------------------------------------------------
# Errors
class ShotgunError(Exception):
"""
Base for all Shotgun API Errors.
"""
pass
class ShotgunFileDownloadError(ShotgunError):
"""
Exception for file download-related errors.
"""
pass
class ShotgunThumbnailNotReady(ShotgunError):
"""
Exception for when trying to use a 'pending thumbnail' (aka transient thumbnail) in an operation
"""
pass
class Fault(ShotgunError):
"""
Exception when server-side exception detected.
"""
pass
class AuthenticationFault(Fault):
"""
Exception when the server side reports an error related to authentication.
"""
pass
class MissingTwoFactorAuthenticationFault(Fault):
"""
Exception when the server side reports an error related to missing two-factor authentication
credentials.
"""
pass
class UserCredentialsNotAllowedForSSOAuthenticationFault(Fault):
"""
Exception when the server is configured to use SSO. It is not possible to use
a username/password pair to authenticate on such server.
"""
pass
class UserCredentialsNotAllowedForOxygenAuthenticationFault(Fault):
"""
Exception when the server is configured to use Oxygen. It is not possible to use
a username/password pair to authenticate on such server.
"""
pass
# ----------------------------------------------------------------------------
# API
class ServerCapabilities(object):
"""
Container for the servers capabilities, such as version enabled features.
.. warning::
This class is part of the internal API and its interfaces may change at any time in
the future. Therefore, usage of this class is discouraged.
"""
def __init__(self, host, meta):
"""
ServerCapabilities.__init__
:param str host: Host name for the server excluding protocol.
:param dict meta: dict of meta data for the server returned from the info() api method.
:ivar str host:
:ivar dict server_info:
:ivar tuple version: Simple version of the Shotgun server. ``(major, minor, rev)``
:ivar bool is_dev: ``True`` if server is running a development version of the Shotgun
codebase.
"""
# Server host name
self.host = host
self.server_info = meta
# Version from server is major.minor.rev or major.minor.rev."Dev"
# Store version as tuple and check dev flag
try:
self.version = meta.get("version", None)
except AttributeError:
self.version = None
if not self.version:
raise ShotgunError("The ShotGrid Server didn't respond with a version number. "
"This may be because you are running an older version of "
"ShotGrid against a more recent version of the ShotGrid API. "
"For more information, please contact ShotGrid Support.")
if len(self.version) > 3 and self.version[3] == "Dev":
self.is_dev = True
else:
self.is_dev = False
self.version = tuple(self.version[:3])
self._ensure_json_supported()
def _ensure_support(self, feature, raise_hell=True):
"""
Checks the server version supports a given feature, raises an exception if it does not.
:param dict feature: dict where **version** key contains a 3 integer tuple indicating the
supported server version and **label** key contains a human-readable label str::
{'version': (5, 4, 4), 'label': 'project parameter'}
:param bool raise_hell: Whether to raise an exception if the feature is not supported.
Defaults to ``True``
:raises: :class:`ShotgunError` if the current server version does not support ``feature``
"""
if not self.version or self.version < feature["version"]:
if raise_hell:
raise ShotgunError(
"%s requires server version %s or higher, "
"server is %s" % (feature["label"], _version_str(feature["version"]), _version_str(self.version))
)
return False
else:
return True
def _ensure_json_supported(self):
"""
Ensures server has support for JSON API endpoint added in v2.4.0.
"""
self._ensure_support({
"version": (2, 4, 0),
"label": "JSON API"
})
def ensure_include_archived_projects(self):
"""
Ensures server has support for archived Projects feature added in v5.3.14.
"""
self._ensure_support({
"version": (5, 3, 14),
"label": "include_archived_projects parameter"
})
def ensure_per_project_customization(self):
"""
Ensures server has support for per-project customization feature added in v5.4.4.
"""
return self._ensure_support({
"version": (5, 4, 4),
"label": "project parameter"
}, True)
def ensure_support_for_additional_filter_presets(self):
"""
Ensures server has support for additional filter presets feature added in v7.0.0.
"""
return self._ensure_support({
"version": (7, 0, 0),
"label": "additional_filter_presets parameter"
}, True)
def ensure_user_following_support(self):
"""
Ensures server has support for listing items a user is following, added in v7.0.12.
"""
return self._ensure_support({
"version": (7, 0, 12),
"label": "user_following parameter"
}, True)
def ensure_paging_info_without_counts_support(self):
"""
Ensures server has support for optimized pagination, added in v7.4.0.
"""
return self._ensure_support({
"version": (7, 4, 0),
"label": "optimized pagination"
}, False)
def ensure_return_image_urls_support(self):
"""
Ensures server has support for returning thumbnail URLs without additional round-trips, added in v3.3.0.
"""
return self._ensure_support({
"version": (3, 3, 0),
"label": "return thumbnail URLs"
}, False)
def __str__(self):
return "ServerCapabilities: host %s, version %s, is_dev %s"\
% (self.host, self.version, self.is_dev)
class ClientCapabilities(object):
"""
Container for the client capabilities.
.. warning::
This class is part of the internal API and its interfaces may change at any time in
the future. Therefore, usage of this class is discouraged.
:ivar str platform: The current client platform. Valid values are ``mac``, ``linux``,
``windows``, or ``None`` (if the current platform couldn't be determined).
:ivar str local_path_field: The SG field used for local file paths. This is calculated using
the value of ``platform``. Ex. ``local_path_mac``.
:ivar str py_version: Simple version of Python executable as a string. Eg. ``2.7``.
:ivar str ssl_version: Version of OpenSSL installed. Eg. ``OpenSSL 1.0.2g 1 Mar 2016``. This
info is only available in Python 2.7+ if the ssl module was imported successfully.
Defaults to ``unknown``
"""
def __init__(self):
system = sys.platform.lower()
if system == "darwin":
self.platform = "mac"
elif system.startswith("linux"):
self.platform = "linux"
elif system == "win32":
self.platform = "windows"
else:
self.platform = None
if self.platform:
self.local_path_field = "local_path_%s" % (self.platform)
else:
self.local_path_field = None
self.py_version = ".".join(str(x) for x in sys.version_info[:2])
# extract the OpenSSL version if we can. The version is only available in Python 2.7 and
# only if we successfully imported ssl
self.ssl_version = "unknown"
try:
self.ssl_version = ssl.OPENSSL_VERSION
except (AttributeError, NameError):
pass
def __str__(self):
return "ClientCapabilities: platform %s, local_path_field %s, "\
"py_verison %s, ssl version %s" % (self.platform, self.local_path_field,
self.py_version, self.ssl_version)
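# Illustrative note (added; not part of the library): on a macOS host ClientCapabilities()
# reports platform == "mac" and local_path_field == "local_path_mac"; on a platform the
# class does not recognize, both attributes fall back to None.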
class _Config(object):
"""
Container for the client configuration.
"""
def __init__(self, sg):
"""
:param sg: Shotgun connection.
"""
self._sg = sg
self.max_rpc_attempts = 3
# rpc_attempt_interval stores the number of milliseconds to wait between
# request retries. By default, this will be 3000 milliseconds. You can
# override this by setting this property on the config like so:
#
# sg = Shotgun(site_name, script_name, script_key)
# sg.config.rpc_attempt_interval = 1000 # adjusting default interval
#
# Or by setting the ``SHOTGUN_API_RETRY_INTERVAL`` environment variable.
# In the case that the environment variable is already set, setting the
# property on the config will override it.
self.rpc_attempt_interval = 3000
# From http://docs.python.org/2.6/library/httplib.html:
# If the optional timeout parameter is given, blocking operations
# (like connection attempts) will timeout after that many seconds
# (if it is not given, the global default timeout setting is used)
self.timeout_secs = None
self.api_ver = "api3"
self.convert_datetimes_to_utc = True
self._records_per_page = None
self.api_key = None
self.script_name = None
self.user_login = None
self.user_password = None
self.auth_token = None
self.sudo_as_login = None
# Authentication parameters to be folded into final auth_params dict
self.extra_auth_params = None
# uuid as a string
self.session_uuid = None
self.scheme = None
self.server = None
self.api_path = None
# The raw_http_proxy reflects the exact string passed in
# to the Shotgun constructor. This can be useful if you
# need to construct a Shotgun API instance based on
# another Shotgun API instance.
self.raw_http_proxy = None
# if a proxy server is being used, the proxy_handler
# below will contain a urllib2.ProxyHandler instance
# which can be used whenever a request needs to be made.
self.proxy_handler = None
self.proxy_server = None
self.proxy_port = 8080
self.proxy_user = None
self.proxy_pass = None
self.session_token = None
self.authorization = None
self.no_ssl_validation = False
self.localized = False
def set_server_params(self, base_url):
"""
Set the different server related fields based on the passed in URL.
This will impact the following attributes:
- scheme: http or https
- api_path: usually /api3/json
- server: usually something.shotgunstudio.com
:param str base_url: The server URL.
:raises ValueError: Raised if protocol is not http or https.
"""
self.scheme, self.server, api_base, _, _ = \
urllib.parse.urlsplit(base_url)
if self.scheme not in ("http", "https"):
raise ValueError(
"base_url must use http or https got '%s'" % base_url
)
self.api_path = urllib.parse.urljoin(urllib.parse.urljoin(
api_base or "/", self.api_ver + "/"), "json"
)
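# Worked example (added; not part of the library): with the default api_ver of "api3",
# set_server_params("https://example.shotgunstudio.com") leaves scheme == "https",
# server == "example.shotgunstudio.com" and api_path == "/api3/json".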
@property
def records_per_page(self):
"""
The records per page value from the server.
"""
if self._records_per_page is None:
# Check for api_max_entities_per_page in the server info and change the record per page
# value if it is supplied.
self._records_per_page = self._sg.server_info.get("api_max_entities_per_page") or 500
return self._records_per_page
class Shotgun(object):
"""
Shotgun Client connection.
"""
# reg ex from
# http://underground.infovark.com/2008/07/22/iso-date-validation-regex/
# Note a length check is done before checking the reg ex
_DATE_PATTERN = re.compile(
r"^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])$")
_DATE_TIME_PATTERN = re.compile(
r"^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])"
r"(\D?([01]\d|2[0-3])\D?([0-5]\d)\D?([0-5]\d)?\D?(\d{3})?)?$")
_MULTIPART_UPLOAD_CHUNK_SIZE = 20000000
def __init__(self,
base_url,
script_name=None,
api_key=None,
convert_datetimes_to_utc=True,
http_proxy=None,
ensure_ascii=True,
connect=True,
ca_certs=None,
login=None,
password=None,
sudo_as_login=None,
session_token=None,
auth_token=None):
"""
Initializes a new instance of the Shotgun client.
:param str base_url: http or https url of the Shotgun server. Do not include the trailing
slash::
https://example.shotgunstudio.com
:param str script_name: name of the Script entity used to authenticate to the server.
If provided, then ``api_key`` must be as well, and neither ``login`` nor ``password``
can be provided.
.. seealso:: :ref:`authentication`
:param str api_key: API key for the provided ``script_name``. Used to authenticate to the
server. If provided, then ``script_name`` must be as well, and neither ``login`` nor
``password`` can be provided.
.. seealso:: :ref:`authentication`
:param bool convert_datetimes_to_utc: (optional) When ``True``, datetime values are converted
from local time to UTC time before being sent to the server. Datetimes received from
the server are then converted back to local time. When ``False`` the client should use
UTC date time values. Default is ``True``.
:param str http_proxy: (optional) URL for a proxy server to use for all connections. The
expected str format is ``[username:password@]111.222.333.444[:8080]``. Examples::
192.168.0.1
192.168.0.1:8888
joe:user@192.168.0.1:8888
:param bool connect: (optional) When ``True``, as soon as the :class:`~shotgun_api3.Shotgun`
instance is created, a connection will be made to the Shotgun server to determine the
server capabilities and confirm this version of the client is compatible with the server
version. This is mostly used for testing. Default is ``True``.
:param str ca_certs: (optional) path to an external SSL certificates file. By default, the
Shotgun API will use its own built-in certificates file which stores root certificates
for the most common Certificate Authorities (CAs). If you are using a corporate or
internal CA, or are packaging an application into an executable, it may be necessary to
point to your own certificates file. You can do this by passing in the full path to the
file via this parameter or by setting the environment variable ``SHOTGUN_API_CACERTS``.
In the case both are set, this parameter will take precedence.
:param str login: The user login str to use to authenticate to the server when using user-based
authentication. If provided, then ``password`` must be as well, and neither
``script_name`` nor ``api_key`` can be provided.
.. seealso:: :ref:`authentication`
:param str password: The password str to use to authenticate to the server when using user-based
authentication. If provided, then ``login`` must be as well and neither ``script_name``
nor ``api_key`` can be provided.
See :ref:`authentication` for more info.
:param str sudo_as_login: A user login string for the user whose permissions will be applied
to all actions. Event log entries will be generated showing this user performing all
actions with an additional extra meta-data parameter ``sudo_actual_user`` indicating the
script or user that is actually authenticated.
:param str session_token: The session token to use to authenticate to the server. This
can be used as an alternative to authenticating with a script user or regular user.
You can retrieve the session token by running the
:meth:`~shotgun_api3.Shotgun.get_session_token()` method.
.. todo: Add this info to the Authentication section of the docs
:param str auth_token: The authentication token required to authenticate to a server with
two-factor authentication turned on. If provided, then ``login`` and ``password`` must
be provided as well, and neither ``script_name`` nor ``api_key`` can be provided.
.. note:: These tokens can be short lived so a session is established right away if an
``auth_token`` is provided. A
:class:`~shotgun_api3.MissingTwoFactorAuthenticationFault` will be raised if the
``auth_token`` is invalid.
.. todo: Add this info to the Authentication section of the docs
.. note:: A note about proxy connections: If you are using Python <= v2.6.2, HTTPS
connections through a proxy server will not work due to a bug in the :mod:`urllib2`
library (see http://bugs.python.org/issue1424152). This will affect upload and
download-related methods in the Shotgun API (eg. :meth:`~shotgun_api3.Shotgun.upload`,
:meth:`~shotgun_api3.Shotgun.upload_thumbnail`,
:meth:`~shotgun_api3.Shotgun.upload_filmstrip_thumbnail`,
:meth:`~shotgun_api3.Shotgun.download_attachment`. Normal CRUD methods for passing JSON
data should still work fine. If you cannot upgrade your Python installation, you can see
the patch merged into Python v2.6.3 (http://hg.python.org/cpython/rev/0f57b30a152f/) and
try and hack it into your installation but YMMV. For older versions of Python there
are other patches that were proposed in the bug report that may help you as well.
"""
# verify authentication arguments
if session_token is not None:
if script_name is not None or api_key is not None:
raise ValueError("cannot provide both session_token "
"and script_name/api_key")
if login is not None or password is not None:
raise ValueError("cannot provide both session_token "
"and login/password")
if login is not None or password is not None:
if script_name is not None or api_key is not None:
raise ValueError("cannot provide both login/password "
"and script_name/api_key")
if login is None:
raise ValueError("password provided without login")
if password is None:
raise ValueError("login provided without password")
if script_name is not None or api_key is not None:
if script_name is None:
raise ValueError("api_key provided without script_name")
if api_key is None:
raise ValueError("script_name provided without api_key")
if auth_token is not None:
if login is None or password is None:
raise ValueError("must provide a user login and password with an auth_token")
if script_name is not None or api_key is not None:
raise ValueError("cannot provide an auth_code with script_name/api_key")
# Can't use 'all' with python 2.4
if len([x for x in [session_token, script_name, api_key, login, password] if x]) == 0:
if connect:
raise ValueError("must provide login/password, session_token or script_name/api_key")
self.config = _Config(self)
self.config.api_key = api_key
self.config.script_name = script_name
self.config.user_login = login
self.config.user_password = password
self.config.auth_token = auth_token
self.config.session_token = session_token
self.config.sudo_as_login = sudo_as_login
self.config.convert_datetimes_to_utc = convert_datetimes_to_utc
self.config.no_ssl_validation = NO_SSL_VALIDATION
self.config.raw_http_proxy = http_proxy
try:
self.config.rpc_attempt_interval = int(os.environ.get("SHOTGUN_API_RETRY_INTERVAL", 3000))
except ValueError:
retry_interval = os.environ.get("SHOTGUN_API_RETRY_INTERVAL", 3000)
raise ValueError("Invalid value '%s' found in environment variable "
"SHOTGUN_API_RETRY_INTERVAL, must be int." % retry_interval)
if self.config.rpc_attempt_interval < 0:
raise ValueError("Value of SHOTGUN_API_RETRY_INTERVAL must be positive, "
"got '%s'." % self.config.rpc_attempt_interval)
self._connection = None
self.__ca_certs = self._get_certs_file(ca_certs)
self.base_url = (base_url or "").lower()
self.config.set_server_params(self.base_url)
# if the service contains user information strip it out
# copied from the xmlrpclib which turned the user:password into
# an auth header
# Do NOT urlsplit(self.base_url) here, as it contains the lower case version
# of the base_url argument. Doing so would base64-encode the lowercase
# version of the credentials.
auth, self.config.server = urllib.parse.splituser(urllib.parse.urlsplit(base_url).netloc)
if auth:
auth = base64.encodestring(six.ensure_binary(urllib.parse.unquote(auth))).decode("utf-8")
self.config.authorization = "Basic " + auth.strip()
# foo:bar@123.456.789.012:3456
if http_proxy:
# check if we're using authentication. Start from the end since there might be
# @ in the user's password.
p = http_proxy.rsplit("@", 1)
if len(p) > 1:
self.config.proxy_user, self.config.proxy_pass = \
p[0].split(":", 1)
proxy_server = p[1]
else:
proxy_server = http_proxy
proxy_netloc_list = proxy_server.split(":", 1)
self.config.proxy_server = proxy_netloc_list[0]
if len(proxy_netloc_list) > 1:
try:
self.config.proxy_port = int(proxy_netloc_list[1])
except ValueError:
raise ValueError("Invalid http_proxy address '%s'. Valid "
"format is '123.456.789.012' or '123.456.789.012:3456'"
". If no port is specified, a default of %d will be "
"used." % (http_proxy, self.config.proxy_port))
# now populate self.config.proxy_handler
if self.config.proxy_user and self.config.proxy_pass:
auth_string = "%s:%s@" % (self.config.proxy_user, self.config.proxy_pass)
else:
auth_string = ""
proxy_addr = "http://%s%s:%d" % (auth_string, self.config.proxy_server, self.config.proxy_port)
self.config.proxy_handler = urllib.request.ProxyHandler({self.config.scheme: proxy_addr})
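# Worked example (added; not part of the library): http_proxy="joe:pass@192.168.0.1:8888"
# is split into proxy_user "joe", proxy_pass "pass", proxy_server "192.168.0.1" and
# proxy_port 8888, and the handler above routes this site's scheme through
# "http://joe:pass@192.168.0.1:8888".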
if ensure_ascii:
self._json_loads = self._json_loads_ascii
self.client_caps = ClientCapabilities()
# this relies on self.client_caps being set first
self.reset_user_agent()
self._server_caps = None
# test to ensure that the server supports the json API
# call to server will only be made once and will raise error
if connect:
self.server_caps
# When using auth_token in a 2FA scenario we need to switch to session-based
# authentication because the auth token will no longer be valid after a first use.
if self.config.auth_token is not None:
self.config.session_token = self.get_session_token()
self.config.user_login = None
self.config.user_password = None
self.config.auth_token = None
# ========================================================================
# API Functions
@property
def server_info(self):
"""
Property containing server information.
>>> sg.server_info
{'full_version': [6, 3, 15, 0], 'version': [6, 3, 15], ...}
.. note::
Beyond ``full_version`` and ``version`` which differ by the inclusion of the bugfix number,
you should expect these values to be unsupported and for internal use only.
:returns: dict of server information from :class:`ServerCapabilities` object
:rtype: dict
"""
return self.server_caps.server_info
@property
def server_caps(self):
"""
Property containing :class:`ServerCapabilities` object.
>>> sg.server_caps
<shotgun_api3.shotgun.ServerCapabilities object at 0x10120d350>
:returns: :class:`ServerCapabilities` object that describe the server the client is
connected to.
:rtype: :class:`ServerCapabilities` object
"""
if not self._server_caps or (self._server_caps.host != self.config.server):
self._server_caps = ServerCapabilities(self.config.server, self.info())
return self._server_caps
def connect(self):
"""
Connect client to the server if it is not already connected.
.. note:: The client will automatically connect to the server on demand. You only need to
call this function if you wish to confirm the client can connect.
"""
self._get_connection()
self.info()
return
def close(self):
"""
Close the current connection to the server.
If the client needs to connect again it will do so automatically.
"""
self._close_connection()
return
def info(self):
"""
Get API-related metadata from the Shotgun server.
>>> sg.info()
{'full_version': [8, 2, 1, 0], 'version': [8, 2, 1], 'user_authentication_method': 'default', ...}
::
Token Value
-------- ---------
full_version An ordered array of the full Shotgun version.
[major, minor, patch, hotfix]
version An ordered array of the Shotgun version.
[major, minor, patch]
user_authentication_method Indicates the authentication method used by Shotgun.
Will be one of the following values:
default: regular username/password.
ldap: username/password from the company's LDAP.
saml2: SSO used, over SAML2.
.. note::
Beyond the documented tokens, you should expect
the other values to be unsupported and for internal use only.
:returns: dict of the server metadata.
:rtype: dict
"""
return self._call_rpc("info", None, include_auth_params=False)
def find_one(self, entity_type, filters, fields=None, order=None, filter_operator=None, retired_only=False,
include_archived_projects=True, additional_filter_presets=None):
"""
Shortcut for :meth:`~shotgun_api3.Shotgun.find` with ``limit=1`` so it returns a single
result.
>>> sg.find_one("Asset", [["id", "is", 32]], ["id", "code", "sg_status_list"])
{'code': 'Gopher', 'id': 32, 'sg_status_list': 'ip', 'type': 'Asset'}
:param str entity_type: Shotgun entity type as a string to find.
:param list filters: list of filters to apply to the query.
.. seealso:: :ref:`filter_syntax`
:param list fields: Optional list of fields to include in each entity record returned.
Defaults to ``["id"]``.
:param int order: Optional list of fields to order the results by. List has the format::
[{'field_name':'foo', 'direction':'asc'}, {'field_name':'bar', 'direction':'desc'}]
Defaults to sorting by ``id`` in ascending order.
:param str filter_operator: Operator to apply to the filters. Supported values are ``"all"``
and ``"any"``. These are just another way of defining if the query is an AND or OR
query. Defaults to ``"all"``.
:param bool retired_only: Optional boolean when ``True`` will return only entities that have
been retired. Defaults to ``False`` which returns only entities which have not been
retired. There is no option to return both retired and non-retired entities in the
same query.
:param bool include_archived_projects: Optional boolean flag to include entities whose projects
have been archived. Defaults to ``True``.
:param additional_filter_presets: Optional list of presets to further filter the result
set, list has the form::
[{"preset_name": <preset_name>, <optional_param1>: <optional_value1>, ... }]
Note that these filters are ANDed together and ANDed with the 'filter'
argument.
For details on supported presets and the format of this parameter see
:ref:`additional_filter_presets`
:returns: Dictionary representing a single matching entity with the requested fields,
and the defaults ``"id"`` and ``"type"`` which are always included.
:rtype: dict
"""
results = self.find(entity_type, filters, fields, order, filter_operator, 1, retired_only,
include_archived_projects=include_archived_projects,
additional_filter_presets=additional_filter_presets)
if results:
return results[0]
return None
def find(self, entity_type, filters, fields=None, order=None, filter_operator=None, limit=0,
retired_only=False, page=0, include_archived_projects=True, additional_filter_presets=None):
"""
Find entities matching the given filters.
>>> # Find Character Assets in Sequence 100_FOO
>>> # -------------
>>> fields = ['id', 'code', 'sg_asset_type']
>>> sequence_id = 2 # Sequence "100_FOO"
>>> project_id = 4 # Demo Project
>>> filters = [
... ['project', 'is', {'type': 'Project', 'id': project_id}],
... ['sg_asset_type', 'is', 'Character'],
... ['sequences', 'is', {'type': 'Sequence', 'id': sequence_id}]
... ]
>>> assets = sg.find("Asset", filters, fields)
[{'code': 'Gopher', 'id': 32, 'sg_asset_type': 'Character', 'type': 'Asset'},
{'code': 'Cow', 'id': 33, 'sg_asset_type': 'Character', 'type': 'Asset'},
{'code': 'Bird_1', 'id': 35, 'sg_asset_type': 'Character', 'type': 'Asset'},
{'code': 'Bird_2', 'id': 36, 'sg_asset_type': 'Character', 'type': 'Asset'},
{'code': 'Bird_3', 'id': 37, 'sg_asset_type': 'Character', 'type': 'Asset'},
{'code': 'Raccoon', 'id': 45, 'sg_asset_type': 'Character', 'type': 'Asset'},
{'code': 'Wet Gopher', 'id': 149, 'sg_asset_type': 'Character', 'type': 'Asset'}]
You can drill through single entity links to filter on fields or display linked fields.
This is often called "deep linking" or using "dot syntax".
.. seealso:: :ref:`filter_syntax`
>>> # Find Versions created by Tasks in the Animation Pipeline Step
>>> # -------------
>>> fields = ['id', 'code']
>>> pipeline_step_id = 2 # Animation Step ID
>>> project_id = 4 # Demo Project
>>> # you can drill through single-entity link fields
>>> filters = [
... ['project','is', {'type': 'Project','id': project_id}],
... ['sg_task.Task.step.Step.id', 'is', pipeline_step_id]
... ]
>>> sg.find("Version", filters, fields)
[{'code': 'scene_010_anim_v001', 'id': 42, 'type': 'Version'},
{'code': 'scene_010_anim_v002', 'id': 134, 'type': 'Version'},
{'code': 'bird_v001', 'id': 137, 'type': 'Version'},
{'code': 'birdAltBlue_v002', 'id': 236, 'type': 'Version'}]
:param str entity_type: Shotgun entity type to find.
:param list filters: list of filters to apply to the query.
.. seealso:: :ref:`filter_syntax`
:param list fields: Optional list of fields to include in each entity record returned.
Defaults to ``["id"]``.
:param list order: Optional list of dictionaries defining how to order the results of the
query. Each dictionary contains the ``field_name`` to order by and the ``direction``
to sort::
[{'field_name':'foo', 'direction':'asc'}, {'field_name':'bar', 'direction':'desc'}]
Defaults to sorting by ``id`` in ascending order.
:param str filter_operator: Operator to apply to the filters. Supported values are ``"all"``
and ``"any"``. These are just another way of defining if the query is an AND or OR
query. Defaults to ``"all"``.
:param int limit: Optional limit to the number of entities to return. Defaults to ``0`` which
returns all entities that match.
:param int page: Optional page of results to return. Use this together with the ``limit``
parameter to control how your query results are paged. Defaults to ``0`` which returns
all entities that match.
:param bool retired_only: Optional boolean when ``True`` will return only entities that have
been retired. Defaults to ``False`` which returns only entities which have not been
retired. There is no option to return both retired and non-retired entities in the
same query.
:param bool include_archived_projects: Optional boolean flag to include entities whose projects
have been archived. Defaults to ``True``.
:param additional_filter_presets: Optional list of presets to further filter the result
set, list has the form::
[{"preset_name": <preset_name>, <optional_param1>: <optional_value1>, ... }]
Note that these filters are ANDed together and ANDed with the 'filter'
argument.
For details on supported presets and the format of this parameter see
:ref:`additional_filter_presets`
:returns: list of dictionaries representing each entity with the requested fields, and the
defaults ``"id"`` and ``"type"`` which are always included.
:rtype: list
"""
if not isinstance(limit, int) or limit < 0:
raise ValueError("limit parameter must be a positive integer")
if not isinstance(page, int) or page < 0:
raise ValueError("page parameter must be a positive integer")
if isinstance(filters, (list, tuple)):
filters = _translate_filters(filters, filter_operator)
elif filter_operator:
# TODO: Not sure if this test is correct, replicated from prev api
raise ShotgunError("Deprecated: Use of filter_operator for find() is not valid any more."
" See the documentation on find()")
if not include_archived_projects:
# This defaults to True on the server (no argument is sent)
# So we only need to check the server version if it is False
self.server_caps.ensure_include_archived_projects()
if additional_filter_presets:
self.server_caps.ensure_support_for_additional_filter_presets()
params = self._construct_read_parameters(entity_type,
fields,
filters,
retired_only,
order,
include_archived_projects,
additional_filter_presets)
if self.server_caps.ensure_return_image_urls_support():
params["api_return_image_urls"] = True
if self.server_caps.ensure_paging_info_without_counts_support():
paging_info_param = "return_paging_info_without_counts"
else:
paging_info_param = "return_paging_info"
params[paging_info_param] = False
if limit and limit <= self.config.records_per_page:
params["paging"]["entities_per_page"] = limit
# If page isn't set and the limit doesn't require pagination,
# then trigger the faster code path.
if page == 0:
page = 1
# if page is specified, then only return the page of records requested
if page != 0:
params["paging"]["current_page"] = page
records = self._call_rpc("read", params).get("entities", [])
return self._parse_records(records)
params[paging_info_param] = True
records = []
if self.server_caps.ensure_paging_info_without_counts_support():
has_next_page = True
while has_next_page:
result = self._call_rpc("read", params)
records.extend(result.get("entities"))
if limit and len(records) >= limit:
records = records[:limit]
break
has_next_page = result["paging_info"]["has_next_page"]
params["paging"]["current_page"] += 1
else:
result = self._call_rpc("read", params)
while result.get("entities"):
records.extend(result.get("entities"))
if limit and len(records) >= limit:
records = records[:limit]
break
if len(records) == result["paging_info"]["entity_count"]:
break
params["paging"]["current_page"] += 1
result = self._call_rpc("read", params)
return self._parse_records(records)
def _construct_read_parameters(self,
entity_type,
fields,
filters,
retired_only,
order,
include_archived_projects,
additional_filter_presets):
params = {}
params["type"] = entity_type
params["return_fields"] = fields or ["id"]
params["filters"] = filters
params["return_only"] = (retired_only and "retired") or "active"
params["paging"] = {"entities_per_page": self.config.records_per_page,
"current_page": 1}
if additional_filter_presets:
params["additional_filter_presets"] = additional_filter_presets
if include_archived_projects is False:
# Defaults to True on the server, so only pass it if it's False
params["include_archived_projects"] = False
if order:
sort_list = []
for sort in order:
if "column" in sort:
# TODO: warn about deprecation of 'column' param name
sort["field_name"] = sort["column"]
sort.setdefault("direction", "asc")
sort_list.append({
"field_name": sort["field_name"],
"direction": sort["direction"]
})
params["sorts"] = sort_list
return params
def _add_project_param(self, params, project_entity):
if project_entity and self.server_caps.ensure_per_project_customization():
params["project"] = project_entity
return params
def summarize(self,
entity_type,
filters,
summary_fields,
filter_operator=None,
grouping=None,
include_archived_projects=True):
"""
Summarize field data returned by a query.
This provides the same functionality as the summaries in the UI. You can specify one or
more fields to summarize, choose the summary type for each, and optionally group the
results which will return summary information for each group as well as the total for
the query.
**Example: Count all Assets for a Project**
>>> sg.summarize(entity_type='Asset',
... filters = [['project', 'is', {'type':'Project', 'id':4}]],
... summary_fields=[{'field':'id', 'type':'count'}])
{'groups': [], 'summaries': {'id': 15}}
``summaries`` contains the total summary for the query. Each key is the field summarized
and the value is the result of the summary operation for the entire result set.
.. note::
You cannot perform more than one summary on a field at a time, but you can summarize
several different fields in the same call.
**Example: Count all Assets for a Project, grouped by sg_asset_type**
>>> sg.summarize(entity_type='Asset',
... filters=[['project', 'is', {'type': 'Project', 'id': 4}]],
... summary_fields=[{'field': 'id', 'type': 'count'}],
... grouping=[{'field': 'sg_asset_type', 'type': 'exact', 'direction': 'asc'}])
{'groups': [{'group_name': 'Character','group_value': 'Character', 'summaries': {'id': 3}},
{'group_name': 'Environment','group_value': 'Environment', 'summaries': {'id': 3}},
{'group_name': 'Matte Painting', 'group_value': 'Matte Painting', 'summaries': {'id': 1}},
{'group_name': 'Prop', 'group_value': 'Prop', 'summaries': {'id': 4}},
{'group_name': 'Vehicle', 'group_value': 'Vehicle', 'summaries': {'id': 4}}],
'summaries': {'id': 15}}
- ``summaries`` contains the total summary for the query.
- ``groups`` contains the summary for each group.
- ``group_name`` is the display name for the group.
- ``group_value`` is the actual value of the grouping value. This is often the same as
``group_name`` but in the case when grouping by entity, the ``group_name`` may be
``PuppyA`` and the group_value would be
``{'type':'Asset','id':922,'name':'PuppyA'}``.
- ``summaries`` contains the summary calculation dict for each field requested.
**Example: Count all Tasks for a Sequence and find the latest due_date**
>>> sg.summarize(entity_type='Task',
... filters = [
... ['entity.Shot.sg_sequence', 'is', {'type':'Sequence', 'id':2}],
... ['sg_status_list', 'is_not', 'na']],
... summary_fields=[{'field':'id', 'type':'count'},
... {'field':'due_date','type':'latest'}])
{'groups': [], 'summaries': {'due_date': '2013-07-05', 'id': 30}}
This shows that there are 30 Tasks for Shots in the Sequence and the latest ``due_date``
of any Task is ``2013-07-05``.
**Example: Count all Tasks for a Sequence, find the latest due_date and group by Shot**
>>> sg.summarize(entity_type='Task',
... filters = [
... ['entity.Shot.sg_sequence', 'is', {'type': 'Sequence', 'id': 2}],
... ['sg_status_list', 'is_not', 'na']],
... summary_fields=[{'field': 'id', 'type': 'count'}, {'field': 'due_date', 'type': 'latest'}],
... grouping=[{'field': 'entity', 'type': 'exact', 'direction': 'asc'}])
{'groups': [{'group_name': 'shot_010',
'group_value': {'id': 2, 'name': 'shot_010', 'type': 'Shot', 'valid': 'valid'},
'summaries': {'due_date': '2013-06-18', 'id': 10}},
{'group_name': 'shot_020',
'group_value': {'id': 3, 'name': 'shot_020', 'type': 'Shot', 'valid': 'valid'},
'summaries': {'due_date': '2013-06-28', 'id': 10}},
{'group_name': 'shot_030',
'group_value': {'id': 4, 'name': 'shot_030', 'type': 'Shot', 'valid': 'valid'},
'summaries': {'due_date': '2013-07-05', 'id': 10}}],
'summaries': {'due_date': '2013-07-05', 'id': 30}}
This shows that there are 30 Tasks for Shots in the Sequence and the latest ``due_date``
of any Task is ``2013-07-05``. Because the summary is grouped by ``entity``, we can also
see the summaries for each Shot returned. Each Shot has 10 Tasks and the latest ``due_date``
for each Shot. The difference between ``group_name`` and ``group_value`` is highlighted in
this example as the name of the Shot is different from its value.
**Example: Count all Tasks for a Sequence, find the latest due_date, group by Shot and
Pipeline Step**
>>> sg.summarize(entity_type='Task',
... filters = [
... ['entity.Shot.sg_sequence', 'is', {'type': 'Sequence', 'id': 2}],
... ['sg_status_list', 'is_not', 'na']],
... summary_fields=[{'field': 'id', 'type': 'count'},
... {'field': 'due_date', 'type': 'latest'}],
... grouping=[{'field': 'entity', 'type': 'exact', 'direction': 'asc'},
... {'field': 'step', 'type': 'exact', 'direction': 'asc'}])
{'groups': [{'group_name': 'shot_010',
'group_value': {'id': 2, 'name': 'shot_010', 'type': 'Shot', 'valid': 'valid'},
'groups': [{'group_name': 'Client',
'group_value': {'id': 1, 'name': 'Client', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-05-04', 'id': 1}},
{'group_name': 'Online',
'group_value': {'id': 2, 'name': 'Online', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-05-05', 'id': 1}},
...
... truncated for brevity
...
{'group_name': 'Comp',
'group_value': {'id': 8, 'name': 'Comp', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-06-18', 'id': 1}}],
'summaries': {'due_date': '2013-06-18', 'id': 10}},
{'group_name': 'shot_020',
'group_value': {'id': 3, 'name': 'shot_020', 'type': 'Shot', 'valid': 'valid'},
'groups': [{'group_name': 'Client',
'group_value': {'id': 1, 'name': 'Client', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-05-15', 'id': 1}},
{'group_name': 'Online',
'group_value': {'id': 2, 'name': 'Online', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-05-16', 'id': 1}},
...
... truncated for brevity
...
{'group_name': 'Comp',
'group_value': {'id': 8, 'name': 'Comp', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-06-28', 'id': 1}}],
'summaries': {'due_date': '2013-06-28', 'id': 10}},
{'group_name': 'shot_030',
'group_value': {'id': 4, 'name': 'shot_030', 'type': 'Shot', 'valid': 'valid'},
'groups': [{'group_name': 'Client',
'group_value': {'id': 1, 'name': 'Client', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-05-20', 'id': 1}},
{'group_name': 'Online',
'group_value': {'id': 2, 'name': 'Online', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-05-21', 'id': 1}},
...
... truncated for brevity
...
{'group_name': 'Comp',
'group_value': {'id': 8, 'name': 'Comp', 'type': 'Step', 'valid': 'valid'},
'summaries': {'due_date': '2013-07-05', 'id': 1}}],
'summaries': {'due_date': '2013-07-05', 'id': 10}}],
'summaries': {'due_date': '2013-07-05', 'id': 30}}
When grouping by more than one field, the grouping structure is repeated for each sub-group
and summary values are returned for each group on each level.
:param str entity_type: The entity type to summarize
:param list filters: A list of conditions used to filter the find query. Uses the same
syntax as :meth:`~shotgun_api3.Shotgun.find` method.
:param list summary_fields: A list of dictionaries with the following keys:
:field: The internal Shotgun field name you are summarizing.
:type: The type of summary you are performing on the field. Summary types can be any of
``record_count``, ``count``, ``sum``, ``maximum``, ``minimum``, ``average``,
``earliest``, ``latest``, ``percentage``, ``status_percentage``, ``status_list``,
``checked``, ``unchecked`` depending on the type of field you're summarizing.
:param str filter_operator: Operator to apply to the filters. Supported values are ``"all"``
and ``"any"``. These are just another way of defining if the query is an AND or OR
query. Defaults to ``"all"``.
:param list grouping: Optional list of dicts with the following keys:
:field: a string indicating the internal Shotgun field name on ``entity_type`` to
group results by.
:type: A string indicating the type of grouping to perform for each group.
Valid types depend on the type of field you are grouping on and can be one of
``exact``, ``tens``, ``hundreds``, ``thousands``, ``tensofthousands``,
``hundredsofthousands``, ``millions``, ``day``, ``week``, ``month``,
``quarter``, ``year``, ``clustered_date``, ``oneday``, ``fivedays``,
``entitytype``, ``firstletter``.
:direction: A string that sets the order to display the grouped results. Valid
options are ``asc`` and ``desc``. Defaults to ``asc``.
:returns: dictionary containing ``groups`` and ``summaries`` keys.
:rtype: dict
"""
if not isinstance(grouping, list) and grouping is not None:
msg = "summarize() 'grouping' parameter must be a list or None"
raise ValueError(msg)
if isinstance(filters, (list, tuple)):
filters = _translate_filters(filters, filter_operator)
if not include_archived_projects:
# This defaults to True on the server (no argument is sent)
# So we only need to check the server version if it is False
self.server_caps.ensure_include_archived_projects()
params = {"type": entity_type,
"summaries": summary_fields,
"filters": filters}
if include_archived_projects is False:
# Defaults to True on the server, so only pass it if it's False
params["include_archived_projects"] = False
if grouping is not None:
params["grouping"] = grouping
records = self._call_rpc("summarize", params)
return records
def create(self, entity_type, data, return_fields=None):
"""
Create a new entity of the specified ``entity_type``.
>>> data = {
... "project": {"type": "Project", "id": 161},
... "sg_sequence": {"type": "Sequence", "id": 109},
... "code": "001_100",
... 'sg_status_list': "ip"
... }
>>> sg.create('Shot', data)
{'code': '001_100',
'id': 2557,
'project': {'id': 161, 'name': 'Pied Piper', 'type': 'Project'},
'sg_sequence': {'id': 109, 'name': 'Sequence 001', 'type': 'Sequence'},
'sg_status_list': 'ip',
'type': 'Shot'}
:param str entity_type: Shotgun entity type to create.
:param dict data: Dictionary of fields and corresponding values to set on the new entity. If
``image`` or ``filmstrip_image`` fields are provided, the file path will be uploaded
to the server automatically.
:param list return_fields: Optional list of additional field values to return from the new
entity. Defaults to ``id`` field.
:returns: Shotgun entity dictionary containing the field/value pairs of all of the fields
set from the ``data`` parameter as well as the defaults ``type`` and ``id``. If any
additional fields were provided using the ``return_fields`` parameter, these would be
included as well.
:rtype: dict
"""
data = data.copy()
if not return_fields:
return_fields = ["id"]
upload_image = None
if "image" in data:
upload_image = data.pop("image")
upload_filmstrip_image = None
if "filmstrip_image" in data:
if not self.server_caps.version or self.server_caps.version < (3, 1, 0):
raise ShotgunError("Filmstrip thumbnail support requires server version 3.1 or "
"higher, server is %s" % (self.server_caps.version,))
upload_filmstrip_image = data.pop("filmstrip_image")
params = {
"type": entity_type,
"fields": self._dict_to_list(data),
"return_fields": return_fields
}
record = self._call_rpc("create", params, first=True)
result = self._parse_records(record)[0]
if upload_image:
self.upload_thumbnail(entity_type, result["id"], upload_image)
image = self.find_one(entity_type, [["id", "is", result.get("id")]], fields=["image"])
result["image"] = image.get("image")
if upload_filmstrip_image:
self.upload_filmstrip_thumbnail(entity_type, result["id"], upload_filmstrip_image)
filmstrip = self.find_one(entity_type, [["id", "is", result.get("id")]], fields=["filmstrip_image"])
result["filmstrip_image"] = filmstrip.get("filmstrip_image")
return result
def update(self, entity_type, entity_id, data, multi_entity_update_modes=None):
"""
Update the specified entity with the supplied data.
>>> shots = [
...     {'type': 'Shot', 'id': 40435},
...     {'type': 'Shot', 'id': 40438},
...     {'type': 'Shot', 'id': 40441}]
>>> data = {
...     'shots': shots,
...     'sg_status_list': 'rev'}
>>> sg.update("Asset", 55, data)
{'type': 'Asset',
 'id': 55,
 'sg_status_list': 'rev',
 'shots': [{'id': 40435, 'name': '100_010', 'type': 'Shot', 'valid': 'valid'},
           {'id': 40438, 'name': '100_040', 'type': 'Shot', 'valid': 'valid'},
           {'id': 40441, 'name': '100_070', 'type': 'Shot', 'valid': 'valid'}]
}
:param str entity_type: Entity type to update.
:param id entity_id: id of the entity to update.
:param dict data: key/value pairs where key is the field name and value is the value to set
for that field. This method does not restrict the updating of fields hidden in the web
UI via the Project Tracking Settings panel.
:param dict multi_entity_update_modes: Optional dict indicating what update mode to use
when updating a multi-entity link field. The keys in the dict are the fields to set
the mode for, and the values from the dict are one of ``set``, ``add``, or ``remove``.
Defaults to ``set``.
::
multi_entity_update_modes={"shots": "add", "assets": "remove"}
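A hedged sketch of a full call using such a mode dict (the Asset and Shot ids below are
hypothetical)::

    sg.update("Asset", 55,
              {"shots": [{"type": "Shot", "id": 40435}]},
              multi_entity_update_modes={"shots": "add"})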
:returns: Dictionary of the fields updated, with the default keys `type` and `id` added as well.
:rtype: dict
"""
data = data.copy()
upload_image = None
if "image" in data and data["image"] is not None:
upload_image = data.pop("image")
upload_filmstrip_image = None
if "filmstrip_image" in data:
if not self.server_caps.version or self.server_caps.version < (3, 1, 0):
raise ShotgunError("Filmstrip thumbnail support requires server version 3.1 or "
"higher, server is %s" % (self.server_caps.version,))
upload_filmstrip_image = data.pop("filmstrip_image")
if data:
params = {
"type": entity_type,
"id": entity_id,
"fields": self._dict_to_list(
data,
extra_data=self._dict_to_extra_data(
multi_entity_update_modes, "multi_entity_update_mode"))
}
record = self._call_rpc("update", params)
result = self._parse_records(record)[0]
else:
result = {"id": entity_id, "type": entity_type}
if upload_image:
self.upload_thumbnail(entity_type, entity_id, upload_image)
image = self.find_one(entity_type, [["id", "is", result.get("id")]], fields=["image"])
result["image"] = image.get("image")
if upload_filmstrip_image:
self.upload_filmstrip_thumbnail(entity_type, result["id"], upload_filmstrip_image)
filmstrip = self.find_one(entity_type, [["id", "is", result.get("id")]], fields=["filmstrip_image"])
result["filmstrip_image"] = filmstrip.get("filmstrip_image")
return result
def delete(self, entity_type, entity_id):
"""
Retire the specified entity.
Entities in Shotgun are not "deleted" destructively; instead, they are "retired". This
means they are placed in the trash, where they are no longer accessible to users.
The entity can be brought back to life using :meth:`~shotgun_api3.Shotgun.revive`.
>>> sg.delete("Shot", 2557)
True
:param str entity_type: Shotgun entity type to delete.
:param id entity_id: ``id`` of the entity to delete.
:returns: ``True`` if the entity was deleted, ``False`` otherwise (for example, if the
entity was already deleted).
:rtype: bool
:raises: :class:`Fault` if entity does not exist (deleted or not).
"""
params = {
"type": entity_type,
"id": entity_id
}
return self._call_rpc("delete", params)
def revive(self, entity_type, entity_id):
"""
Revive an entity that has previously been deleted.
>>> sg.revive("Shot", 860)
True
:param str entity_type: Shotgun entity type to revive.
:param int entity_id: id of the entity to revive.
:returns: ``True`` if the entity was revived, ``False`` otherwise (e.g. if the
entity is not currently retired).
:rtype: bool
"""
params = {
"type": entity_type,
"id": entity_id
}
return self._call_rpc("revive", params)
def batch(self, requests):
"""
Make a batch request of several :meth:`~shotgun_api3.Shotgun.create`,
:meth:`~shotgun_api3.Shotgun.update`, and :meth:`~shotgun_api3.Shotgun.delete` calls.
All requests are performed within a transaction, so either all will complete or none will.
Ex. Make a bunch of shots::
batch_data = []
for i in range(1,100):
data = {
"code": "shot_%04d" % i,
"project": project
}
batch_data.append({"request_type": "create", "entity_type": "Shot", "data": data})
sg.batch(batch_data)
Example output::
[{'code': 'shot_0001',
'type': 'Shot',
'id': 3624,
'project': {'id': 4, 'name': 'Demo Project', 'type': 'Project'}},
...
... and a bunch more ...
...
{'code': 'shot_0099',
'type': 'Shot',
'id': 3722,
'project': {'id': 4, 'name': 'Demo Project', 'type': 'Project'}}]
Ex. All three types of requests in one batch::
batch_data = [
{"request_type": "create", "entity_type": "Shot", "data": {"code": "New Shot 1", "project": project}},
{"request_type": "update", "entity_type": "Shot", "entity_id": 3624, "data": {"code": "Changed 1"}},
{"request_type": "delete", "entity_type": "Shot", "entity_id": 3624}
]
sg.batch(batch_data)
Example output::
[{'code': 'New Shot 1', 'type': 'Shot', 'id': 3723,
'project': {'id': 4, 'name': 'Demo Project', 'type': 'Project'}},
{'code': 'Changed 1', 'type': 'Shot', 'id': 3624},
True]
:param list requests: A list of dicts, each of which has a ``request_type`` key and also
specifies:
- create: ``entity_type`` and a ``data`` dict of fields to set
- update: ``entity_type``, ``entity_id``, a ``data`` dict of fields to set,
and optionally ``multi_entity_update_modes`` (see the sketch below)
- delete: ``entity_type`` and ``entity_id``
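A hedged sketch of an update request that uses ``multi_entity_update_modes`` (the entity
ids below are hypothetical)::

    {"request_type": "update", "entity_type": "Asset", "entity_id": 55,
     "data": {"shots": [{"type": "Shot", "id": 40435}]},
     "multi_entity_update_modes": {"shots": "add"}}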
:returns: A list of values for each operation. Create and update requests return a dict of
the fields updated. Delete requests return ``True`` if the entity was deleted.
:rtype: list
"""
if not isinstance(requests, list):
raise ShotgunError("batch() expects a list. Instead was sent a %s" % type(requests))
# If we have no requests, just return an empty list immediately.
# Nothing to process means nothing to get results of.
if len(requests) == 0:
return []
calls = []
def _required_keys(message, required_keys, data):
missing = set(required_keys) - set(data.keys())
if missing:
raise ShotgunError("%s missing required key: %s. "
"Value was: %s." % (message, ", ".join(missing), data))
for req in requests:
_required_keys("Batched request",
["request_type", "entity_type"],
req)
request_params = {"request_type": req["request_type"], "type": req["entity_type"]}
if req["request_type"] == "create":
_required_keys("Batched create request", ["data"], req)
request_params["fields"] = self._dict_to_list(req["data"])
request_params["return_fields"] = req.get("return_fields") or["id"]
elif req["request_type"] == "update":
_required_keys("Batched update request",
["entity_id", "data"],
req)
request_params["id"] = req["entity_id"]
request_params["fields"] = self._dict_to_list(
req["data"],
extra_data=self._dict_to_extra_data(
req.get("multi_entity_update_modes"),
"multi_entity_update_mode"
)
)
if "multi_entity_update_mode" in req:
request_params["multi_entity_update_mode"] = req["multi_entity_update_mode"]
elif req["request_type"] == "delete":
_required_keys("Batched delete request", ["entity_id"], req)
request_params["id"] = req["entity_id"]
else:
raise ShotgunError("Invalid request_type '%s' for batch" % (
req["request_type"]))
calls.append(request_params)
records = self._call_rpc("batch", calls)
return self._parse_records(records)
def work_schedule_read(self, start_date, end_date, project=None, user=None):
"""
Return the work day rules for a given date range.
.. versionadded:: 3.0.9
Requires Shotgun server v3.2.0+
This returns the defined WorkDayRules between the ``start_date`` and ``end_date`` inclusive
as a dict where the key is the date and the value is another dict describing the rule for
that date.
Rules are represented by a dict with the following keys:
:description: the description entered into the work day rule exception if applicable.
:reason: one of six options:
- STUDIO_WORK_WEEK: standard studio schedule applies
- STUDIO_EXCEPTION: studio-wide exception applies
- PROJECT_WORK_WEEK: standard project schedule applies
- PROJECT_EXCEPTION: project-specific exception applies
- USER_WORK_WEEK: standard user work week applies
- USER_EXCEPTION: user-specific exception applies
:working: boolean indicating whether it is a "working" day or not.
>>> sg.work_schedule_read("2015-12-21", "2015-12-25")
{'2015-12-21': {'description': None,
'reason': 'STUDIO_WORK_WEEK',
'working': True},
'2015-12-22': {'description': None,
'reason': 'STUDIO_WORK_WEEK',
'working': True},
'2015-12-23': {'description': None,
'reason': 'STUDIO_WORK_WEEK',
'working': True},
'2015-12-24': {'description': 'Closed for Christmas Eve',
'reason': 'STUDIO_EXCEPTION',
'working': False},
'2015-12-25': {'description': 'Closed for Christmas',
'reason': 'STUDIO_EXCEPTION',
'working': False}}
:param str start_date: Start date of date range. ``YYYY-MM-DD``
:param str end_date: End date of date range. ``YYYY-MM-DD``
:param dict project: Optional Project entity to query `WorkDayRules` for.
:param dict user: Optional HumanUser entity to query WorkDayRules for.
:returns: Complex dict containing each date and the WorkDayRule defined for that date
between the ``start_date`` and ``end_date`` inclusive. See above for details.
:rtype: dict
"""
if not self.server_caps.version or self.server_caps.version < (3, 2, 0):
raise ShotgunError("Work schedule support requires server version 3.2 or "
"higher, server is %s" % (self.server_caps.version,))
if not isinstance(start_date, str) or not isinstance(end_date, str):
raise ShotgunError("The start_date and end_date arguments must be strings in YYYY-MM-DD format")
params = dict(
start_date=start_date,
end_date=end_date,
project=project,
user=user
)
return self._call_rpc("work_schedule_read", params)
def work_schedule_update(self, date, working, description=None, project=None, user=None,
recalculate_field=None):
"""
Update the work schedule for a given date.
.. versionadded:: 3.0.9
Requires Shotgun server v3.2.0+
If neither ``project`` nor ``user`` is passed in, the studio work schedule will be updated.
``project`` and ``user`` are mutually exclusive; supply at most one of them.
>>> sg.work_schedule_update("2015-12-31", working=False,
... description="Studio closed for New Years Eve", project=None,
... user=None, recalculate_field=None)
{'date': '2015-12-31',
'description': "Studio closed for New Years Eve",
'project': None,
'user': None,
'working': False}
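A hedged sketch of a user-specific exception (the HumanUser id below is hypothetical):

>>> rule = sg.work_schedule_update("2016-01-04", working=False,
...     description="Personal day", user={"type": "HumanUser", "id": 42})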
:param str date: Date of WorkDayRule to update. ``YYYY-MM-DD``
:param bool working: Indicates whether the day is a working day or not.
:param str description: Optional reason for time off.
:param dict project: Optional Project entity to assign the rule to. Cannot be used with the
``user`` param.
:param dict user: Optional HumanUser entity to assign the rule to. Cannot be used with the
``project`` param.
:param str recalculate_field: Optional schedule field that will be recalculated on Tasks
when they are affected by a change in working schedule. Options are ``due_date`` or
``duration``. Defaults to the value set in the Shotgun web application's Site
Preferences.
:returns: dict containing key/value pairs for each value of the work day rule updated.
:rtype: dict
"""
if not self.server_caps.version or self.server_caps.version < (3, 2, 0):
raise ShotgunError("Work schedule support requires server version 3.2 or "
"higher, server is %s" % (self.server_caps.version,))
if not isinstance(date, str):
raise ShotgunError("The date argument must be string in YYYY-MM-DD format")
params = dict(
date=date,
working=working,
description=description,
project=project,
user=user,
recalculate_field=recalculate_field
)
return self._call_rpc("work_schedule_update", params)
def follow(self, user, entity):
"""
Add the entity to the user's followed entities.
If the user is already following the entity, the method will succeed but nothing will be
changed on the server-side.
>>> sg.follow({"type": "HumanUser", "id": 42}, {"type": "Shot", "id": 2050})
{'followed': True, 'user': {'type': 'HumanUser', 'id': 42},
'entity': {'type': 'Shot', 'id': 2050}}
:param dict user: User entity that will follow the entity.
:param dict entity: Shotgun entity to be followed.
:returns: dict with ``"followed": True`` as well as key/values for the params that were
passed in.
:rtype: dict
"""
if not self.server_caps.version or self.server_caps.version < (5, 1, 22):
raise ShotgunError("Follow support requires server version 5.2 or "
"higher, server is %s" % (self.server_caps.version,))
params = dict(
user=user,
entity=entity
)
return self._call_rpc("follow", params)
def unfollow(self, user, entity):
"""
Remove entity from the user's followed entities.
This does nothing if the user is not following the entity.
>>> sg.unfollow({"type": "HumanUser", "id": 42}, {"type": "Shot", "id": 2050})
{'entity': {'type': 'Shot', 'id': 2050}, 'user': {'type': 'HumanUser', 'id': 42},
'unfollowed': True}
:param dict user: User entity that will unfollow the entity.
:param dict entity: Entity to be unfollowed
:returns: dict with ``"unfollowed": True`` as well as key/values for the params that were
passed in.
:rtype: dict
"""
if not self.server_caps.version or self.server_caps.version < (5, 1, 22):
raise ShotgunError("Follow support requires server version 5.2 or "
"higher, server is %s" % (self.server_caps.version,))
params = dict(
user=user,
entity=entity
)
return self._call_rpc("unfollow", params)
def followers(self, entity):
"""
Return all followers for an entity.
>>> sg.followers({"type": "Shot", "id": 2050})
[{'status': 'act', 'valid': 'valid', 'type': 'HumanUser', 'name': 'Richard Hendriks',
'id': 42},
{'status': 'act', 'valid': 'valid', 'type': 'HumanUser', 'name': 'Bertram Gilfoyle',
'id': 33},
{'status': 'act', 'valid': 'valid', 'type': 'HumanUser', 'name': 'Dinesh Chugtai',
'id': 57}]
:param dict entity: Entity to find followers of.
:returns: list of dicts representing each user following the entity
:rtype: list
"""
if not self.server_caps.version or self.server_caps.version < (5, 1, 22):
raise ShotgunError("Follow support requires server version 5.2 or "
"higher, server is %s" % (self.server_caps.version,))
params = dict(
entity=entity
)
return self._call_rpc("followers", params)
def following(self, user, project=None, entity_type=None):
"""
Return all entity instances a user is following.
Optionally, a project and/or entity_type can be supplied to restrict returned results.
>>> user = {"type": "HumanUser", "id": 1234}
>>> project = {"type": "Project", "id": 1234}
>>> entity_type = "Task"
>>> sg.following(user, project=project, entity_type=entity_type)
[{"type":"Task", "id":1},
{"type":"Task", "id":2},
{"type":"Task", "id":3}]
:param dict user: Find what this person is following.
:param dict project: Optional filter to only return results from a specific project.
:param str entity_type: Optional filter to only return results from one entity type.
:returns: list of dictionaries, each containing entity type & id's being followed.
:rtype: list
"""
self.server_caps.ensure_user_following_support()
params = {
"user": user
}
if project:
params["project"] = project
if entity_type:
params["entity_type"] = entity_type
return self._call_rpc("following", params)
def schema_entity_read(self, project_entity=None):
"""
Return all active entity types, their display names, and their visibility.
If the project parameter is specified, the schema visibility for the given project is
returned. If the project parameter is omitted or set to ``None``, a full listing is
returned and per-project entity type visibility settings are not considered.
>>> sg.schema_entity_read()
{'ActionMenuItem': {'name': {'editable': False, 'value': 'Action Menu Item'},
'visible': {'editable': False, 'value': True}},
'ApiUser': {'name': {'editable': False, 'value': 'Script'},
'visible': {'editable': False, 'value': True}},
'AppWelcomeUserConnection': {'name': {'editable': False,
'value': 'App Welcome User Connection'},
'visible': {'editable': False, 'value': True}},
'Asset': {'name': {'editable': False, 'value': 'Asset'},
'visible': {'editable': False, 'value': True}},
'AssetAssetConnection': {'name': {'editable': False,
'value': 'Asset Asset Connection'},
'visible': {'editable': False, 'value': True}},
'...'
}
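A hedged sketch of a project-scoped read (the Project id below is hypothetical); the same
structure is returned, with visibility evaluated for that project:

>>> project_schema = sg.schema_entity_read(project_entity={'type': 'Project', 'id': 3})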
:param dict project_entity: Optional Project entity specifying which project to return
the listing for. If omitted or set to ``None``, per-project visibility settings are
not taken into consideration and the global list is returned. Example:
``{'type': 'Project', 'id': 3}``
:returns: dict of Entity Type to dict containing the display name.
:rtype: dict
.. note::
The returned display names for this method will be localized when the ``localize`` Shotgun config property is set to ``True``. See :ref:`localization` for more information.
"""
params = {}
params = self._add_project_param(params, project_entity)
if params:
return self._call_rpc("schema_entity_read", params)
else:
return self._call_rpc("schema_entity_read", None)
def schema_read(self, project_entity=None):
"""
Get the schema for all fields on all entities.
.. note::
If ``project_entity`` is not specified, everything is reported as visible.
>>> sg.schema_read()
{'ActionMenuItem': {'created_at': {'data_type': {'editable': False, 'value': 'date_time'},
'description': {'editable': True, 'value': ''},
'editable': {'editable': False, 'value': False},
'entity_type': {'editable': False, 'value': 'ActionMenuItem'},
'mandatory': {'editable': False, 'value': False},
'name': {'editable': True, 'value': 'Date Created'},
'properties': {'default_value': {'editable': False, 'value': None},
'summary_default': {'editable': True, 'value': 'none'}},
'unique': {'editable': False, 'value': False},
'visible': {'editable': False, 'value': True}},
'created_by': {'data_type': {'editable': False,'value': 'entity'},
'description': {'editable': True,'value': ''},
'editable': {'editable': False,'value': False},
'entity_type': {'editable': False,'value': 'ActionMenuItem'},
'mandatory': {'editable': False,'value': False},
'name': {'editable': True,'value': 'Created by'},
'properties': {'default_value': {'editable': False,'value': None},
'summary_default': {'editable': True,'value': 'none'},
'valid_types': {'editable': True,'value':
['HumanUser','ApiUser']}},
'unique': {'editable': False,'value': False},
'visible': {'editable': False,'value': True}},
...
...
...
...
'Version': {'client_approved': {'data_type': {'editable': False,'value': 'checkbox'},
'description': {'editable': True,'value': ''},
'editable': {'editable': False,'value': True},
'entity_type': {'editable': False,'value': 'Version'},
'mandatory': {'editable': False,'value': False},
'name': {'editable': True,'value': 'Client Approved'},
'properties': {'default_value': {'editable': False,'value': False},
'summary_default': {'editable': False,'value': 'none'}},
'unique': {'editable': False,'value': False},
'visible': {'editable': False,'value': True}},
...
...
...
...
}
:param dict project_entity: Optional, Project entity specifying which project to return
the listing for. If omitted or set to ``None``, per-project visibility settings are
not taken into consideration and the global list is returned. Example:
``{'type': 'Project', 'id': 3}``. Defaults to ``None``.
:returns: A nested dict object containing a key/value pair for all fields of all entity
types. Properties that are ``'editable': True``, can be updated using the
:meth:`~shotgun_api3.Shotgun.schema_field_update` method.
:rtype: dict
.. note::
The returned display names for this method will be localized when the ``localize`` Shotgun config property is set to ``True``. See :ref:`localization` for more information.
"""
params = {}
params = self._add_project_param(params, project_entity)
if params:
return self._call_rpc("schema_read", params)
else:
return self._call_rpc("schema_read", None)
def schema_field_read(self, entity_type, field_name=None, project_entity=None):
"""
Get schema for all fields on the specified entity type or just the field name specified
if provided.
.. note::
Unlike how the results of a :meth:`~shotgun_api3.Shotgun.find` can be pumped into a
:meth:`~shotgun_api3.Shotgun.create` or :meth:`~shotgun_api3.Shotgun.update`, the
results of :meth:`~shotgun_api3.Shotgun.schema_field_read` are not compatible with
the format used for :meth:`~shotgun_api3.Shotgun.schema_field_create` or
:meth:`~shotgun_api3.Shotgun.schema_field_update`. If you need to pipe the results
from :meth:`~shotgun_api3.Shotgun.schema_field_read` into a
:meth:`~shotgun_api3.Shotgun.schema_field_create` or
:meth:`~shotgun_api3.Shotgun.schema_field_update`, you will need to reformat the
data in your script.
.. note::
If you don't specify a ``project_entity``, everything is reported as visible.
.. note::
The returned display names for this method will be localized when the ``localize`` Shotgun config property is set to ``True``. See :ref:`localization` for more information.
>>> sg.schema_field_read('Asset', 'shots')
{'shots': {'data_type': {'editable': False, 'value': 'multi_entity'},
'description': {'editable': True, 'value': ''},
'editable': {'editable': False, 'value': True},
'entity_type': {'editable': False, 'value': 'Asset'},
'mandatory': {'editable': False, 'value': False},
'name': {'editable': True, 'value': 'Shots'},
'properties': {'default_value': {'editable': False,
'value': None},
'summary_default': {'editable': True,
'value': 'none'},
'valid_types': {'editable': True,
'value': ['Shot']}},
'unique': {'editable': False, 'value': False},
'visible': {'editable': False, 'value': True}}}
:param str entity_type: Entity type to get the schema for.
:param str field_name: Optional internal Shotgun name of the field to get the schema
definition for. If this parameter is excluded or set to ``None``, data structures of
all fields will be returned. Defaults to ``None``. Example: ``sg_temp_field``.
:param dict project_entity: Optional Project entity specifying which project to return
the listing for. If omitted or set to ``None``, per-project visibility settings are
not taken into consideration and the global list is returned. Example:
``{'type': 'Project', 'id': 3}``
:returns: a nested dict object containing a key/value pair for the ``field_name`` specified
and its properties, or if no field_name is specified, for all the fields of the
``entity_type``. Properties that are ``'editable': True``, can be updated using the
:meth:`~shotgun_api3.Shotgun.schema_field_update` method.
:rtype: dict
"""
params = {
"type": entity_type,
}
if field_name:
params["field_name"] = field_name
params = self._add_project_param(params, project_entity)
return self._call_rpc("schema_field_read", params)
def schema_field_create(self, entity_type, data_type, display_name, properties=None):
"""
Create a field for the specified entity type.
.. note::
If the internal Shotgun field name computed from the provided ``display_name`` already
exists, the internal Shotgun field name will automatically be appended with ``_1`` in
order to create a unique name. The integer suffix will be incremented by 1 until a
unique name is found.
>>> properties = {"summary_default": "count", "description": "Complexity breakdown of Asset"}
>>> sg.schema_field_create("Asset", "text", "Complexity", properties)
'sg_complexity'
:param str entity_type: Entity type to add the field to.
:param str data_type: Shotgun data type for the new field.
:param str display_name: Specifies the display name of the field you are creating. The
system name will be created from this display name and returned upon successful
creation.
:param dict properties: Dict of valid properties for the new field. Use this to specify
other field properties such as the 'description' or 'summary_default'.
:returns: The internal Shotgun name for the new field. This is different from the
``display_name`` parameter passed in.
:rtype: str
"""
params = {
"type": entity_type,
"data_type": data_type,
"properties": [
{"property_name": "name", "value": display_name}
]
}
params["properties"].extend(self._dict_to_list(properties, key_name="property_name", value_name="value"))
return self._call_rpc("schema_field_create", params)
def schema_field_update(self, entity_type, field_name, properties, project_entity=None):
"""
Update the properties for the specified field on an entity.
.. note::
Although the property name may be the key in a nested dictionary, like
'summary_default', it is treated no differently than keys that are up
one level, like 'description'.
>>> properties = {"name": "Test Number Field Renamed", "summary_default": "sum",
... "description": "this is only a test"}
>>> sg.schema_field_update("Asset", "sg_test_number", properties)
True
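A hedged sketch of toggling per-project visibility (the Project id below is hypothetical);
note that ``visible`` can only be changed when ``project_entity`` is supplied:

>>> sg.schema_field_update("Asset", "sg_test_number", {"visible": False},
...                        project_entity={"type": "Project", "id": 3})
True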
:param str entity_type: Entity type of field to update.
:param str field_name: Internal Shotgun name of the field to update.
:param dict properties: Dictionary with key/value pairs where the key is the property to be
updated and the value is the new value.
:param dict project_entity: Optional Project entity specifying which project to modify the
``visible`` property for. If ``visible`` is present in ``properties`` and
``project_entity`` is not set, an exception will be raised. Example:
``{'type': 'Project', 'id': 3}``
:returns: ``True`` if the field was updated.
.. note::
The ``project_entity`` parameter can only affect the state of the ``visible`` property
and has no impact on other properties.
:rtype: bool
"""
params = {
"type": entity_type,
"field_name": field_name,
"properties": [
{"property_name": k, "value": v}
for k, v in six.iteritems((properties or {}))
]
}
params = self._add_project_param(params, project_entity)
return self._call_rpc("schema_field_update", params)
def schema_field_delete(self, entity_type, field_name):
"""
Delete the specified field from the entity type.
>>> sg.schema_field_delete("Asset", "sg_temp_field")
True
:param str entity_type: Entity type to delete the field from.
:param str field_name: Internal Shotgun name of the field to delete.
:returns: ``True`` if the field was deleted.
:rtype: bool
"""
params = {
"type": entity_type,
"field_name": field_name
}
return self._call_rpc("schema_field_delete", params)
def add_user_agent(self, agent):
"""
Add agent to the user-agent header.
Appends agent to the user-agent string sent with every API request.
>>> sg.add_user_agent("my_tool 1.0")
:param str agent: string to append to user-agent.
"""
self._user_agents.append(agent)
def reset_user_agent(self):
"""
Reset user agent to the default value.
Example default user-agent::
shotgun-json (3.0.17); Python 2.6 (Mac); ssl OpenSSL 1.0.2d 9 Jul 2015 (validate)
"""
ua_platform = "Unknown"
if self.client_caps.platform is not None:
ua_platform = self.client_caps.platform.capitalize()
# create ssl validation string based on settings
validation_str = "validate"
if self.config.no_ssl_validation:
validation_str = "no-validate"
self._user_agents = ["shotgun-json (%s)" % __version__,
"Python %s (%s)" % (self.client_caps.py_version, ua_platform),
"ssl %s (%s)" % (self.client_caps.ssl_version, validation_str)]
def set_session_uuid(self, session_uuid):
"""
Set the browser session_uuid in the current Shotgun API instance.
When this is set, any events generated by the API will include the ``session_uuid`` value
on the corresponding EventLogEntries. If there is a current browser session open with
this ``session_uuid``, the browser will display updates for these events.
>>> sg.set_session_uuid("5a1d49b0-0c69-11e0-a24c-003048d17544")
:param str session_uuid: The uuid of the browser session to be updated.
"""
self.config.session_uuid = session_uuid
return
def share_thumbnail(self, entities, thumbnail_path=None, source_entity=None,
filmstrip_thumbnail=False, **kwargs):
"""
Associate a thumbnail with more than one Shotgun entity.
.. versionadded:: 3.0.9
Requires Shotgun server v4.0.0+
Share a thumbnail between entities without uploading the thumbnail file multiple times.
You can use this in two ways:
1) Upload an image to set as the thumbnail on multiple entities.
2) Update multiple entities to point to an existing entity's thumbnail.
.. note::
When sharing a filmstrip thumbnail, it is required to have a static thumbnail in
place before the filmstrip will be displayed in the Shotgun web UI.
If the :ref:`thumbnail is still processing and is using a placeholder
<interpreting_image_field_strings>`, this method will error.
Simple use case:
>>> thumb = '/data/show/ne2/100_110/anim/01.mlk-02b.jpg'
>>> e = [{'type': 'Version', 'id': 123}, {'type': 'Version', 'id': 456}]
>>> sg.share_thumbnail(entities=e, thumbnail_path=thumb)
4271
>>> e = [{'type': 'Version', 'id': 123}, {'type': 'Version', 'id': 456}]
>>> sg.share_thumbnail(entities=e, source_entity={'type':'Version', 'id': 789})
4271
:param list entities: The entities to update to point to the shared thumbnail provided in
standard entity dict format::
[{'type': 'Version', 'id': 123},
{'type': 'Version', 'id': 456}]
:param str thumbnail_path: The full path to the local thumbnail file to upload and share.
Required if ``source_entity`` is not provided.
:param dict source_entity: The entity whose thumbnail will be the source for sharing.
Required if ``thumbnail_path`` is not provided.
:param bool filmstrip_thumbnail: ``True`` to share the filmstrip thumbnail. ``False`` to
share the static thumbnail. Defaults to ``False``.
:returns: ``id`` of the Attachment entity representing the source thumbnail that is shared.
:rtype: int
:raises: :class:`ShotgunError` if not supported by server version or improperly called,
or :class:`ShotgunThumbnailNotReady` if thumbnail is still pending.
"""
if not self.server_caps.version or self.server_caps.version < (4, 0, 0):
raise ShotgunError("Thumbnail sharing support requires server "
"version 4.0 or higher, server is %s" % (self.server_caps.version,))
if not isinstance(entities, list) or len(entities) == 0:
raise ShotgunError("'entities' parameter must be a list of entity "
"hashes and may not be empty")
for e in entities:
if not isinstance(e, dict) or "id" not in e or "type" not in e:
raise ShotgunError("'entities' parameter must be a list of "
"entity hashes with at least 'type' and 'id' keys.\nInvalid "
"entity: %s" % e)
if (not thumbnail_path and not source_entity) or (thumbnail_path and source_entity):
raise ShotgunError("You must supply either thumbnail_path OR source_entity.")
# upload thumbnail
if thumbnail_path:
source_entity = entities.pop(0)
if filmstrip_thumbnail:
thumb_id = self.upload_filmstrip_thumbnail(
source_entity["type"],
source_entity["id"],
thumbnail_path,
**kwargs
)
else:
thumb_id = self.upload_thumbnail(
source_entity["type"],
source_entity["id"],
thumbnail_path,
**kwargs
)
else:
if not isinstance(source_entity, dict) or "id" not in source_entity or "type" not in source_entity:
raise ShotgunError("'source_entity' parameter must be a dict "
"with at least 'type' and 'id' keys.\nGot: %s (%s)"
% (source_entity, type(source_entity)))
# only 1 entity in list and we already uploaded the thumbnail to it
if len(entities) == 0:
return thumb_id
# update entities with source_entity thumbnail
entities_str = []
for e in entities:
entities_str.append("%s_%s" % (e["type"], e["id"]))
# format for post request
if filmstrip_thumbnail:
filmstrip_thumbnail = 1
params = {
"entities": ",".join(entities_str),
"source_entity": "%s_%s" % (source_entity["type"], source_entity["id"]),
"filmstrip_thumbnail": filmstrip_thumbnail,
}
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/upload/share_thumbnail", None, None, None))
result = self._send_form(url, params)
if result.startswith("1:"):
# clearing thumbnail returns no attachment_id
try:
attachment_id = int(result.split(":", 2)[1].split("\n", 1)[0])
except ValueError:
attachment_id = None
elif result.startswith("2"):
raise ShotgunThumbnailNotReady("Unable to share thumbnail: %s" % result)
else:
raise ShotgunError("Unable to share thumbnail: %s" % result)
return attachment_id
def upload_thumbnail(self, entity_type, entity_id, path, **kwargs):
"""
Upload a file from a local path and assign it as the thumbnail for the specified entity.
.. note::
Images will automatically be re-sized on the server to generate a size-appropriate image
file. However, the original file is retained as well and is accessible when you click
on the thumbnail image in the web UI. If you are using a local install of Shotgun and
have not enabled S3, this can eat up disk space if you're uploading really large source
images for your thumbnails.
You can un-set (aka clear) a thumbnail on an entity using the
:meth:`~shotgun_api3.Shotgun.update` method and setting the **image** field to ``None``.
This will also unset the ``filmstrip_thumbnail`` field if it is set.
Supported image file types include ``.jpg`` and ``.png`` (preferred) but will also accept
``.gif``, ``.tif``, ``.tiff``, ``.bmp``, ``.exr``, ``.dpx``, and ``.tga``.
This method wraps over :meth:`~shotgun_api3.Shotgun.upload`. Additional keyword arguments
passed to this method will be forwarded to the :meth:`~shotgun_api3.Shotgun.upload` method.
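A minimal usage sketch (the entity id, path, and returned Attachment id below are
hypothetical):

>>> sg.upload_thumbnail("Version", 27, "/data/show/ne2/thumbnails/01.mlk-02b.jpg")
62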
:param str entity_type: Entity type to set the thumbnail for.
:param int entity_id: Id of the entity to set the thumbnail for.
:param str path: Full path to the thumbnail file on disk.
:returns: Id of the new attachment
"""
return self.upload(entity_type, entity_id, path, field_name="thumb_image", **kwargs)
def upload_filmstrip_thumbnail(self, entity_type, entity_id, path, **kwargs):
"""
Upload filmstrip thumbnail to specified entity.
.. versionadded:: 3.0.9
Requires Shotgun server v3.1.0+
Uploads a file from a local directory and assigns it as the filmstrip thumbnail for the
specified entity. The image must be a horizontal strip of any number of frames that are
exactly 240 pixels wide. Therefore the whole strip must be an exact multiple of 240 pixels
in width. The height can be anything (and will depend on the aspect ratio of the frames).
Any image file type that works for thumbnails will work for filmstrip thumbnails.
Filmstrip thumbnails will only be visible in the Thumbnail field on an entity if a
regular thumbnail image is also uploaded to the entity. The standard thumbnail is
displayed by default as the poster frame. Then, on hover, the filmstrip thumbnail is
displayed and updated based on your horizontal cursor position for scrubbing. On mouseout,
the default thumbnail is displayed again as the poster frame.
The url for a filmstrip thumbnail on an entity is available by querying for the
``filmstrip_image`` field.
You can un-set (aka clear) a thumbnail on an entity using the
:meth:`~shotgun_api3.Shotgun.update` method and setting the **image** field to ``None``.
This will also unset the ``filmstrip_thumbnail`` field if it is set.
This method wraps over :meth:`~shotgun_api3.Shotgun.upload`. Additional keyword arguments
passed to this method will be forwarded to the :meth:`~shotgun_api3.Shotgun.upload` method.
>>> filmstrip_thumbnail = '/data/show/ne2/100_110/anim/01.mlk-02b_filmstrip.jpg'
>>> sg.upload_filmstrip_thumbnail("Version", 27, filmstrip_thumbnail)
87
:param str entity_type: Entity type to set the filmstrip thumbnail for.
:param int entity_id: Id of the entity to set the filmstrip thumbnail for.
:param str path: Full path to the filmstrip thumbnail file on disk.
:returns: Id of the new Attachment entity created for the filmstrip thumbnail
:rtype: int
"""
if not self.server_caps.version or self.server_caps.version < (3, 1, 0):
raise ShotgunError("Filmstrip thumbnail support requires server version 3.1 or "
"higher, server is %s" % (self.server_caps.version,))
return self.upload(entity_type, entity_id, path, field_name="filmstrip_thumb_image", **kwargs)
def upload(self, entity_type, entity_id, path, field_name=None, display_name=None,
tag_list=None):
"""
Upload a file to the specified entity.
Creates an Attachment entity for the file in Shotgun and links it to the specified entity.
You can optionally store the file in a field on the entity, change the display name, and
assign tags to the Attachment.
.. note::
Make sure to retry file uploads. Upload failures will occasionally happen; when they do,
immediately retrying the upload usually works (see the retry sketch below).
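A minimal retry sketch; the retry count, delay, and file path below are arbitrary example
values, not library defaults::

    import time

    for attempt in range(3):
        try:
            # upload() returns the id of the new Attachment on success
            attachment_id = sg.upload("Shot", 423, "/data/show/ne2/100_110/anim/01.mlk-02b.mov")
            break
        except ShotgunError:
            if attempt == 2:
                raise
            time.sleep(2)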
>>> mov_file = '/data/show/ne2/100_110/anim/01.mlk-02b.mov'
>>> sg.upload("Shot", 423, mov_file, field_name="sg_latest_quicktime",
... display_name="Latest QT")
72
:param str entity_type: Entity type to link the upload to.
:param int entity_id: Id of the entity to link the upload to.
:param str path: Full path to an existing non-empty file on disk to upload.
:param str field_name: The internal Shotgun field name on the entity to store the file in.
This field must be a File/Link field type.
:param str display_name: The display name to use for the file. Defaults to the file name.
:param str tag_list: comma-separated string of tags to assign to the file.
:returns: Id of the Attachment entity that was created for the image.
:rtype: int
:raises: :class:`ShotgunError` on upload failure.
"""
# Basic validations of the file to upload.
path = os.path.abspath(os.path.expanduser(path or ""))
# We need to check for string encodings that we aren't going to be able
# to support later in the upload process. If the given path wasn't already
# unicode, we will try to decode it as utf-8, and if that fails then we
# have to raise a sane exception. This will always work for ascii and utf-8
# encoded strings, but will fail on some others if the string includes non
# ascii characters.
if not isinstance(path, six.text_type):
try:
path = path.decode("utf-8")
except UnicodeDecodeError:
raise ShotgunError(
"Could not upload the given file path. It is encoded as "
"something other than utf-8 or ascii. To upload this file, "
"it can be string encoded as utf-8, or given as unicode: %s" % path
)
if not os.path.isfile(path):
raise ShotgunError("Path must be a valid file, got '%s'" % path)
if os.path.getsize(path) == 0:
raise ShotgunError("Path cannot be an empty file: '%s'" % path)
is_thumbnail = (field_name in ["thumb_image", "filmstrip_thumb_image", "image",
"filmstrip_image"])
# Supported types can be directly uploaded to Cloud storage
if self._requires_direct_s3_upload(entity_type, field_name):
return self._upload_to_storage(entity_type, entity_id, path, field_name, display_name,
tag_list, is_thumbnail)
else:
return self._upload_to_sg(entity_type, entity_id, path, field_name, display_name,
tag_list, is_thumbnail)
def _upload_to_storage(self, entity_type, entity_id, path, field_name, display_name,
tag_list, is_thumbnail):
"""
Internal function to upload a file to the Cloud storage and link it to the specified entity.
:param str entity_type: Entity type to link the upload to.
:param int entity_id: Id of the entity to link the upload to.
:param str path: Full path to an existing non-empty file on disk to upload.
:param str field_name: The internal Shotgun field name on the entity to store the file in.
This field must be a File/Link field type.
:param str display_name: The display name to use for the file. Defaults to the file name.
:param str tag_list: comma-separated string of tags to assign to the file.
:param bool is_thumbnail: indicates if the attachment is a thumbnail.
:returns: Id of the Attachment entity that was created for the image.
:rtype: int
"""
filename = os.path.basename(path)
# Step 1: get the upload url
is_multipart_upload = (os.path.getsize(path) > self._MULTIPART_UPLOAD_CHUNK_SIZE)
upload_info = self._get_attachment_upload_info(is_thumbnail, filename, is_multipart_upload)
# Step 2: upload the file
# We upload large files in multiple parts because it is more robust
# (and required when using S3 storage)
if is_multipart_upload:
self._multipart_upload_file_to_storage(path, upload_info)
else:
self._upload_file_to_storage(path, upload_info["upload_url"])
# Step 3: create the attachment
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/upload/api_link_file", None, None, None))
params = {
"entity_type": entity_type,
"entity_id": entity_id,
"upload_link_info": upload_info["upload_info"]
}
params.update(self._auth_params())
if is_thumbnail:
if field_name == "filmstrip_thumb_image" or field_name == "filmstrip_image":
params["filmstrip"] = True
else:
if display_name is None:
display_name = filename
# we allow linking to nothing for generic reference use cases
if field_name is not None:
params["field_name"] = field_name
params["display_name"] = display_name
# None gets converted to a string and added as a tag...
if tag_list:
params["tag_list"] = tag_list
result = self._send_form(url, params)
if not result.startswith("1"):
raise ShotgunError("Could not upload file successfully, but "
"not sure why.\nPath: %s\nUrl: %s\nError: %s"
% (path, url, result))
LOG.debug("Attachment linked to content on Cloud storage")
attachment_id = int(result.split(":", 2)[1].split("\n", 1)[0])
return attachment_id
def _upload_to_sg(self, entity_type, entity_id, path, field_name, display_name,
tag_list, is_thumbnail):
"""
Internal function to upload a file to Shotgun and link it to the specified entity.
:param str entity_type: Entity type to link the upload to.
:param int entity_id: Id of the entity to link the upload to.
:param str path: Full path to an existing non-empty file on disk to upload.
:param str field_name: The internal Shotgun field name on the entity to store the file in.
This field must be a File/Link field type.
:param str display_name: The display name to use for the file. Defaults to the file name.
:param str tag_list: comma-separated string of tags to assign to the file.
:param bool is_thumbnail: indicates if the attachment is a thumbnail.
:returns: Id of the Attachment entity that was created for the image.
:rtype: int
"""
params = {
"entity_type": entity_type,
"entity_id": entity_id,
}
params.update(self._auth_params())
# If we ended up with a unicode string path, we need to encode it
# as a utf-8 string. If we don't, there's a chance that there will
# will be an attempt later on to encode it as an ascii string, and
# that will fail ungracefully if the path contains any non-ascii
# characters.
#
# On Windows, if the path contains non-ascii characters, the calls
# to open later in this method will fail to find the file if given
# a non-ascii-encoded string path. In that case, we're going to have
# to call open on the unicode path, but we'll use the encoded string
# for everything else.
path_to_open = path
if isinstance(path, six.text_type):
path = path.encode("utf-8")
if sys.platform != "win32":
path_to_open = path
if is_thumbnail:
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/upload/publish_thumbnail", None, None, None))
params["thumb_image"] = open(path_to_open, "rb")
if field_name == "filmstrip_thumb_image" or field_name == "filmstrip_image":
params["filmstrip"] = True
else:
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/upload/upload_file", None, None, None))
if display_name is None:
display_name = os.path.basename(path)
# we allow linking to nothing for generic reference use cases
if field_name is not None:
params["field_name"] = field_name
params["display_name"] = display_name
# None gets converted to a string and added as a tag...
if tag_list:
params["tag_list"] = tag_list
params["file"] = open(path_to_open, "rb")
result = self._send_form(url, params)
if not result.startswith("1"):
raise ShotgunError("Could not upload file successfully, but "
"not sure why.\nPath: %s\nUrl: %s\nError: %s"
% (path, url, result))
attachment_id = int(result.split(":", 2)[1].split("\n", 1)[0])
return attachment_id
def _get_attachment_upload_info(self, is_thumbnail, filename, is_multipart_upload):
"""
Internal function to get the information needed to upload a file to Cloud storage.
:param bool is_thumbnail: indicates if the attachment is a thumbnail.
:param str filename: name of the file that will be uploaded.
:param bool is_multipart_upload: Indicates if we want multi-part upload information back.
:returns: dictionary containing upload details from the server.
These details are used throughout the upload process.
:rtype: dict
"""
if is_thumbnail:
upload_type = "Thumbnail"
else:
upload_type = "Attachment"
params = {
"upload_type": upload_type,
"filename": filename
}
params["multipart_upload"] = is_multipart_upload
upload_url = "/upload/api_get_upload_link_info"
url = urllib.parse.urlunparse((self.config.scheme, self.config.server, upload_url, None, None, None))
upload_info = self._send_form(url, params)
if not upload_info.startswith("1"):
raise ShotgunError("Could not get upload_url but "
"not sure why.\nPath: %s\nUrl: %s\nError: %s"
% (filename, url, upload_info))
LOG.debug("Completed rpc call to %s" % (upload_url))
upload_info_parts = upload_info.split("\n")
return {
"upload_url": upload_info_parts[1],
"timestamp": upload_info_parts[2],
"upload_type": upload_info_parts[3],
"upload_id": upload_info_parts[4],
"upload_info": upload_info
}
def download_attachment(self, attachment=False, file_path=None, attachment_id=None):
"""
Download the file associated with a Shotgun Attachment.
>>> version = sg.find_one("Version", [["id", "is", 7115]], ["sg_uploaded_movie"])
>>> local_file_path = "/var/tmp/%s" % version["sg_uploaded_movie"]["name"]
>>> sg.download_attachment(version["sg_uploaded_movie"], file_path=local_file_path)
/var/tmp/100b_scene_output_v032.mov
.. warning::
On older (< v5.1.0) Shotgun versions, non-downloadable files
on Shotgun don't raise exceptions, they cause a server error which
returns a 200 with the page content.
:param dict attachment: Usually a dictionary representing an Attachment entity.
The dictionary should have a ``url`` key that specifies the download url.
Optionally, the dictionary can be a standard entity hash format with ``id`` and
``type`` keys as long as ``"type"=="Attachment"``. This is only supported for
backwards compatibility (#22150).
If an int value is passed in, the Attachment entity with the matching id will
be downloaded from the Shotgun server.
:param str file_path: Optional file path to write the data directly to local disk. This
avoids loading all of the data in memory and saves the file locally at the given path.
:param id attachment_id: (deprecated) Optional ``id`` of the Attachment entity in Shotgun to
download.
.. note::
This parameter exists only for backwards compatibility for scripts specifying
the parameter with keywords.
:returns: If ``file_path`` is provided, returns the path to the file on disk. If
``file_path`` is ``None``, returns the actual data of the file, as str in Python 2 or
bytes in Python 3.
:rtype: str | bytes
"""
# backwards compatibility when passed via keyword argument
if attachment is False:
if isinstance(attachment_id, int):
attachment = attachment_id
else:
raise TypeError("Missing parameter 'attachment'. Expected a "
"dict, int, or NoneType value for parameter 'attachment', "
"or an int for parameter 'attachment_id'")
# write to disk
if file_path:
try:
fp = open(file_path, "wb")
except IOError as e:
raise IOError("Unable to write Attachment to disk using "
"file_path. %s" % e)
url = self.get_attachment_download_url(attachment)
if url is None:
return None
# We only need to set the auth cookie for downloads from Shotgun server
if self.config.server in url:
self.set_up_auth_cookie()
try:
request = urllib.request.Request(url)
request.add_header("user-agent", "; ".join(self._user_agents))
req = urllib.request.urlopen(request)
if file_path:
shutil.copyfileobj(req, fp)
else:
attachment = req.read()
# 400 [sg] Attachment id doesn't exist or is a local file
# 403 [s3] link is invalid
except urllib.error.URLError as e:
if file_path:
fp.close()
err = "Failed to open %s\n%s" % (url, e)
if hasattr(e, "code"):
if e.code == 400:
err += "\nAttachment may not exist or is a local file?"
elif e.code == 403:
# Only parse the body if it is an Amazon S3 url.
if url.find("s3.amazonaws.com") != -1 and e.headers["content-type"] == "application/xml":
body = [six.ensure_text(line) for line in e.readlines()]
if body:
xml = "".join(body)
# Once python 2.4 support is not needed we can think about using
# elementtree. The doc is pretty small so this shouldn't be an issue.
match = re.search("<Message>(.*)</Message>", xml)
if match:
err += " - %s" % (match.group(1))
elif e.code == 409 or e.code == 410:
# we may be dealing with a file that is pending/failed a malware scan, e.g:
# 409: This file is undergoing a malware scan, please try again in a few minutes
# 410: File scanning has detected malware and the file has been quarantined
lines = e.readlines()
if lines:
err += "\n%s\n" % "".join(lines)
raise ShotgunFileDownloadError(err)
else:
if file_path:
if not fp.closed:
fp.close()
return file_path
else:
return attachment
def set_up_auth_cookie(self):
"""
Set up urllib2 with a cookie for authentication on the Shotgun instance.
Looks up the session token and sets it in a cookie in the :mod:`urllib2` handler. This is
used internally for downloading attachments from the Shotgun server.
"""
sid = self.get_session_token()
cj = http_cookiejar.LWPCookieJar()
c = http_cookiejar.Cookie("0", "_session_id", sid, None, False, self.config.server, False,
False, "/", True, False, None, True, None, None, {})
cj.set_cookie(c)
cookie_handler = urllib.request.HTTPCookieProcessor(cj)
opener = self._build_opener(cookie_handler)
urllib.request.install_opener(opener)
def get_attachment_download_url(self, attachment):
"""
Return the URL for downloading provided Attachment.
:param mixed attachment: Usually a dict representing an Attachment entity in Shotgun to
return the download url for. If the ``url`` key is present, it will be used as-is for
the download url. If the ``url`` key is not present, a url will be constructed pointing
at the current Shotgun server for downloading the Attachment entity using the ``id``.
If ``None`` is passed in, it is silently ignored in order to avoid raising an error when
results from a :meth:`~shotgun_api3.Shotgun.find` are passed off to
:meth:`~shotgun_api3.Shotgun.download_attachment`
.. note::
Support for passing in an int representing the Attachment ``id`` is deprecated
:returns: the download URL for the Attachment or ``None`` if ``None`` was passed to
``attachment`` parameter.
:rtype: str
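Example (illustrative; the dict shown is the kind returned by
:meth:`~shotgun_api3.Shotgun.find` and the URL value is a placeholder):
>>> sg.get_attachment_download_url({"type": "Attachment", "id": 1234,
... "url": "https://sg-media.s3.amazonaws.com/..."})
'https://sg-media.s3.amazonaws.com/...'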
"""
# TODO: Support for a standard entity hash should be removed: #22150
attachment_id = None
if isinstance(attachment, int):
attachment_id = attachment
elif isinstance(attachment, dict):
try:
url = attachment["url"]
except KeyError:
if ("id" in attachment and "type" in attachment and attachment["type"] == "Attachment"):
attachment_id = attachment["id"]
else:
raise ValueError("Missing 'url' key in Attachment dict")
elif attachment is None:
url = None
else:
raise TypeError("Unable to determine download url. Expected "
"dict, int, or NoneType. Instead got %s" % type(attachment))
if attachment_id:
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/file_serve/attachment/%s" % urllib.parse.quote(str(attachment_id)),
None, None, None))
return url
def authenticate_human_user(self, user_login, user_password, auth_token=None):
"""
Authenticate Shotgun HumanUser.
Authenticates a user given the login, password, and optionally, one-time auth token (when
two-factor authentication is required). The user must be a ``HumanUser`` entity and the
account must be active.
>>> sg.authenticate_human_user("rhendriks", "c0mPre$Hi0n", None)
{"type": "HumanUser", "id": 123, "name": "Richard Hendriks"}
:param str user_login: Login name of Shotgun HumanUser
:param str user_password: Password for Shotgun HumanUser
:param str auth_token: One-time token required to authenticate Shotgun HumanUser
when two-factor authentication is turned on.
:returns: Standard Shotgun dictionary representing the HumanUser if authentication
succeeded. ``None`` if authentication failed for any reason.
:rtype: dict
"""
if not user_login:
raise ValueError("Please supply a username to authenticate.")
if not user_password:
raise ValueError("Please supply a password for the user.")
# Override permissions on Config obj
original_login = self.config.user_login
original_password = self.config.user_password
original_auth_token = self.config.auth_token
self.config.user_login = user_login
self.config.user_password = user_password
self.config.auth_token = auth_token
try:
data = self.find_one("HumanUser", [["sg_status_list", "is", "act"],
["login", "is", user_login]],
["id", "login"], "", "all")
# Set back to default - finally and except cannot be used together in python2.4
self.config.user_login = original_login
self.config.user_password = original_password
self.config.auth_token = original_auth_token
return data
except Fault:
# Set back to default - finally and except cannot be used together in python2.4
self.config.user_login = original_login
self.config.user_password = original_password
self.config.auth_token = original_auth_token
except Exception:
# Set back to default - finally and except cannot be used together in python2.4
self.config.user_login = original_login
self.config.user_password = original_password
self.config.auth_token = original_auth_token
raise
def update_project_last_accessed(self, project, user=None):
"""
Update a Project's ``last_accessed_by_current_user`` field to the current timestamp.
This helps keep track of the recent Projects each user has worked on and enables scripts
and apps to use this information to display "Recent Projects" for users as a convenience.
.. versionadded::
Requires Shotgun v5.3.20+
>>> sg.update_project_last_accessed({"type": "Project", "id": 66},
... {"type": "HumanUser", "id": 43})
:param dict project: Standard Project entity dictionary
:param dict user: Standard user entity dictionary. This is optional if the current API
instance is using user-based authentication, or has specified ``sudo_as_login``. In
these cases, if ``user`` is not provided, the ``sudo_as_login`` value or ``login``
value from the current instance will be used instead.
"""
if self.server_caps.version and self.server_caps.version < (5, 3, 20):
raise ShotgunError("update_project_last_accessed requires server version 5.3.20 or "
"higher, server is %s" % (self.server_caps.version,))
if not user:
# Try to use sudo as user if present
if self.config.sudo_as_login:
user = self.find_one("HumanUser", [["login", "is", self.config.sudo_as_login]])
# Try to use login if present
if self.config.user_login:
user = self.find_one("HumanUser", [["login", "is", self.config.user_login]])
params = {"project_id": project["id"], }
if user:
params["user_id"] = user["id"]
record = self._call_rpc("update_project_last_accessed_by_current_user", params)
self._parse_records(record)[0]
def note_thread_read(self, note_id, entity_fields=None):
"""
Return the full conversation for a given note, including Replies and Attachments.
Returns a complex data structure of the following form::
[{'content': 'Please add more awesomeness to the color grading.',
'created_at': '2015-07-14 21:33:28 UTC',
'created_by': {'id': 38,
'name': 'John Pink',
'status': 'act',
'type': 'HumanUser',
'valid': 'valid'},
'id': 6013,
'type': 'Note'},
{'created_at': '2015-07-14 21:33:32 UTC',
'created_by': {'id': 38,
'name': 'John Pink',
'status': 'act',
'type': 'HumanUser',
'valid': 'valid'},
'id': 159,
'type': 'Attachment'},
{'content': 'More awesomeness added',
'created_at': '2015-07-14 21:54:51 UTC',
'id': 5,
'type': 'Reply',
'user': {'id': 38,
'name': 'David Blue',
'status': 'act',
'type': 'HumanUser',
'valid': 'valid'}}]
The list is returned in descending chronological order.
If you wish to include additional fields beyond the ones that are
returned by default, you can specify these in an entity_fields
dictionary. This dictionary should be keyed by entity type and each
key should contain a list of fields to retrieve, for example::
{ "Note": ["created_by.HumanUser.image",
"addressings_to",
"playlist",
"user" ],
"Reply": ["content"],
"Attachment": ["filmstrip_image",
"local_storage",
"this_file",
"image"]
}
:param int note_id: The id for the note to be retrieved
:param dict entity_fields: Additional fields to retrieve as part of the request.
See above for details.
:returns: A list of dictionaries. See above for example.
:rtype: list
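Example call (illustrative; the note id and extra fields are placeholders
matching the sample output above):
>>> sg.note_thread_read(6013, {"Reply": ["content"]})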
"""
if self.server_caps.version and self.server_caps.version < (6, 2, 0):
raise ShotgunError("note_thread requires server version 6.2.0 or "
"higher, server is %s" % (self.server_caps.version,))
entity_fields = entity_fields or {}
if not isinstance(entity_fields, dict):
raise ValueError("entity_fields parameter must be a dictionary")
params = {"note_id": note_id, "entity_fields": entity_fields}
record = self._call_rpc("note_thread_contents", params)
result = self._parse_records(record)
return result
def text_search(self, text, entity_types, project_ids=None, limit=None):
"""
Search across the specified entity types for the given text.
This method can be used to implement auto completion or a Shotgun global search. The method
requires a text input phrase that is at least three characters long, or an exception will
be raised.
Several ways to limit the results of the query are available:
- Using the ``project_ids`` parameter, you can provide a list of Project ids to search
across. Leaving this at its default value of ``None`` will search across all Shotgun data.
- You need to define which subset of entity types to search using the ``entity_types``
parameter. Each of these entity types can be associated with a filter query to further
reduce the list of matches. The filter list uses the standard filter syntax used,
for example, by the :meth:`~shotgun_api3.Shotgun.find` method.
**Example: Constrain the search to all Tasks but Character Assets only**
>>> entity_types = {
... "Asset": [["sg_asset_type", "is", "Character"]],
... "Task": []
... }
>>> sg.text_search("bunny", entity_types)
{'matches': [{'id': 734,
'type': 'Asset',
'name': 'Bunny',
'project_id': 65,
'image': 'https://...',
'links': ['', ''],
'status': 'fin'},
...
{'id': 558,
'type': 'Task',
'name': 'FX',
'project_id': 65,
'image': 'https://...',
'links': ['Shot', 'bunny_010_0010'],
'status': 'fin'}],
'terms': ['bunny']}
The links field will contain information about any linked entity. This is useful when, for
example, presenting Tasks and you want to display what Shot or Asset the Task is associated
with.
:param str text: Text to search for. This must be at least three characters long, or an
exception will be raised.
:param dict entity_types: Dictionary to specify which entity types to search across. See
above for usage examples.
:param list project_ids: List of Projects to search. By default, all projects will be
searched.
:param int limit: Specify the maximum number of matches to return.
:returns: A complex dictionary structure, see above for example.
:rtype: dict
"""
if self.server_caps.version and self.server_caps.version < (6, 2, 0):
raise ShotgunError("auto_complete requires server version 6.2.0 or "
"higher, server is %s" % (self.server_caps.version,))
# convert entity_types structure into the form
# that the API endpoint expects
if not isinstance(entity_types, dict):
raise ValueError("entity_types parameter must be a dictionary")
api_entity_types = {}
for (entity_type, filter_list) in six.iteritems(entity_types):
if isinstance(filter_list, (list, tuple)):
resolved_filters = _translate_filters(filter_list, filter_operator=None)
api_entity_types[entity_type] = resolved_filters
else:
raise ValueError("value of entity_types['%s'] must "
"be a list or tuple." % entity_type)
project_ids = project_ids or []
params = {"text": text,
"entity_types": api_entity_types,
"project_ids": project_ids,
"max_results": limit}
record = self._call_rpc("query_display_name_cache", params)
result = self._parse_records(record)[0]
return result
def activity_stream_read(self, entity_type, entity_id, entity_fields=None, min_id=None,
max_id=None, limit=None):
"""
Retrieve activity stream data from Shotgun.
This data corresponds to the data that is displayed in the
Activity tab for an entity in the Shotgun Web UI.
A complex data structure of the following form will be
returned from Shotgun::
{'earliest_update_id': 50,
'entity_id': 65,
'entity_type': 'Project',
'latest_update_id': 79,
'updates': [{'created_at': '2015-07-15 11:06:55 UTC',
'created_by': {'id': 38,
'image': '6641',
'name': 'John Smith',
'status': 'act',
'type': 'HumanUser'},
'id': 79,
'meta': {'entity_id': 6004,
'entity_type': 'Version',
'type': 'new_entity'},
'primary_entity': {'id': 6004,
'name': 'Review_turntable_v2',
'status': 'rev',
'type': 'Version'},
'read': False,
'update_type': 'create'},
{...},
]
}
The main payload of the return data can be found inside the 'updates'
key, containing a list of dictionaries. This list is always returned
in descending date order. Each item may contain different fields
depending on their update type. The primary_entity key represents the
main Shotgun entity that is associated with the update. By default,
this entity is returned with a set of standard fields. By using the
entity_fields parameter, you can extend the returned data to include
additional fields. If for example you wanted to return the asset type
for all assets and the linked sequence for all Shots, pass the
following entity_fields::
{"Shot": ["sg_sequence"], "Asset": ["sg_asset_type"]}
Deep queries can be used in this syntax if you want to
traverse into connected data.
:param str entity_type: Entity type to retrieve activity stream for
:param int entity_id: Entity id to retrieve activity stream for
:param list entity_fields: List of additional fields to include.
See above for details
:param int max_id: Do not retrieve ids greater than this id.
This is useful when implementing paging.
:param int min_id: Do not retrieve ids less than this id.
This is useful when implementing caching of
the event stream data and you want to
"top up" an existing cache.
:param int limit: Limit the number of returned records. If not specified,
the system default will be used.
:returns: A complex activity stream data structure. See above for details.
:rtype: dict
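Example call (illustrative; the entity values are placeholders matching the
sample output above):
>>> sg.activity_stream_read("Project", 65, limit=20)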
"""
if self.server_caps.version and self.server_caps.version < (6, 2, 0):
raise ShotgunError("activity_stream requires server version 6.2.0 or "
"higher, server is %s" % (self.server_caps.version,))
# set up parameters to send to server.
entity_fields = entity_fields or {}
if not isinstance(entity_fields, dict):
raise ValueError("entity_fields parameter must be a dictionary")
params = {"type": entity_type,
"id": entity_id,
"max_id": max_id,
"min_id": min_id,
"limit": limit,
"entity_fields": entity_fields}
record = self._call_rpc("activity_stream", params)
result = self._parse_records(record)[0]
return result
def nav_expand(self, path, seed_entity_field=None, entity_fields=None):
"""
Expand the navigation hierarchy for the supplied path.
.. warning::
This is an experimental method that is not officially part of the
python-api. Usage of this method is discouraged. This method's name,
arguments, and argument types may change at any point.
"""
return self._call_rpc(
"nav_expand",
{
"path": path,
"seed_entity_field": seed_entity_field,
"entity_fields": entity_fields
}
)
def nav_search_string(self, root_path, search_string, seed_entity_field=None):
"""
Search function adapted to work with the navigation hierarchy.
.. warning::
This is an experimental method that is not officially part of the
python-api. Usage of this method is discouraged. This method's name,
arguments, and argument types may change at any point.
"""
return self._call_rpc(
"nav_search",
{
"root_path": root_path,
"seed_entity_field": seed_entity_field,
"search_criteria": {"search_string": search_string}
}
)
def nav_search_entity(self, root_path, entity, seed_entity_field=None):
"""
Search function adapted to work with the navigation hierarchy.
.. warning::
This is an experimental method that is not officially part of the
python-api. Usage of this method is discouraged. This method's name,
arguments, and argument types may change at any point.
"""
return self._call_rpc(
"nav_search",
{
"root_path": root_path,
"seed_entity_field": seed_entity_field,
"search_criteria": {"entity": entity}
}
)
def get_session_token(self):
"""
Get the session token associated with the current session.
If a session token has already been established, this is returned, otherwise a new one is
generated on the server and returned.
>>> sg.get_session_token()
dd638be7d07c39fa73d935a775558a50
:returns: String containing a session token.
:rtype: str
"""
if self.config.session_token:
return self.config.session_token
rv = self._call_rpc("get_session_token", None)
session_token = (rv or {}).get("session_id")
if not session_token:
raise RuntimeError("Could not extract session_id from %s", rv)
self.config.session_token = session_token
return session_token
def preferences_read(self, prefs=None):
"""
Get a subset of the site preferences.
>>> sg.preferences_read()
{
"pref_name": "pref value"
}
:param list prefs: An optional list of preference names to return.
:returns: Dictionary of preferences and their values.
:rtype: dict
"""
if self.server_caps.version and self.server_caps.version < (7, 10, 0):
raise ShotgunError("preferences_read requires server version 7.10.0 or "
"higher, server is %s" % (self.server_caps.version,))
prefs = prefs or []
return self._call_rpc("preferences_read", {"prefs": prefs})
def _build_opener(self, handler):
"""
Build urllib2 opener with appropriate proxy handler.
"""
handlers = []
if self.__ca_certs and not NO_SSL_VALIDATION:
handlers.append(CACertsHTTPSHandler(self.__ca_certs))
if self.config.proxy_handler:
handlers.append(self.config.proxy_handler)
if handler is not None:
handlers.append(handler)
return urllib.request.build_opener(*handlers)
@classmethod
def _get_certs_file(cls, ca_certs):
"""
The following method tells the API where to look for
certificate authorities certificates (we will be referring to these
as CAC from now on). Here's how the Python API interacts with those.
Auth and CRUD operations
========================
These operations are executed with httplib2. httplib2 ships with a
list of CACs instead of asking Python's ssl module for them.
Upload/Downloads
================
These operations are executed using urllib2. urllib2 asks a Python
module called `ssl` for CACs. We have bundled certifi with the API
so that we can be sure the certs are correct at the time of the API
release. This does however mean when the certs change we must update
the API to contain the latest certifi.
This approach is preferable to not using certifi since, on Windows,
ssl searches for CACs in the Windows Certificate Store, on
Linux/macOS, it asks the OpenSSL library linked with Python for CACs.
Depending on how Python was compiled for a given DCC, Python may be
linked against the OpenSSL from the OS or a copy of OpenSSL distributed
with the DCC. This impacts which versions of the certificates are
available to Python, as an OS level OpenSSL will be aware of system
wide certificates that have been added, while an OpenSSL that comes
with a DCC is likely bundling a list of certificates that get updated
with each release and may not contain system wide certificates.
Using custom CACs
=================
When a user requires a non-standard CAC, the SHOTGUN_API_CACERTS
environment variable allows providing an alternate location for
the CACs.
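For example (illustrative path; set the variable before the Shotgun
connection is created):
>>> os.environ["SHOTGUN_API_CACERTS"] = "/path/to/custom_cacert.pem"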
:param ca_certs: A default cert can be provided
:return: The cert file path to use.
"""
if ca_certs is not None:
# certs were provided up front so use these
return ca_certs
elif "SHOTGUN_API_CACERTS" in os.environ:
return os.environ.get("SHOTGUN_API_CACERTS")
else:
# No certs have been specifically provided fallback to using the
# certs shipped with this API.
# We bundle certifi with this API so that we have a higher chance
of using an up-to-date certificate, rather than relying
# on the certs that are bundled with Python or the OS in some cases.
# However we can't use certifi.where() since that searches for the
# cacert.pem file using the sys.path and this means that if another
# copy of certifi can be found first, then it won't use ours.
# So we manually generate the path to the cert, but still use certifi
# to make it easier for updating the bundled cert with the API.
cur_dir = os.path.dirname(os.path.abspath(__file__))
# Now add the rest of the path to the cert file.
cert_file = os.path.join(cur_dir, "lib", "certifi", "cacert.pem")
return cert_file
def _turn_off_ssl_validation(self):
"""
Turn off SSL certificate validation.
"""
global NO_SSL_VALIDATION
self.config.no_ssl_validation = True
NO_SSL_VALIDATION = True
# reset ssl-validation in user-agents
self._user_agents = ["ssl %s (no-validate)" % self.client_caps.ssl_version
if ua.startswith("ssl ") else ua
for ua in self._user_agents]
# Deprecated methods from old wrapper
def schema(self, entity_type):
"""
.. deprecated:: 3.0.0
Use :meth:`~shotgun_api3.Shotgun.schema_field_read` instead.
"""
raise ShotgunError("Deprecated: use schema_field_read('type':'%s') instead" % entity_type)
def entity_types(self):
"""
.. deprecated:: 3.0.0
Use :meth:`~shotgun_api3.Shotgun.schema_entity_read` instead.
"""
raise ShotgunError("Deprecated: use schema_entity_read() instead")
# ========================================================================
# RPC Functions
def _call_rpc(self, method, params, include_auth_params=True, first=False):
"""
Call the specified method on the Shotgun Server sending the supplied payload.
"""
LOG.debug("Starting rpc call to %s with params %s" % (
method, params))
params = self._transform_outbound(params)
payload = self._build_payload(method, params, include_auth_params=include_auth_params)
encoded_payload = self._encode_payload(payload)
req_headers = {
"content-type": "application/json; charset=utf-8",
"connection": "keep-alive"
}
if self.config.localized is True:
req_headers["locale"] = "auto"
attempt = 1
max_attempts = 4 # Three retries on failure
backoff = 0.75 # Seconds to wait before retry, times the attempt number
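# Illustrative timing (with the values above): a persistent 502 is retried
# after waits of ~0.75s, ~1.5s and ~2.25s before the error is finally raised.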
while attempt <= max_attempts:
http_status, resp_headers, body = self._make_call(
"POST",
self.config.api_path,
encoded_payload,
req_headers,
)
LOG.debug("Completed rpc call to %s" % (method))
try:
self._parse_http_status(http_status)
except ProtocolError as e:
e.headers = resp_headers
# We've seen some rare instances of SG returning 502 for issues that
# appear to be caused by something internal to SG. We're going to
# allow for limited retries for those specifically.
if attempt != max_attempts and e.errcode == 502:
LOG.debug("Got a 502 response. Waiting and retrying...")
time.sleep(float(attempt) * backoff)
attempt += 1
continue
elif e.errcode == 403:
# 403 is returned with custom error page when api access is blocked
e.errmsg += ": %s" % body
raise
else:
break
response = self._decode_response(resp_headers, body)
self._response_errors(response)
response = self._transform_inbound(response)
if not isinstance(response, dict) or "results" not in response:
return response
results = response.get("results")
if first and isinstance(results, list):
return results[0]
return results
def _auth_params(self):
"""
Return a dictionary of the authentication parameters being used.
"""
# Used to authenticate HumanUser credentials
if self.config.user_login and self.config.user_password:
auth_params = {
"user_login": str(self.config.user_login),
"user_password": str(self.config.user_password),
}
if self.config.auth_token:
auth_params["auth_token"] = str(self.config.auth_token)
# Use script name instead
elif self.config.script_name and self.config.api_key:
auth_params = {
"script_name": str(self.config.script_name),
"script_key": str(self.config.api_key),
}
# Authenticate using session_id
elif self.config.session_token:
if self.server_caps.version and self.server_caps.version < (5, 3, 0):
raise ShotgunError("Session token based authentication requires server version "
"5.3.0 or higher, server is %s" % (self.server_caps.version,))
auth_params = {"session_token": str(self.config.session_token)}
# Request server side to raise exception for expired sessions.
# This was added in as part of Shotgun 5.4.4
if self.server_caps.version and self.server_caps.version > (5, 4, 3):
auth_params["reject_if_expired"] = True
else:
raise ValueError("invalid auth params")
if self.config.session_uuid:
auth_params["session_uuid"] = self.config.session_uuid
# Make sure sudo_as_login is supported by server version
if self.config.sudo_as_login:
if self.server_caps.version and self.server_caps.version < (5, 3, 12):
raise ShotgunError("Option 'sudo_as_login' requires server version 5.3.12 or "
"higher, server is %s" % (self.server_caps.version,))
auth_params["sudo_as_login"] = self.config.sudo_as_login
if self.config.extra_auth_params:
auth_params.update(self.config.extra_auth_params)
return auth_params
def _sanitize_auth_params(self, params):
"""
Given an authentication parameter dictionary, sanitize any sensitive
information and return the sanitized dict copy.
"""
sanitized_params = copy.copy(params)
for k in ["user_password", "script_key", "session_token"]:
if k in sanitized_params:
sanitized_params[k] = "********"
return sanitized_params
def _build_payload(self, method, params, include_auth_params=True):
"""
Build the payload to be sent to the rpc endpoint.
"""
if not method:
raise ValueError("method is empty")
call_params = []
if include_auth_params:
auth_params = self._auth_params()
call_params.append(auth_params)
if params:
call_params.append(params)
return {
"method_name": method,
"params": call_params
}
def _encode_payload(self, payload):
"""
Encode the payload to a string to be passed to the rpc endpoint.
The payload is json encoded as a unicode string if the content
requires it. The unicode string is then encoded as 'utf-8' as it must
be in a single byte encoding to go over the wire.
"""
wire = json.dumps(payload, ensure_ascii=False)
return six.ensure_binary(wire)
def _make_call(self, verb, path, body, headers):
"""
Make an HTTP call to the server.
Handles retry and failure.
"""
attempt = 0
req_headers = {}
req_headers["user-agent"] = "; ".join(self._user_agents)
if self.config.authorization:
req_headers["Authorization"] = self.config.authorization
req_headers.update(headers or {})
body = body or None
max_rpc_attempts = self.config.max_rpc_attempts
rpc_attempt_interval = self.config.rpc_attempt_interval / 1000.0
while (attempt < max_rpc_attempts):
attempt += 1
try:
return self._http_request(verb, path, body, req_headers)
except ssl_error_classes as e:
# Test whether the exception is due to the fact that this is an older version of
# Python that cannot validate certificates encrypted with SHA-2. If it is, then
# fall back on disabling the certificate validation and try again - unless the
# SHOTGUN_FORCE_CERTIFICATE_VALIDATION environment variable has been set by the
# user. In that case we simply raise the exception. Any other exceptions simply
# get raised as well.
#
# For more info see:
# http://blog.shotgunsoftware.com/2016/01/important-ssl-certificate-renewal-and.html
#
# SHA-2 errors look like this:
# [Errno 1] _ssl.c:480: error:0D0C50A1:asn1 encoding routines:ASN1_item_verify:
# unknown message digest algorithm
#
# Any other exceptions simply get raised.
if "unknown message digest algorithm" not in str(e) or \
"SHOTGUN_FORCE_CERTIFICATE_VALIDATION" in os.environ:
raise
if self.config.no_ssl_validation is False:
LOG.warning("SSL Error: this Python installation is incompatible with "
"certificates signed with SHA-2. Disabling certificate validation. "
"For more information, see http://blog.shotgunsoftware.com/2016/01/"
"important-ssl-certificate-renewal-and.html")
self._turn_off_ssl_validation()
# reload user agent to reflect that we have turned off ssl validation
req_headers["user-agent"] = "; ".join(self._user_agents)
self._close_connection()
if attempt == max_rpc_attempts:
raise
except Exception:
self._close_connection()
if attempt == max_rpc_attempts:
LOG.debug("Request failed. Giving up after %d attempts." % attempt)
raise
LOG.debug(
"Request failed, attempt %d of %d. Retrying in %.2f seconds..." %
(attempt, max_rpc_attempts, rpc_attempt_interval)
)
time.sleep(rpc_attempt_interval)
def _http_request(self, verb, path, body, headers):
"""
Make the actual HTTP request.
"""
url = urllib.parse.urlunparse((self.config.scheme, self.config.server, path, None, None, None))
LOG.debug("Request is %s:%s" % (verb, url))
LOG.debug("Request headers are %s" % headers)
LOG.debug("Request body is %s" % body)
conn = self._get_connection()
resp, content = conn.request(url, method=verb, body=body, headers=headers)
# http response code is handled elsewhere
http_status = (resp.status, resp.reason)
resp_headers = dict(
(k.lower(), v)
for k, v in six.iteritems(resp)
)
resp_body = content
LOG.debug("Response status is %s %s" % http_status)
LOG.debug("Response headers are %s" % resp_headers)
LOG.debug("Response body is %s" % resp_body)
return (http_status, resp_headers, resp_body)
def _parse_http_status(self, status):
"""
Parse the status returned from the http request.
:param tuple status: Tuple of (code, reason).
:raises: ProtocolError if the http status indicates failure.
"""
error_code = status[0]
errmsg = status[1]
if status[0] >= 300:
headers = "HTTP error from server"
if status[0] == 503:
errmsg = "ShotGrid is currently down for maintenance or too busy to reply. Please try again later."
raise ProtocolError(self.config.server,
error_code,
errmsg,
headers)
return
def _decode_response(self, headers, body):
"""
Decode the response from the server from the wire format to
a python data structure.
:param dict headers: Headers from the server.
:param str body: Raw response body from the server.
:returns: If the content-type starts with application/json or
text/javascript the body is json decoded. Otherwise the raw body is
returned.
:rtype: str
"""
if not body:
return body
ct = (headers.get("content-type") or "application/json").lower()
if ct.startswith("application/json") or ct.startswith("text/javascript"):
return self._json_loads(body)
return body
def _json_loads(self, body):
return json.loads(body)
def _json_loads_ascii(self, body):
"""
See http://stackoverflow.com/questions/956867
"""
def _decode_list(lst):
newlist = []
for i in lst:
if isinstance(i, six.text_type):
i = six.ensure_str(i)
elif isinstance(i, list):
i = _decode_list(i)
newlist.append(i)
return newlist
def _decode_dict(dct):
newdict = {}
for k, v in six.iteritems(dct):
if isinstance(k, six.text_type):
k = six.ensure_str(k)
if isinstance(v, six.text_type):
v = six.ensure_str(v)
elif isinstance(v, list):
v = _decode_list(v)
newdict[k] = v
return newdict
return json.loads(body, object_hook=_decode_dict)
def _response_errors(self, sg_response):
"""
Raise any API errors specified in the response.
:raises ShotgunError: If the server response contains an exception.
"""
# error code for authentication related problems
ERR_AUTH = 102
# error code when 2FA authentication is required but no 2FA token provided.
ERR_2FA = 106
# error code when SSO is activated on the site, preventing the use of username/password for authentication.
ERR_SSO = 108
# error code when Oxygen is activated on the site, preventing the use of username/password for authentication.
ERR_OXYG = 110
if isinstance(sg_response, dict) and sg_response.get("exception"):
if sg_response.get("error_code") == ERR_AUTH:
raise AuthenticationFault(sg_response.get("message", "Unknown Authentication Error"))
elif sg_response.get("error_code") == ERR_2FA:
raise MissingTwoFactorAuthenticationFault(
sg_response.get("message", "Unknown 2FA Authentication Error")
)
elif sg_response.get("error_code") == ERR_SSO:
raise UserCredentialsNotAllowedForSSOAuthenticationFault(
sg_response.get("message",
"Authentication using username/password is not "
"allowed for an SSO-enabled ShotGrid site")
)
elif sg_response.get("error_code") == ERR_OXYG:
raise UserCredentialsNotAllowedForOxygenAuthenticationFault(
sg_response.get("message", "Authentication using username/password is not "
"allowed for an Autodesk Identity enabled ShotGrid site")
)
else:
# raise general Fault
raise Fault(sg_response.get("message", "Unknown Error"))
return
def _visit_data(self, data, visitor):
"""
Walk the data (simple python types) and call the visitor.
"""
if not data:
return data
recursive = self._visit_data
if isinstance(data, list):
return [recursive(i, visitor) for i in data]
if isinstance(data, tuple):
return tuple(recursive(i, visitor) for i in data)
if isinstance(data, dict):
return dict(
(k, recursive(v, visitor))
for k, v in six.iteritems(data)
)
return visitor(data)
def _transform_outbound(self, data):
"""
Transform data types or values before they are sent by the client.
- changes timezones
- converts dates and times to strings
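Example (illustrative, assuming ``convert_datetimes_to_utc`` is enabled and a
UTC-5 local zone): a naive ``datetime.datetime(2015, 7, 14, 14, 30)`` is sent
as the string ``"2015-07-14T19:30:00Z"``, while a ``datetime.date`` is sent
as ``"2015-07-14"`` with no timezone shift.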
"""
if self.config.convert_datetimes_to_utc:
def _change_tz(value):
if value.tzinfo is None:
value = value.replace(tzinfo=SG_TIMEZONE.local)
return value.astimezone(SG_TIMEZONE.utc)
else:
_change_tz = None
local_now = datetime.datetime.now()
def _outbound_visitor(value):
if isinstance(value, datetime.datetime):
if _change_tz:
value = _change_tz(value)
return value.strftime("%Y-%m-%dT%H:%M:%SZ")
if isinstance(value, datetime.date):
# existing code did not tz transform dates.
return value.strftime("%Y-%m-%d")
if isinstance(value, datetime.time):
value = local_now.replace(
hour=value.hour,
minute=value.minute,
second=value.second,
microsecond=value.microsecond
)
if _change_tz:
value = _change_tz(value)
return value.strftime("%Y-%m-%dT%H:%M:%SZ")
# ensure return is six.text_type
if isinstance(value, six.string_types):
return six.ensure_text(value)
return value
return self._visit_data(data, _outbound_visitor)
def _transform_inbound(self, data):
"""
Transforms data types or values after they are received from the server.
"""
# NOTE: The time zone is removed from the time after it is transformed
# to the local time, otherwise it will fail to compare to datetimes
# that do not have a time zone.
if self.config.convert_datetimes_to_utc:
def _change_tz(x):
return x.replace(tzinfo=SG_TIMEZONE.utc).astimezone(SG_TIMEZONE.local)
else:
_change_tz = None
def _inbound_visitor(value):
if isinstance(value, six.string_types):
if len(value) == 20 and self._DATE_TIME_PATTERN.match(value):
try:
# strptime was not on datetime in python2.4
value = datetime.datetime(
*time.strptime(value, "%Y-%m-%dT%H:%M:%SZ")[:6])
except ValueError:
return value
if _change_tz:
return _change_tz(value)
return value
return value
return self._visit_data(data, _inbound_visitor)
# ========================================================================
# Connection Functions
def _get_connection(self):
"""
Return the current connection or creates a new connection to the current server.
"""
if self._connection is not None:
return self._connection
if self.config.proxy_server:
pi = ProxyInfo(socks.PROXY_TYPE_HTTP, self.config.proxy_server,
self.config.proxy_port, proxy_user=self.config.proxy_user,
proxy_pass=self.config.proxy_pass)
self._connection = Http(timeout=self.config.timeout_secs, ca_certs=self.__ca_certs,
proxy_info=pi, disable_ssl_certificate_validation=self.config.no_ssl_validation)
else:
self._connection = Http(timeout=self.config.timeout_secs, ca_certs=self.__ca_certs,
proxy_info=None, disable_ssl_certificate_validation=self.config.no_ssl_validation)
return self._connection
def _close_connection(self):
"""
Close the current connection.
"""
if self._connection is None:
return
for conn in self._connection.connections.values():
try:
conn.close()
except Exception:
pass
self._connection.connections.clear()
self._connection = None
return
# ========================================================================
# Utility
def _parse_records(self, records):
"""
Parse 'records' returned from the api to do local modifications:
- Insert thumbnail urls
- Insert local file paths.
- Revert &lt; html entities that may be the result of input sanitization
mechanisms back to a literal < character.
:param records: List of records (dicts) to process or a single record.
:returns: A list of the records processed.
"""
if not records:
return []
if not isinstance(records, (list, tuple)):
records = [records, ]
for rec in records:
# skip results that aren't entity dictionaries
if not isinstance(rec, dict):
continue
# iterate over each item and check each field for possible injection
for k, v in six.iteritems(rec):
if not v:
continue
# Check for html entities in strings
if isinstance(v, str):
rec[k] = rec[k].replace("&lt;", "<")
# check for thumbnail for older version (<3.3.0) of shotgun
if k == "image" and self.server_caps.version and self.server_caps.version < (3, 3, 0):
rec["image"] = self._build_thumb_url(rec["type"], rec["id"])
continue
if isinstance(v, dict) and v.get("link_type") == "local" and self.client_caps.local_path_field in v:
local_path = v[self.client_caps.local_path_field]
v["local_path"] = local_path
v["url"] = "file://%s" % (local_path or "",)
return records
def _build_thumb_url(self, entity_type, entity_id):
"""
Return the URL for the thumbnail of an entity given the entity type and the entity id.
Note: This makes a call to the server for every thumbnail.
:param entity_type: Entity type the id is for.
:param entity_id: id of the entity to get the thumbnail for.
:returns: Fully qualified url to the thumbnail.
"""
# Example response from the end point
# curl "https://foo.com/upload/get_thumbnail_url?entity_type=Version&entity_id=1"
# 1
# /files/0000/0000/0012/232/shot_thumb.jpg.jpg
entity_info = {"e_type": urllib.parse.quote(entity_type),
"e_id": urllib.parse.quote(str(entity_id))}
url = ("/upload/get_thumbnail_url?" +
"entity_type=%(e_type)s&entity_id=%(e_id)s" % entity_info)
body = self._make_call("GET", url, None, None)[2]
code, thumb_url = body.splitlines()
code = int(code)
# code of 0 means error, second line is the error message
if code == 0:
raise ShotgunError(thumb_url)
if code == 1:
return urllib.parse.urlunparse((self.config.scheme,
self.config.server,
thumb_url.strip(),
None, None, None))
# Comments in prev version said we can get this sometimes.
raise RuntimeError("Unknown code %s %s" % (code, thumb_url))
def _dict_to_list(self, d, key_name="field_name", value_name="value", extra_data=None):
"""
Utility function to convert a dict into a list of dicts using the key_name and value_name keys.
e.g. d {'foo' : 'bar'} changed to [{'field_name':'foo', 'value':'bar'}]
Any dictionary passed in via extra_data will be merged into the resulting dictionary.
e.g. d as above and extra_data of {'foo': {'thing1': 'value1'}} changes into
[{'field_name': 'foo', 'value': 'bar', 'thing1': 'value1'}]
"""
ret = []
for k, v in six.iteritems((d or {})):
d = {key_name: k, value_name: v}
d.update((extra_data or {}).get(k, {}))
ret.append(d)
return ret
def _dict_to_extra_data(self, d, key_name="value"):
"""
Utility function to convert a dict into a dict compatible with the extra_data arg
of _dict_to_list.
e.g. d {'foo' : 'bar'} changed to {'foo': {'value': 'bar'}}
"""
return dict([(k, {key_name: v}) for (k, v) in six.iteritems((d or {}))])
def _upload_file_to_storage(self, path, storage_url):
"""
Internal function to upload an entire file to the Cloud storage.
:param str path: Full path to an existing non-empty file on disk to upload.
:param str storage_url: Target URL for the uploaded file.
"""
filename = os.path.basename(path)
fd = open(path, "rb")
try:
content_type = mimetypes.guess_type(filename)[0]
content_type = content_type or "application/octet-stream"
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
self._upload_data_to_storage(fd, content_type, file_size, storage_url)
finally:
fd.close()
LOG.debug("File uploaded to Cloud storage: %s", filename)
def _multipart_upload_file_to_storage(self, path, upload_info):
"""
Internal function to upload a file to the Cloud storage in multiple parts.
:param str path: Full path to an existing non-empty file on disk to upload.
:param dict upload_info: Contains details received from the server, about the upload.
"""
fd = open(path, "rb")
try:
content_type = mimetypes.guess_type(path)[0]
content_type = content_type or "application/octet-stream"
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
filename = os.path.basename(path)
etags = []
part_number = 1
bytes_read = 0
chunk_size = self._MULTIPART_UPLOAD_CHUNK_SIZE
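# Illustrative example (the chunk size value is an assumption): with a 500 MB
# file and a 100 MB chunk size, the loop below requests five part URLs,
# uploads five chunks, and collects one etag per part for the completion call.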
while bytes_read < file_size:
data = fd.read(chunk_size)
data_size = len(data)
# keep data as a stream so that we don't need to worry about how it was
# encoded.
data = BytesIO(data)
bytes_read += data_size
part_url = self._get_upload_part_link(upload_info, filename, part_number)
etags.append(self._upload_data_to_storage(data, content_type, data_size, part_url))
part_number += 1
self._complete_multipart_upload(upload_info, filename, etags)
finally:
fd.close()
LOG.debug("File uploaded in multiple parts to Cloud storage: %s", path)
def _get_upload_part_link(self, upload_info, filename, part_number):
"""
Internal function to get the url to upload the next part of a file to the
Cloud storage, in a multi-part upload process.
:param dict upload_info: Contains details received from the server, about the upload.
:param str filename: Name of the file for which we want the link.
:param int part_number: Part number for the link.
:returns: upload url.
:rtype: str
"""
params = {
"upload_type": upload_info["upload_type"],
"filename": filename,
"timestamp": upload_info["timestamp"],
"upload_id": upload_info["upload_id"],
"part_number": part_number
}
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/upload/api_get_upload_link_for_part", None, None, None))
result = self._send_form(url, params)
# Response is of the form: 1\n<url> (for success) or 0\n (for failure).
# In case of success, we know the second line of the response contains the
# requested URL.
if not result.startswith("1"):
raise ShotgunError("Unable get upload part link: %s" % result)
LOG.debug("Got next upload link from server for multipart upload.")
return result.split("\n", 2)[1]
def _upload_data_to_storage(self, data, content_type, size, storage_url):
"""
Internal function to upload data to Cloud storage.
:param stream data: Data stream to upload.
:param str content_type: Content type of the data stream.
:param int size: Number of bytes in the data stream.
:param str storage_url: Target URL for the uploaded file.
:returns: The etag of the uploaded part, as returned by the storage server.
:rtype: str
"""
try:
opener = self._build_opener(urllib.request.HTTPHandler)
request = urllib.request.Request(storage_url, data=data)
request.add_header("Content-Type", content_type)
request.add_header("Content-Length", size)
request.get_method = lambda: "PUT"
result = opener.open(request)
etag = result.info()["Etag"]
except urllib.error.HTTPError as e:
if e.code == 500:
raise ShotgunError("Server encountered an internal error.\n%s\n%s\n\n" % (storage_url, e))
else:
raise ShotgunError("Unanticipated error occurred uploading to %s: %s" % (storage_url, e))
LOG.debug("Part upload completed successfully.")
return etag
def _complete_multipart_upload(self, upload_info, filename, etags):
"""
Internal function to complete a multi-part upload to the Cloud storage.
:param dict upload_info: Contains details received from the server, about the upload.
:param str filename: Name of the file for which we want to complete the upload.
:param tuple etags: Contains the etag of each uploaded file part.
"""
params = {
"upload_type": upload_info["upload_type"],
"filename": filename,
"timestamp": upload_info["timestamp"],
"upload_id": upload_info["upload_id"],
"etags": ",".join(etags)
}
url = urllib.parse.urlunparse((self.config.scheme, self.config.server,
"/upload/api_complete_multipart_upload", None, None, None))
result = self._send_form(url, params)
# Response is of the form: 1\n or 0\n to indicate success or failure of the call.
if not result.startswith("1"):
raise ShotgunError("Unable get upload part link: %s" % result)
def _requires_direct_s3_upload(self, entity_type, field_name):
"""
Internal function that determines if an entity_type + field_name combination
should be uploaded to cloud storage.
The info endpoint should return `s3_enabled_upload_types` which contains an object like the following:
{
'Version': ['sg_uploaded_movie'],
'Attachment': '*',
'*': ['this_file']
}
:param str entity_type: The entity type of the file being uploaded.
:param str field_name: The matching field name for the file being uploaded.
:returns: Whether the field + entity type combination should be uploaded to cloud storage.
:rtype: bool
"""
supported_s3_types = self.server_info.get("s3_enabled_upload_types") or {}
supported_fields = supported_s3_types.get(entity_type) or []
supported_star_fields = supported_s3_types.get("*") or []
# If direct uploads are enabled
if self.server_info.get("s3_direct_uploads_enabled", False):
# If field_name is part of a supported entity_type
if field_name in supported_fields or field_name in supported_star_fields:
return True
# If supported_fields is a string or a list with *
if isinstance(supported_fields, list) and "*" in supported_fields:
return True
elif supported_fields == "*":
return True
# If supported_star_fields is a list containing * or * as a string
if isinstance(supported_star_fields, list) and "*" in supported_star_fields:
return True
elif supported_star_fields == "*":
return True
# Support direct upload for old versions of shotgun
return entity_type == "Version" and field_name == "sg_uploaded_movie"
else:
return False
def _send_form(self, url, params):
"""
Utility function to send a Form to Shotgun and process any HTTP errors that
could occur.
:param url: endpoint where the form is sent.
:param params: form data
:returns: result from the server.
"""
params.update(self._auth_params())
opener = self._build_opener(FormPostHandler)
# Perform the request
try:
resp = opener.open(url, params)
result = resp.read()
# response headers are in str(resp.info()).splitlines()
except urllib.error.HTTPError as e:
if e.code == 500:
raise ShotgunError("Server encountered an internal error. "
"\n%s\n(%s)\n%s\n\n" % (url, self._sanitize_auth_params(params), e))
else:
raise ShotgunError("Unanticipated error occurred %s" % (e))
return six.ensure_text(result)
class CACertsHTTPSConnection(http_client.HTTPConnection):
""""
This class allows to create an HTTPS connection that uses the custom certificates
passed in.
"""
default_port = http_client.HTTPS_PORT
def __init__(self, *args, **kwargs):
"""
:param args: Positional arguments passed down to the base class.
:param ca_certs: Path to the custom CA certs file.
:param kwargs: Keyword arguments passed down to the base class
"""
# Pop that argument,
self.__ca_certs = kwargs.pop("ca_certs")
http_client.HTTPConnection.__init__(self, *args, **kwargs)
def connect(self):
"Connect to a host on a given (SSL) port."
http_client.HTTPConnection.connect(self)
# Now that the regular HTTP socket has been created, wrap it with our SSL certs.
self.sock = ssl.wrap_socket(
self.sock,
ca_certs=self.__ca_certs,
cert_reqs=ssl.CERT_REQUIRED
)
class CACertsHTTPSHandler(urllib.request.HTTPSHandler):
"""
Handler that ensures https connections are created with the custom CA certs.
"""
def __init__(self, cacerts):
urllib.request.HTTPSHandler.__init__(self)
self.__ca_certs = cacerts
def https_open(self, req):
return self.do_open(self.create_https_connection, req)
def create_https_connection(self, *args, **kwargs):
return CACertsHTTPSConnection(*args, ca_certs=self.__ca_certs, **kwargs)
# Helpers from the previous API, left as is.
# Based on http://code.activestate.com/recipes/146306/
class FormPostHandler(urllib.request.BaseHandler):
"""
Handler for multipart form data
"""
handler_order = urllib.request.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
# get_data was removed in 3.4. since we're testing against 3.6 and
# 3.7, this should be sufficient.
if six.PY3:
data = request.data
else:
data = request.get_data()
if data is not None and not isinstance(data, six.string_types):
files = []
params = []
for key, value in data.items():
if isinstance(value, sgsix.file_types):
files.append((key, value))
else:
params.append((key, value))
if not files:
data = six.ensure_binary(urllib.parse.urlencode(params, True)) # sequencing on
else:
boundary, data = self.encode(params, files)
content_type = "multipart/form-data; boundary=%s" % boundary
request.add_unredirected_header("Content-Type", content_type)
# add_data was removed in 3.4. since we're testing against 3.6 and
# 3.7, this should be sufficient.
if six.PY3:
request.data = data
else:
request.add_data(data)
return request
def encode(self, params, files, boundary=None, buffer=None):
if boundary is None:
# Per https://stackoverflow.com/a/27174474
# use a random string as the boundary if none was provided --
# use uuid since mimetools no longer exists in Python 3.
# We'll do this across both python 2/3 rather than add more branching.
boundary = uuid.uuid4()
if buffer is None:
buffer = BytesIO()
for (key, value) in params:
if not isinstance(value, six.string_types):
# If value is not a string (e.g. int) cast to text
value = six.text_type(value)
value = six.ensure_text(value)
key = six.ensure_text(key)
buffer.write(six.ensure_binary("--%s\r\n" % boundary))
buffer.write(six.ensure_binary("Content-Disposition: form-data; name=\"%s\"" % key))
buffer.write(six.ensure_binary("\r\n\r\n%s\r\n" % value))
for (key, fd) in files:
# On Windows, it's possible that we were forced to open a file
# with non-ascii characters as unicode. In that case, we need to
# encode it as a utf-8 string to remove unicode from the equation.
# If we don't, the mix of unicode and strings going into the
# buffer can cause UnicodeEncodeErrors to be raised.
filename = fd.name
filename = six.ensure_text(filename)
filename = filename.split("/")[-1]
key = six.ensure_text(key)
content_type = mimetypes.guess_type(filename)[0]
content_type = content_type or "application/octet-stream"
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
buffer.write(six.ensure_binary("--%s\r\n" % boundary))
c_dis = "Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"%s"
content_disposition = c_dis % (key, filename, "\r\n")
buffer.write(six.ensure_binary(content_disposition))
buffer.write(six.ensure_binary("Content-Type: %s\r\n" % content_type))
buffer.write(six.ensure_binary("Content-Length: %s\r\n" % file_size))
buffer.write(six.ensure_binary("\r\n"))
fd.seek(0)
shutil.copyfileobj(fd, buffer)
buffer.write(six.ensure_binary("\r\n"))
buffer.write(six.ensure_binary("--%s--\r\n\r\n" % boundary))
buffer = buffer.getvalue()
return boundary, buffer
def https_request(self, request):
return self.http_request(request)
def _translate_filters(filters, filter_operator):
"""
Translate filters params into data structure expected by rpc call.
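Example (illustrative): ``[["sg_status_list", "is", "ip"]]`` with a
``filter_operator`` of ``None`` becomes::
{"logical_operator": "and",
"conditions": [{"path": "sg_status_list", "relation": "is", "values": ["ip"]}]}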
"""
wrapped_filters = {
"filter_operator": filter_operator or "all",
"filters": filters
}
return _translate_filters_dict(wrapped_filters)
def _translate_filters_dict(sg_filter):
new_filters = {}
filter_operator = sg_filter.get("filter_operator")
if filter_operator == "all" or filter_operator == "and":
new_filters["logical_operator"] = "and"
elif filter_operator == "any" or filter_operator == "or":
new_filters["logical_operator"] = "or"
else:
raise ShotgunError("Invalid filter_operator %s" % filter_operator)
if not isinstance(sg_filter["filters"], (list, tuple)):
raise ShotgunError("Invalid filters, expected a list or a tuple, got %s"
% sg_filter["filters"])
new_filters["conditions"] = _translate_filters_list(sg_filter["filters"])
return new_filters
def _translate_filters_list(filters):
conditions = []
for sg_filter in filters:
if isinstance(sg_filter, (list, tuple)):
conditions.append(_translate_filters_simple(sg_filter))
elif isinstance(sg_filter, dict):
conditions.append(_translate_filters_dict(sg_filter))
else:
raise ShotgunError("Invalid filters, expected a list, tuple or dict, got %s"
% sg_filter)
return conditions
def _translate_filters_simple(sg_filter):
condition = {
"path": sg_filter[0],
"relation": sg_filter[1]
}
values = sg_filter[2:]
if len(values) == 1 and isinstance(values[0], (list, tuple)):
values = values[0]
condition["values"] = values
return condition
def _version_str(version):
"""
Convert a tuple of int's to a '.' separated str.
"""
return ".".join(map(str, version))
| {
"content_hash": "7d38be4eadc154c3a21f28542eaa9782",
"timestamp": "",
"source": "github",
"line_count": 4329,
"max_line_length": 184,
"avg_line_length": 44.34488334488334,
"alnum_prop": 0.5635753689397768,
"repo_name": "cpenv/autocpenv",
"id": "2683c3969d1dcb8a456a2d8548ce295b32b46c66",
"size": "191991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/cpenv/vendor/shotgun_api3/shotgun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155838"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
import os
import time
import urllib
import uuid
from xml.dom import minidom
from xml.parsers import expat
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
from nova.openstack.common import versionutils
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import hardware
from nova.virt import netutils
from nova.virt.xenapi import agent
from nova.virt.xenapi.image import utils as image_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
cfg.StrOpt('cache_images',
default='all',
help='Cache glance images locally. `all` will cache all'
' images, `some` will only cache images that have the'
' image_property `cache_in_nova=True`, and `none` turns'
' off caching entirely'),
cfg.IntOpt('image_compression_level',
help='Compression level for images, e.g., 9 for gzip -9.'
' Range is 1-9, 9 being most compressed but most CPU'
' intensive on dom0.'),
cfg.StrOpt('default_os_type',
default='linux',
help='Default OS type'),
cfg.IntOpt('block_device_creation_timeout',
default=10,
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * units.Mi,
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='default-sr:true',
help='Filter for finding the SR to be used to install guest '
'instances on. To use the Local Storage in default '
'XenServer/XCP installations set this flag to '
'other-config:i18n-key=local-storage. To select an SR '
'with a different matching criteria, you could set it to '
'other-config:my_favorite_sr=true. On the other hand, to '
'fall back on the Default SR, as displayed by XenCenter, '
'set this flag to: default-sr:true'),
cfg.BoolOpt('sparse_copy',
default=True,
help='Whether to use sparse_copy for copying data on a '
'resize down (False will use standard dd). This speeds '
'up resizes down considerably since large runs of zeros '
'won\'t have to be rsynced'),
cfg.IntOpt('num_vbd_unplug_retries',
default=10,
help='Maximum number of retries to unplug VBD. if <=0, '
'should try once and no retry'),
cfg.StrOpt('torrent_images',
default='none',
help='Whether or not to download images via Bit Torrent '
'(all|some|none).'),
cfg.StrOpt('ipxe_network_name',
help='Name of network to use for booting iPXE ISOs'),
cfg.StrOpt('ipxe_boot_menu_url',
help='URL to the iPXE boot menu'),
cfg.StrOpt('ipxe_mkisofs_cmd',
default='mkisofs',
help='Name and optionally path of the tool used for '
'ISO image creation'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vm_utils_opts, 'xenserver')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'
MAX_VDI_CHAIN_SIZE = 16
PROGRESS_INTERVAL_SECONDS = 300
# Fudge factor to allow for the VHD chain to be slightly larger than
# the partitioned space. Otherwise, legitimate images near their
# maximum allowed size can fail on build with FlavorDiskTooSmall.
VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10
class ImageType(object):
"""Enumeration class for distinguishing different image types
| 0 - kernel image (goes on dom0's filesystem)
| 1 - ramdisk image (goes on dom0's filesystem)
| 2 - disk image (local SR, partitioned by objectstore plugin)
| 3 - raw disk image (local SR, NOT partitioned by plugin)
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
| 6 - config drive
"""
KERNEL = 0
RAMDISK = 1
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
DISK_CONFIGDRIVE = 6
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "root"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
return dict(zip(cls._ids, ImageType._strs)).get(image_type)
@classmethod
def get_role(cls, image_type_id):
"""Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
cls.DISK_ISO: 'iso',
cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
def get_vm_device_id(session, image_properties):
# NOTE: device_id should be 2 for windows VMs which run new xentools
# (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more
# information.
if image_properties is None:
image_properties = {}
device_id = image_properties.get('xenapi_device_id')
# The device_id is required to be set for hypervisor version 6.1 and above
if device_id:
hypervisor_version = session.product_version
if _hypervisor_supports_device_id(hypervisor_version):
return device_id
else:
msg = _("Device id %(id)s specified is not supported by "
"hypervisor version %(version)s") % {'id': device_id,
'version': hypervisor_version}
raise exception.NovaException(msg)
def _hypervisor_supports_device_id(version):
version_as_string = '.'.join(str(v) for v in version)
    return versionutils.is_compatible('6.1', version_as_string)
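# Illustration (hypothetical values): a session.product_version tuple such as
# (6, 2, 0) is joined into '6.2.0' and passes the '6.1' compatibility check,
# so an image-requested device_id is honoured; an older (6, 0, 2) fails the
# check and get_vm_device_id raises NovaException for that device_id.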
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False, device_id=None):
"""Create a VM record. Returns new VM reference.
    The use_pv_kernel flag indicates whether the guest is HVM or PV.
There are 3 scenarios:
1. Using paravirtualization, kernel passed in
2. Using paravirtualization, kernel within the image
3. Using hardware virtualization
"""
flavor = instance.get_flavor()
mem = str(long(flavor.memory_mb) * units.Mi)
vcpus = str(flavor.vcpus)
vcpu_weight = flavor.vcpu_weight
vcpu_params = {}
if vcpu_weight is not None:
# NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means
# we need to specify both weight and cap for either to apply
vcpu_params = {"weight": str(vcpu_weight), "cap": "0"}
cpu_mask_list = hardware.get_vcpu_pin_set()
if cpu_mask_list:
cpu_mask = hardware.format_cpu_spec(cpu_mask_list,
allow_ranges=False)
vcpu_params["mask"] = cpu_mask
viridian = 'true' if instance['os_type'] == 'windows' else 'false'
rec = {
'actions_after_crash': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_shutdown': 'destroy',
'affinity': '',
'blocked_operations': {},
'ha_always_run': False,
'ha_restart_priority': '',
'HVM_boot_params': {},
'HVM_boot_policy': '',
'is_a_template': False,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_target': mem,
'name_description': '',
'name_label': name_label,
'other_config': {'nova_uuid': str(instance['uuid'])},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': viridian, 'timeoffset': '0'},
'PV_args': '',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '',
'PV_legacy_args': '',
'PV_ramdisk': '',
'recommendations': '',
'tags': [],
'user_version': '0',
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': vcpu_params,
'xenstore_data': {'vm-data/allowvssprovider': 'false'}}
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
rec['platform']['nx'] = 'false'
if instance['kernel_id']:
# 1. Kernel explicitly passed in, use that
rec['PV_args'] = 'root=/dev/xvda1'
rec['PV_kernel'] = kernel
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
rec['platform']['nx'] = 'true'
rec['HVM_boot_params'] = {'order': 'dc'}
rec['HVM_boot_policy'] = 'BIOS order'
if device_id:
rec['platform']['device_id'] = device_id
vm_ref = session.VM.create(rec)
LOG.debug('Created VM', instance=instance)
return vm_ref
def destroy_vm(session, instance, vm_ref):
"""Destroys a VM record."""
try:
session.VM.destroy(vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Destroy VM failed'))
return
LOG.debug("VM destroyed", instance=instance)
def clean_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug("Shutting down VM (cleanly)", instance=instance)
try:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (cleanly) failed.'))
return False
return True
def hard_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug("Shutting down VM (hard)", instance=instance)
try:
session.call_xenapi('VM.hard_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (hard) failed'))
return False
return True
def is_vm_shutdown(session, vm_ref):
state = get_power_state(session, vm_ref)
if state == power_state.SHUTDOWN:
return True
return False
def is_enough_free_mem(session, instance):
flavor = instance.get_flavor()
mem = long(flavor.memory_mb) * units.Mi
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
session.host_ref))
return host_free_mem >= mem
def _should_retry_unplug_vbd(err):
# Retry if unplug failed with DEVICE_DETACH_REJECTED
# For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
# Since XenServer 6.2, we also need to retry if we get
# INTERNAL_ERROR, as that error goes away when you retry.
return (err == 'DEVICE_DETACH_REJECTED'
or
err == 'INTERNAL_ERROR')
def unplug_vbd(session, vbd_ref, this_vm_ref):
    # make sure we perform the unplug at least once
max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1
for num_attempt in range(1, max_attempts + 1):
try:
if num_attempt > 1:
greenthread.sleep(1)
session.VBD.unplug(vbd_ref, this_vm_ref)
return
except session.XenAPI.Failure as exc:
err = len(exc.details) > 0 and exc.details[0]
if err == 'DEVICE_ALREADY_DETACHED':
LOG.info(_LI('VBD %s already detached'), vbd_ref)
return
elif _should_retry_unplug_vbd(err):
                LOG.info(_LI('VBD %(vbd_ref)s unplug failed with "%(err)s", '
'attempt %(num_attempt)d/%(max_attempts)d'),
{'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
'max_attempts': max_attempts, 'err': err})
else:
LOG.exception(_LE('Unable to unplug VBD'))
raise exception.StorageError(
reason=_('Unable to unplug VBD %s') % vbd_ref)
raise exception.StorageError(
reason=_('Reached maximum number of retries '
'trying to unplug VBD %s')
% vbd_ref)
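# Example (hypothetical configuration): with the default num_vbd_unplug_retries
# of 10, unplug_vbd makes up to 11 attempts (the initial call plus 10 retries),
# sleeping one second before each retry; with the option set to 0 or below
# exactly one attempt is made, matching the option's help text above.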
def destroy_vbd(session, vbd_ref):
"""Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to destroy VBD'))
raise exception.StorageError(
reason=_('Unable to destroy VBD %s') % vbd_ref)
def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
read_only=False, bootable=False, osvol=False,
empty=False, unpluggable=True):
"""Create a VBD record and returns its reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
if vdi_ref is None:
vdi_ref = 'OpaqueRef:NULL'
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = read_only and 'RO' or 'RW'
vbd_rec['type'] = vbd_type
vbd_rec['unpluggable'] = unpluggable
vbd_rec['empty'] = empty
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ',
{'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.',
{'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
if osvol:
# set osvol=True in other-config to indicate this is an
# attached nova (or cinder) volume
session.call_xenapi('VBD.add_to_other_config',
vbd_ref, 'osvol', 'True')
return vbd_ref
def attach_cd(session, vm_ref, vdi_ref, userdevice):
"""Create an empty VBD, then insert the CD."""
vbd_ref = create_vbd(session, vm_ref, None, userdevice,
vbd_type='cd', read_only=True,
bootable=True, empty=True,
unpluggable=False)
session.call_xenapi('VBD.insert', vbd_ref, vdi_ref)
return vbd_ref
def destroy_vdi(session, vdi_ref):
try:
session.call_xenapi('VDI.destroy', vdi_ref)
except session.XenAPI.Failure:
msg = "Unable to destroy VDI %s" % vdi_ref
LOG.debug(msg, exc_info=True)
msg = _("Unable to destroy VDI %s") % vdi_ref
LOG.error(msg)
raise exception.StorageError(reason=msg)
def safe_destroy_vdis(session, vdi_refs):
"""Tries to destroy the requested VDIs, but ignores any errors."""
for vdi_ref in vdi_refs:
try:
destroy_vdi(session, vdi_ref)
except exception.StorageError:
msg = "Ignoring error while destroying VDI: %s" % vdi_ref
LOG.debug(msg)
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
read_only=False):
"""Create a VDI record and returns its reference."""
vdi_ref = session.call_xenapi("VDI.create",
{'name_label': name_label,
'name_description': disk_type,
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': _get_vdi_other_config(disk_type, instance=instance),
'sm_config': {},
'tags': []})
LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.',
{'vdi_ref': vdi_ref, 'name_label': name_label,
'virtual_size': virtual_size, 'read_only': read_only,
'sr_ref': sr_ref})
return vdi_ref
@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
"""This creates a temporary VM so that we can snapshot a VDI.
    VDIs can't be snapshotted directly since the API expects a `vm_ref`. To
work around this, we need to create a temporary VM and then map the VDI to
the VM using a temporary VBD.
"""
name_label = "dummy"
vm_ref = create_vm(session, instance, name_label, None, None)
try:
vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
read_only=True)
try:
yield vm_ref
finally:
try:
destroy_vbd(session, vbd_ref)
except exception.StorageError:
# destroy_vbd() will log error
pass
finally:
destroy_vm(session, instance, vm_ref)
def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
"""Copy a VDI and return the new VDIs reference.
This function differs from the XenAPI `VDI.copy` call in that the copy is
atomic and isolated, meaning we don't see half-downloaded images. It
    accomplishes this by copying the VDIs into a temporary directory and then
atomically renaming them into the SR when the copy is completed.
The correct long term solution is to fix `VDI.copy` so that it is atomic
and isolated.
"""
with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
label = "snapshot"
with snapshot_attached_here(
session, instance, vm_ref, label) as vdi_uuids:
imported_vhds = session.call_plugin_serialized(
'workarounds', 'safe_copy_vdis',
sr_path=get_sr_path(session, sr_ref=sr_ref),
vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack())
root_uuid = imported_vhds['root']['uuid']
# rescan to discover new VHDs
scan_default_sr(session)
vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
return vdi_ref
def _clone_vdi(session, vdi_to_clone_ref):
"""Clones a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
LOG.debug('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s',
{'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref})
return vdi_ref
def _get_vdi_other_config(disk_type, instance=None):
"""Return metadata to store in VDI's other_config attribute.
`nova_instance_uuid` is used to associate a VDI with a particular instance
so that, if it becomes orphaned from an unclean shutdown of a
compute-worker, we can safely detach it.
"""
other_config = {'nova_disk_type': disk_type}
# create_vdi may be called simply while creating a volume
# hence information about instance may or may not be present
if instance:
other_config['nova_instance_uuid'] = instance['uuid']
return other_config
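# Example (hypothetical instance): for disk_type='root' and an instance whose
# uuid is 'abc-123', _get_vdi_other_config returns
# {'nova_disk_type': 'root', 'nova_instance_uuid': 'abc-123'}; when called
# without an instance (e.g. while creating a bare volume VDI) only
# 'nova_disk_type' is present.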
def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description,
instance):
existing_other_config = session.call_xenapi('VDI.get_other_config',
vdi_ref)
session.call_xenapi('VDI.set_name_label', vdi_ref, name_label)
session.call_xenapi('VDI.set_name_description', vdi_ref, description)
other_config = _get_vdi_other_config(vdi_type, instance=instance)
for key, value in six.iteritems(other_config):
if key not in existing_other_config:
session.call_xenapi(
"VDI.add_to_other_config", vdi_ref, key, value)
def _vm_get_vbd_refs(session, vm_ref):
return session.call_xenapi("VM.get_VBDs", vm_ref)
def _vbd_get_rec(session, vbd_ref):
return session.call_xenapi("VBD.get_record", vbd_ref)
def _vdi_get_rec(session, vdi_ref):
return session.call_xenapi("VDI.get_record", vdi_ref)
def _vdi_get_uuid(session, vdi_ref):
return session.call_xenapi("VDI.get_uuid", vdi_ref)
def _vdi_snapshot(session, vdi_ref):
return session.call_xenapi("VDI.snapshot", vdi_ref, {})
def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'):
"""Retrieves the primary VDI for a VM."""
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
# Convention dictates the primary VDI will be userdevice 0
if vbd_rec['userdevice'] == userdevice:
vdi_ref = vbd_rec['VDI']
vdi_rec = _vdi_get_rec(session, vdi_ref)
return vdi_ref, vdi_rec
raise exception.NovaException(_("No primary VDI found for %s") % vm_ref)
def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0):
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
if int(vbd_rec['userdevice']) >= min_userdevice:
vdi_ref = vbd_rec['VDI']
yield _vdi_get_uuid(session, vdi_ref)
def _try_strip_base_mirror_from_vdi(session, vdi_ref):
try:
session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
"base_mirror")
except session.XenAPI.Failure:
LOG.debug("Error while removing sm_config", exc_info=True)
def strip_base_mirror_from_vdis(session, vm_ref):
# NOTE(johngarbutt) part of workaround for XenServer bug CA-98606
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_ref in vbd_refs:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
_try_strip_base_mirror_from_vdi(session, vdi_ref)
def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
possible_snapshot_parents = vdi_uuid_chain[1:]
if len(possible_snapshot_parents) == 0:
LOG.debug("No VHD chain.", instance=instance)
return
snapshot_uuids = _child_vhds(session, sr_ref, possible_snapshot_parents,
old_snapshots_only=True)
number_of_snapshots = len(snapshot_uuids)
if number_of_snapshots <= 0:
LOG.debug("No snapshots to remove.", instance=instance)
return
vdi_refs = [session.VDI.get_by_uuid(vdi_uuid)
for vdi_uuid in snapshot_uuids]
safe_destroy_vdis(session, vdi_refs)
# ensure garbage collector has been run
_scan_sr(session, sr_ref)
LOG.info(_LI("Deleted %s snapshots.") % number_of_snapshots,
instance=instance)
def remove_old_snapshots(session, instance, vm_ref):
"""See if there is an snapshot present that should be removed."""
LOG.debug("Starting remove_old_snapshots for VM", instance=instance)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
sr_ref = vm_vdi_rec["SR"]
_delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
post_snapshot_callback=None):
    # the impl method allows easier patching for tests
return _snapshot_attached_here_impl(session, instance, vm_ref, label,
userdevice, post_snapshot_callback)
def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
LOG.debug("Starting snapshot for VM", instance=instance)
# Memorize the VDI chain so we can poll for coalesce
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
userdevice)
chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
sr_ref = vm_vdi_rec["SR"]
# clean up after any interrupted snapshot attempts
_delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)
snapshot_ref = _vdi_snapshot(session, vm_vdi_ref)
if post_snapshot_callback is not None:
post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
# When the VDI snapshot is taken a new parent is introduced.
# If we have taken a snapshot before, the new parent can be coalesced.
# We need to wait for this to happen before trying to copy the chain.
_wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
vdi_uuid_chain)
snapshot_uuid = _vdi_get_uuid(session, snapshot_ref)
chain = _walk_vdi_chain(session, snapshot_uuid)
vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain]
yield vdi_uuids
finally:
safe_destroy_vdis(session, [snapshot_ref])
        # TODO(johngarbutt) we need to check the snapshot has been coalesced
        # now that its associated VDI has been deleted.
def get_sr_path(session, sr_ref=None):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
if sr_ref is None:
sr_ref = safe_find_sr(session)
pbd_rec = session.call_xenapi("PBD.get_all_records_where",
'field "host"="%s" and '
'field "SR"="%s"' %
(session.host_ref, sr_ref))
# NOTE(bobball): There can only be one PBD for a host/SR pair, but path is
# not always present - older versions of XS do not set it.
pbd_ref = pbd_rec.keys()[0]
device_config = pbd_rec[pbd_ref]['device_config']
if 'path' in device_config:
return device_config['path']
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
if sr_rec["type"] not in ["ext", "nfs"]:
raise exception.NovaException(
_("Only file-based SRs (ext/NFS) are supported by this feature."
" SR %(uuid)s is of type %(type)s") %
{"uuid": sr_uuid, "type": sr_rec["type"]})
return os.path.join(CONF.xenserver.sr_base_path, sr_uuid)
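# Example (hypothetical SR): when the PBD's device_config carries no 'path'
# key, an ext SR with uuid 'abcd-1234' resolves to
# os.path.join(CONF.xenserver.sr_base_path, 'abcd-1234'); a non file-based SR
# type such as 'lvm' raises NovaException instead.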
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
"""Destroy used or unused cached images.
A cached image that is being used by at least one VM is said to be 'used'.
In the case of an 'unused' image, the cached image will be the only
    descendant of the base-copy. So when we delete the cached-image, the
refcount will drop to zero and XenServer will automatically destroy the
base-copy for us.
The default behavior of this function is to destroy only 'unused' cached
images. To destroy all cached images, use the `all_cached=True` kwarg.
"""
cached_images = _find_cached_images(session, sr_ref)
destroyed = set()
def destroy_cached_vdi(vdi_uuid, vdi_ref):
        LOG.debug("Destroying cached VDI '%(vdi_uuid)s'",
                  {'vdi_uuid': vdi_uuid})
if not dry_run:
destroy_vdi(session, vdi_ref)
destroyed.add(vdi_uuid)
for vdi_ref in cached_images.values():
vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)
if all_cached:
destroy_cached_vdi(vdi_uuid, vdi_ref)
continue
# Unused-Only: Search for siblings
# Chain length greater than two implies a VM must be holding a ref to
# the base-copy (otherwise it would have coalesced), so consider this
# cached image used.
chain = list(_walk_vdi_chain(session, vdi_uuid))
if len(chain) > 2:
continue
elif len(chain) == 2:
# Siblings imply cached image is used
root_vdi_rec = chain[-1]
children = _child_vhds(session, sr_ref, [root_vdi_rec['uuid']])
if len(children) > 1:
continue
destroy_cached_vdi(vdi_uuid, vdi_ref)
return destroyed
def _find_cached_images(session, sr_ref):
"""Return a dict(uuid=vdi_ref) representing all cached images."""
cached_images = {}
for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
try:
image_id = vdi_rec['other_config']['image-id']
except KeyError:
continue
cached_images[image_id] = vdi_ref
return cached_images
def _find_cached_image(session, image_id, sr_ref):
"""Returns the vdi-ref of the cached image."""
name_label = _get_image_vdi_label(image_id)
recs = session.call_xenapi("VDI.get_all_records_where",
'field "name__label"="%s"'
% name_label)
number_found = len(recs)
if number_found > 0:
if number_found > 1:
LOG.warning(_LW("Multiple base images for image: %s"), image_id)
return recs.keys()[0]
def _get_resize_func_name(session):
brand = session.product_brand
version = session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if version and brand:
xcp = brand == 'XCP'
r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
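# Illustration (hypothetical versions): XenServer 6.2 and XCP 1.6 both select
# 'VDI.resize', while the legacy branches (XenServer 5.x or XCP releases below
# 1.2) fall back to 'VDI.resize_online'.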
def _vdi_get_virtual_size(session, vdi_ref):
size = session.call_xenapi('VDI.get_virtual_size', vdi_ref)
return int(size)
def _vdi_resize(session, vdi_ref, new_size):
resize_func_name = _get_resize_func_name(session)
session.call_xenapi(resize_func_name, vdi_ref, str(new_size))
def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
virtual_size = _vdi_get_virtual_size(session, vdi_ref)
new_disk_size = new_gb * units.Gi
msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size},
instance=instance)
if virtual_size < new_disk_size:
# For resize up. Simple VDI resize will do the trick
_vdi_resize(session, vdi_ref, new_disk_size)
elif virtual_size == new_disk_size:
LOG.debug("No need to change vdi virtual size.",
instance=instance)
else:
# NOTE(johngarbutt): we should never get here
# but if we don't raise an exception, a user might be able to use
# more storage than allowed by their chosen instance flavor
msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger "
"than flavor size of %(new_disk_size)d bytes.")
msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size}
LOG.debug(msg, instance=instance)
raise exception.ResizeError(reason=msg)
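# Example (hypothetical sizes): growing a 10 GiB VDI to new_gb=20 issues a
# resize to 20 * units.Gi bytes; a request that would shrink the VDI (say
# new_gb=5 against the same 10 GiB VDI) raises ResizeError instead, as
# explained in the NOTE above.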
def resize_disk(session, instance, vdi_ref, flavor):
size_gb = flavor['root_gb']
if size_gb == 0:
reason = _("Can't resize a disk to 0 GB.")
raise exception.ResizeError(reason=reason)
sr_ref = safe_find_sr(session)
clone_ref = _clone_vdi(session, vdi_ref)
try:
# Resize partition and filesystem down
_auto_configure_disk(session, clone_ref, size_gb)
# Create new VDI
vdi_size = size_gb * units.Gi
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
'root', vdi_size)
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = size_gb * units.Gi
_copy_partition(session, clone_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
finally:
destroy_vdi(session, clone_ref)
def _auto_configure_disk(session, vdi_ref, new_gb):
"""Partition and resize FS to match the size specified by
flavors.root_gb.
This is a fail-safe to prevent accidentally destroying data on a disk
erroneously marked as auto_disk_config=True.
The criteria for allowing resize are:
1. 'auto_disk_config' must be true for the instance (and image).
(If we've made it here, then auto_disk_config=True.)
2. The disk must have only one partition.
3. The file-system on the one partition must be ext3 or ext4.
"""
if new_gb == 0:
LOG.debug("Skipping auto_config_disk as destination size is 0GB")
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
partitions = _get_partitions(dev)
if len(partitions) != 1:
reason = _('Disk must have only one partition.')
raise exception.CannotResizeDisk(reason=reason)
num, start, old_sectors, fstype, name, flags = partitions[0]
if fstype not in ('ext3', 'ext4'):
reason = _('Disk contains a filesystem '
'we are unable to resize: %s')
raise exception.CannotResizeDisk(reason=(reason % fstype))
if num != 1:
reason = _('The only partition should be partition 1.')
raise exception.CannotResizeDisk(reason=reason)
new_sectors = new_gb * units.Gi / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)
def try_auto_configure_disk(session, vdi_ref, new_gb):
try:
_auto_configure_disk(session, vdi_ref, new_gb)
except exception.CannotResizeDisk as e:
        msg = _LW('Attempted auto_configure_disk failed because: %s')
        LOG.warn(msg, e)
def _make_partition(session, dev, partition_start, partition_end):
dev_path = utils.make_dev_path(dev)
# NOTE(bobball) If this runs in Dom0, parted will error trying
# to re-read the partition table and return a generic error
utils.execute('parted', '--script', dev_path,
'mklabel', 'msdos', run_as_root=True,
check_exit_code=not session.is_local_connection)
utils.execute('parted', '--script', dev_path, '--',
'mkpart', 'primary',
partition_start,
partition_end,
run_as_root=True,
check_exit_code=not session.is_local_connection)
partition_path = utils.make_dev_path(dev, partition=1)
if session.is_local_connection:
# Need to refresh the partitions
utils.trycmd('kpartx', '-a', dev_path,
run_as_root=True,
discard_warnings=True)
# Sometimes the partition gets created under /dev/mapper, depending
# on the setup in dom0.
mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
if os.path.exists(mapper_path):
return mapper_path
return partition_path
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
disk_type, size_mb, fs_type):
"""Steps to programmatically generate a disk:
1. Create VDI of desired size
2. Attach VDI to compute worker
3. Create partition
4. Create VBD between instance VM and VDI
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = units.Mi
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
try:
# 2. Attach VDI to compute worker (VBD hotplug)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
# 3. Create partition
partition_start = "0"
partition_end = "-0"
partition_path = _make_partition(session, dev,
partition_start, partition_end)
if fs_type == 'linux-swap':
utils.execute('mkswap', partition_path, run_as_root=True)
elif fs_type is not None:
utils.execute('mkfs', '-t', fs_type, partition_path,
run_as_root=True)
# 4. Create VBD between instance VM and VDI
if vm_ref:
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while generating disk number: %s" % userdevice
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
return vdi_ref
def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
# partition because that is what parted supports.
is_windows = instance['os_type'] == "windows"
fs_type = "vfat" if is_windows else "linux-swap"
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'swap', swap_mb, fs_type)
def get_ephemeral_disk_sizes(total_size_gb):
if not total_size_gb:
return
max_size_gb = 2000
if total_size_gb % 1024 == 0:
max_size_gb = 1024
left_to_allocate = total_size_gb
while left_to_allocate > 0:
size_gb = min(max_size_gb, left_to_allocate)
yield size_gb
left_to_allocate -= size_gb
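# Example (hypothetical sizes): list(get_ephemeral_disk_sizes(4030)) yields
# [2000, 2000, 30], keeping every disk under the 2000 GB cap, while an exact
# multiple of 1024 such as 3072 yields [1024, 1024, 1024] so the chunks stay
# at neat 1 TiB sizes.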
def generate_single_ephemeral(session, instance, vm_ref, userdevice,
size_gb, instance_name_label=None):
if instance_name_label is None:
instance_name_label = instance["name"]
name_label = "%s ephemeral" % instance_name_label
# TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
label_number = int(userdevice) - 4
if label_number > 0:
name_label = "%s (%d)" % (name_label, label_number)
return _generate_disk(session, instance, vm_ref, str(userdevice),
name_label, 'ephemeral', size_gb * 1024,
CONF.default_ephemeral_format)
def generate_ephemeral(session, instance, vm_ref, first_userdevice,
instance_name_label, total_size_gb):
# NOTE(johngarbutt): max possible size of a VHD disk is 2043GB
sizes = get_ephemeral_disk_sizes(total_size_gb)
first_userdevice = int(first_userdevice)
vdi_refs = []
try:
for userdevice, size_gb in enumerate(sizes, start=first_userdevice):
ref = generate_single_ephemeral(session, instance, vm_ref,
userdevice, size_gb,
instance_name_label)
vdi_refs.append(ref)
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.debug("Error when generating ephemeral disk. "
"Device: %(userdevice)s Size GB: %(size_gb)s "
"Error: %(exc)s", {
'userdevice': userdevice,
'size_gb': size_gb,
'exc': exc})
safe_destroy_vdis(session, vdi_refs)
def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
name_label, size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'user', size_gb * 1024, CONF.default_ephemeral_format)
def generate_configdrive(session, instance, vm_ref, userdevice,
network_info, admin_password=None, files=None):
sr_ref = safe_find_sr(session)
vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
try:
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md,
network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive')
cdb.make_drive(tmp_file)
dev_path = utils.make_dev_path(dev)
utils.execute('dd',
'if=%s' % tmp_file,
'of=%s' % dev_path,
'oflag=direct,sync',
run_as_root=True)
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
read_only=True)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while generating config drive"
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
def _create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
If the image is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
filename = ""
if CONF.xenserver.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
if filename == "":
return _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
else:
vdi_type = ImageType.to_string(image_type)
return {vdi_type: dict(uuid=None, file=filename)}
def create_kernel_and_ramdisk(context, session, instance, name_label):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['kernel_id'],
ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['ramdisk_id'],
ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
return kernel_file, ramdisk_file
def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
args = {}
if kernel:
args['kernel-file'] = kernel
if ramdisk:
args['ramdisk-file'] = ramdisk
if args:
LOG.debug("Removing kernel/ramdisk files from dom0",
instance=instance)
session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
def _get_image_vdi_label(image_id):
return 'Glance Image %s' % image_id
def _create_cached_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = safe_find_sr(session)
sr_type = session.call_xenapi('SR.get_type', sr_ref)
if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_LW("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %s. Ignoring the cow flag."), sr_type)
@utils.synchronized('xenapi-image-cache' + image_id)
def _create_cached_image_impl(context, session, instance, name_label,
image_id, image_type, sr_ref):
cache_vdi_ref = _find_cached_image(session, image_id, sr_ref)
downloaded = False
if cache_vdi_ref is None:
downloaded = True
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
cache_vdi_ref = session.call_xenapi(
'VDI.get_by_uuid', vdis['root']['uuid'])
session.call_xenapi('VDI.set_name_label', cache_vdi_ref,
_get_image_vdi_label(image_id))
session.call_xenapi('VDI.set_name_description', cache_vdi_ref,
'root')
session.call_xenapi('VDI.add_to_other_config',
cache_vdi_ref, 'image-id', str(image_id))
if CONF.use_cow_images:
new_vdi_ref = _clone_vdi(session, cache_vdi_ref)
elif sr_type == 'ext':
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance,
cache_vdi_ref)
else:
new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref,
sr_ref)
session.call_xenapi('VDI.set_name_label', new_vdi_ref, '')
session.call_xenapi('VDI.set_name_description', new_vdi_ref, '')
session.call_xenapi('VDI.remove_from_other_config',
new_vdi_ref, 'image-id')
vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
return downloaded, vdi_uuid
downloaded, vdi_uuid = _create_cached_image_impl(context, session,
instance, name_label,
image_id, image_type,
sr_ref)
vdis = {}
vdi_type = ImageType.get_role(image_type)
vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
return downloaded, vdis
def create_image(context, session, instance, name_label, image_id,
image_type):
"""Creates VDI from the image stored in the local cache. If the image
is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
cache_images = CONF.xenserver.cache_images.lower()
# Determine if the image is cacheable
if image_type == ImageType.DISK_ISO:
cache = False
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
except KeyError:
cache = False
elif cache_images == 'none':
cache = False
else:
LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to"
" True"), CONF.xenserver.cache_images)
cache = True
# Fetch (and cache) the image
start_time = timeutils.utcnow()
if cache:
downloaded, vdis = _create_cached_image(context, session, instance,
name_label, image_id,
image_type)
else:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
downloaded = True
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
LOG.info(_LI("Image creation data, cacheable: %(cache)s, "
"downloaded: %(downloaded)s duration: %(duration).2f secs "
"for image %(image_id)s"),
{'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
'duration': duration})
for vdi_type, vdi in six.iteritems(vdis):
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
_set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
instance)
return vdis
def _fetch_image(context, session, instance, name_label, image_id, image_type):
"""Fetch image from glance based on image type.
Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
vdis = _fetch_vhd_image(context, session, instance, image_id)
else:
vdis = _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
for vdi_type, vdi in six.iteritems(vdis):
vdi_uuid = vdi['uuid']
LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'",
{'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid},
instance=instance)
return vdis
def _make_uuid_stack():
# NOTE(sirp): The XenAPI plugins run under Python 2.4
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
return [str(uuid.uuid4()) for i in range(MAX_VDI_CHAIN_SIZE)]
def _image_uses_bittorrent(context, instance):
bittorrent = False
torrent_images = CONF.xenserver.torrent_images.lower()
if torrent_images == 'all':
bittorrent = True
elif torrent_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
bittorrent = strutils.bool_from_string(
sys_meta['image_bittorrent'])
except KeyError:
pass
elif torrent_images == 'none':
pass
else:
LOG.warning(_LW("Invalid value '%s' for torrent_images"),
torrent_images)
return bittorrent
def _default_download_handler():
# TODO(sirp): This should be configurable like upload_handler
return importutils.import_object(
'nova.virt.xenapi.image.glance.GlanceStore')
def _choose_download_handler(context, instance):
if _image_uses_bittorrent(context, instance):
return importutils.import_object(
'nova.virt.xenapi.image.bittorrent.BittorrentStore')
else:
return _default_download_handler()
def get_compression_level():
level = CONF.xenserver.image_compression_level
if level is not None and (level < 1 or level > 9):
LOG.warning(_LW("Invalid value '%d' for image_compression_level"),
level)
return None
return level
def _fetch_vhd_image(context, session, instance, image_id):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
LOG.debug("Asking xapi to fetch vhd image %s", image_id,
instance=instance)
handler = _choose_download_handler(context, instance)
try:
vdis = handler.download_image(context, session, instance, image_id)
except Exception:
default_handler = _default_download_handler()
# Using type() instead of isinstance() so instance of subclass doesn't
# test as equivalent
if type(handler) == type(default_handler):
raise
LOG.exception(_LE("Download handler '%(handler)s' raised an"
" exception, falling back to default handler"
" '%(default_handler)s'"),
{'handler': handler,
'default_handler': default_handler})
vdis = default_handler.download_image(
context, session, instance, image_id)
    # Ensure we can see the imported VHDs as VDIs
scan_default_sr(session)
vdi_uuid = vdis['root']['uuid']
try:
_check_vdi_size(context, session, instance, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while checking vdi size"
LOG.debug(msg, instance=instance, exc_info=True)
for vdi in vdis.values():
vdi_uuid = vdi['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
safe_destroy_vdis(session, [vdi_ref])
return vdis
def _get_vdi_chain_size(session, vdi_uuid):
"""Compute the total size of a VDI chain, starting with the specified
VDI UUID.
    This will walk the VDI chain to the root, adding the size of each VDI
    to the total.
"""
size_bytes = 0
for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d',
{'cur_vdi_uuid': cur_vdi_uuid,
'vdi_size_bytes': vdi_size_bytes})
size_bytes += vdi_size_bytes
return size_bytes
def _check_vdi_size(context, session, instance, vdi_uuid):
flavor = instance.get_flavor()
allowed_size = (flavor.root_gb +
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
if not flavor.root_gb:
# root_gb=0 indicates that we're disabling size checks
return
size = _get_vdi_chain_size(session, vdi_uuid)
if size > allowed_size:
LOG.error(_LE("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d"),
{'size': size, 'allowed_size': allowed_size},
instance=instance)
raise exception.FlavorDiskTooSmall()
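# Worked example (hypothetical flavor): with root_gb=20 and the 10 GB fudge
# factor defined above, allowed_size is 30 GiB; a downloaded VHD chain whose
# combined physical_utilisation comes to 32 GiB raises FlavorDiskTooSmall,
# while root_gb=0 skips the check entirely.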
def _fetch_disk_image(context, session, instance, name_label, image_id,
image_type):
"""Fetch the image from Glance
NOTE:
Unlike _fetch_vhd_image, this method does not use the Glance
plugin; instead, it streams the disks through domU to the VDI
directly.
    Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
image_type_str = ImageType.to_string(image_type)
LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s",
{'image_id': image_id, 'image_type_str': image_type_str},
instance=instance)
if image_type == ImageType.DISK_ISO:
sr_ref = _safe_find_iso_sr(session)
else:
sr_ref = safe_find_sr(session)
glance_image = image_utils.GlanceImage(context, image_id)
if glance_image.is_raw_tgz():
image = image_utils.RawTGZImage(glance_image)
else:
image = image_utils.RawImage(glance_image)
virtual_size = image.get_size()
vdi_size = virtual_size
LOG.debug("Size for image %(image_id)s: %(virtual_size)d",
{'image_id': image_id, 'virtual_size': virtual_size},
instance=instance)
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
vdi_size > CONF.xenserver.max_kernel_ramdisk_size):
max_size = CONF.xenserver.max_kernel_ramdisk_size
raise exception.NovaException(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") %
{'vdi_size': vdi_size, 'max_size': max_size})
vdi_ref = create_vdi(session, sr_ref, instance, name_label,
image_type_str, vdi_size)
# From this point we have a VDI on Xen host;
# If anything goes wrong, we need to remember its uuid.
try:
filename = None
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_stream_disk(
session, image.stream_to, image_type, virtual_size, dev)
if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
# We need to invoke a plugin for copying the
# content of the VDI into the proper path.
LOG.debug("Copying VDI %s to /boot/guest on dom0",
vdi_ref, instance=instance)
args = {}
args['vdi-ref'] = vdi_ref
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
if CONF.xenserver.cache_images:
args['cached-image'] = image_id
filename = session.call_plugin('kernel', 'copy_vdi', args)
# Remove the VDI as it is not needed anymore.
destroy_vdi(session, vdi_ref)
LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref,
instance=instance)
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=None, file=filename)}
else:
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures.
LOG.exception(_LE("Failed to fetch glance image"),
instance=instance)
e.args = e.args + ([dict(type=ImageType.to_string(image_type),
uuid=vdi_uuid,
file=filename)],)
raise
def determine_disk_image_type(image_meta):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
1. If we're using Glance, we can use the image_type field to
determine the image_type
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
if not image_meta or 'disk_format' not in image_meta:
return None
disk_format = image_meta['disk_format']
disk_format_map = {
'ami': ImageType.DISK,
'aki': ImageType.KERNEL,
'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD,
'iso': ImageType.DISK_ISO,
}
try:
image_type = disk_format_map[disk_format]
except KeyError:
raise exception.InvalidDiskFormat(disk_format=disk_format)
image_ref = image_meta.get('id')
params = {
'image_type_str': ImageType.to_string(image_type),
'image_ref': image_ref
}
LOG.debug("Detected %(image_type_str)s format for image %(image_ref)s",
params)
return image_type
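# Example (hypothetical image metadata): {'disk_format': 'vhd'} maps to
# ImageType.DISK_VHD and {'disk_format': 'ami'} to ImageType.DISK, whereas an
# unsupported format such as 'qcow2' raises InvalidDiskFormat.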
def determine_vm_mode(instance, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
os_type = instance['os_type']
if os_type == "linux":
return vm_mode.XEN
if os_type == "windows":
return vm_mode.HVM
# disk_image_type specific default for backwards compatibility
if disk_image_type == ImageType.DISK_VHD or \
disk_image_type == ImageType.DISK:
return vm_mode.XEN
# most images run OK as HVM
return vm_mode.HVM
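# Illustration: an instance that explicitly requests vm_mode 'xen' or 'hvm'
# keeps that mode; otherwise os_type 'linux' implies PV (XEN), 'windows'
# implies HVM, and for anything else the disk image type decides
# (DISK/DISK_VHD -> XEN, all other types -> HVM).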
def set_vm_name_label(session, vm_ref, name_label):
session.call_xenapi("VM.set_name_label", vm_ref, name_label)
def list_vms(session):
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="false" and '
'field "is_a_template"="false" and '
'field "resident_on"="%s"' % session.host_ref)
for vm_ref in vms.keys():
yield vm_ref, vms[vm_ref]
def lookup_vm_vdis(session, vm_ref):
"""Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
vdi_refs = []
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
# Test valid VDI
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
LOG.debug('VDI %s is still available', vdi_uuid)
vbd_other_config = session.call_xenapi("VBD.get_other_config",
vbd_ref)
if not vbd_other_config.get('osvol'):
# This is not an attached volume
vdi_refs.append(vdi_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('"Look for the VDIs failed'))
return vdi_refs
def lookup(session, name_label, check_rescue=False):
"""Look the instance up and return it if available.
:param:check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
if check_rescue:
result = lookup(session, name_label + '-rescue', False)
if result:
return result
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
return None
elif n > 1:
raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
key = str(instance['key_data'])
net = netutils.get_injected_network_template(network_info)
metadata = instance['metadata']
    # As mounting the image VDI is expensive, we only want to do it once,
# if at all, so determine whether it's required first, and then do
# everything
mount_required = key or net or metadata
if not mount_required:
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_mounted_processing(dev, key, net, metadata)
def lookup_kernel_ramdisk(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
else:
return (None, None)
def is_snapshot(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
else:
return False
def get_power_state(session, vm_ref):
xapi_state = session.call_xenapi("VM.get_power_state", vm_ref)
return XENAPI_POWER_STATE[xapi_state]
def compile_info(session, vm_ref):
"""Fill record with VM status information."""
power_state = get_power_state(session, vm_ref)
max_mem = session.call_xenapi("VM.get_memory_static_max", vm_ref)
mem = session.call_xenapi("VM.get_memory_dynamic_max", vm_ref)
num_cpu = session.call_xenapi("VM.get_VCPUs_max", vm_ref)
return hardware.InstanceInfo(state=power_state,
max_mem_kb=long(max_mem) >> 10,
mem_kb=long(mem) >> 10,
num_cpu=num_cpu)
def compile_instance_diagnostics(instance, vm_rec):
vm_power_state_int = XENAPI_POWER_STATE[vm_rec['power_state']]
vm_power_state = power_state.STATE_MAP[vm_power_state_int]
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=vm_power_state,
driver='xenapi',
config_drive=config_drive)
for cpu_num in range(0, long(vm_rec['VCPUs_max'])):
diags.add_cpu()
for vif in vm_rec['VIFs']:
diags.add_nic()
for vbd in vm_rec['VBDs']:
diags.add_disk()
max_mem_bytes = long(vm_rec['memory_dynamic_max'])
diags.memory_details.maximum = max_mem_bytes / units.Mi
return diags
def compile_diagnostics(vm_rec):
"""Compile VM diagnostics data."""
try:
keys = []
diags = {}
vm_uuid = vm_rec["uuid"]
xml = _get_rrd(_get_rrd_server(), vm_uuid)
if xml:
rrd = minidom.parseString(xml)
for i, node in enumerate(rrd.firstChild.childNodes):
# Provide the last update of the information
if node.localName == 'lastupdate':
diags['last_update'] = node.firstChild.data
# Create a list of the diagnostic keys (in their order)
if node.localName == 'ds':
ref = node.childNodes
# Name and Value
if len(ref) > 6:
keys.append(ref[0].firstChild.data)
# Read the last row of the first RRA to get the latest info
if node.localName == 'rra':
rows = node.childNodes[4].childNodes
last_row = rows[rows.length - 1].childNodes
for j, value in enumerate(last_row):
diags[keys[j]] = value.firstChild.data
break
return diags
except expat.ExpatError as e:
LOG.exception(_LE('Unable to parse rrd of %s'), e)
return {"Unable to retrieve diagnostics": e}
def fetch_bandwidth(session):
bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
return bw
def _scan_sr(session, sr_ref=None, max_attempts=4):
if sr_ref:
# NOTE(johngarbutt) xenapi will collapse any duplicate requests
# for SR.scan if there is already a scan in progress.
# However, we don't want that, because the scan may have started
# before we modified the underlying VHDs on disk through a plugin.
# Using our own mutex will reduce cases where our periodic SR scan
# in host.update_status starts racing the sr.scan after a plugin call.
@utils.synchronized('sr-scan-' + sr_ref)
def do_scan(sr_ref):
LOG.debug("Scanning SR %s", sr_ref)
attempt = 1
while True:
try:
return session.call_xenapi('SR.scan', sr_ref)
except session.XenAPI.Failure as exc:
with excutils.save_and_reraise_exception() as ctxt:
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
if attempt < max_attempts:
ctxt.reraise = False
LOG.warning(_LW("Retry SR scan due to error: "
"%s"), exc)
greenthread.sleep(2 ** attempt)
attempt += 1
do_scan(sr_ref)
def scan_default_sr(session):
"""Looks for the system default SR and triggers a re-scan."""
sr_ref = safe_find_sr(session)
_scan_sr(session, sr_ref)
return sr_ref
def safe_find_sr(session):
"""Same as _find_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = _find_sr(session)
if sr_ref is None:
raise exception.StorageRepositoryNotFound()
return sr_ref
def _find_sr(session):
"""Return the storage repository to hold VM images."""
host = session.host_ref
try:
tokens = CONF.xenserver.sr_matching_filter.split(':')
filter_criteria = tokens[0]
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect "
"formatting convention"),
CONF.xenserver.sr_matching_filter)
return None
if filter_criteria == 'other-config':
key, value = filter_pattern.split('=', 1)
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
if not (key in sr_rec['other_config'] and
sr_rec['other_config'][key] == value):
continue
for pbd_ref in sr_rec['PBDs']:
pbd_rec = session.get_rec('PBD', pbd_ref)
if pbd_rec and pbd_rec['host'] == host:
return sr_ref
elif filter_criteria == 'default-sr' and filter_pattern == 'true':
pool_ref = session.call_xenapi('pool.get_all')[0]
sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref)
if sr_ref:
return sr_ref
# No SR found!
LOG.error(_LE("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."))
return None
def _safe_find_iso_sr(session):
"""Same as _find_iso_sr except raises a NotFound exception if SR
cannot be determined
"""
sr_ref = _find_iso_sr(session)
if sr_ref is None:
raise exception.NotFound(_('Cannot find SR of content-type ISO'))
return sr_ref
def _find_iso_sr(session):
"""Return the storage repository to hold ISO images."""
host = session.host_ref
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug("ISO: looking at SR %s", sr_rec)
if not sr_rec['content_type'] == 'iso':
LOG.debug("ISO: not iso content")
continue
if 'i18n-key' not in sr_rec['other_config']:
LOG.debug("ISO: iso content_type, no 'i18n-key' key")
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
LOG.debug("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'")
continue
LOG.debug("ISO: SR MATCHing our criteria")
for pbd_ref in sr_rec['PBDs']:
LOG.debug("ISO: ISO, looking to see if it is host local")
pbd_rec = session.get_rec('PBD', pbd_ref)
if not pbd_rec:
LOG.debug("ISO: PBD %s disappeared", pbd_ref)
continue
pbd_rec_host = pbd_rec['host']
LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s",
{'pbd_rec': pbd_rec, 'host': host})
if pbd_rec_host == host:
LOG.debug("ISO: SR with local PBD")
return sr_ref
return None
def _get_rrd_server():
"""Return server's scheme and address to use for retrieving RRD XMLs."""
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return [xs_url.scheme, xs_url.netloc]
def _get_rrd(server, vm_uuid):
"""Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
CONF.xenserver.connection_username,
CONF.xenserver.connection_password,
server[1],
vm_uuid))
return xml.read()
except IOError:
LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'),
{'vm_uuid': vm_uuid, 'server': server})
return None
def _get_all_vdis_in_sr(session, sr_ref):
for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
vdi_rec = session.get_rec('VDI', vdi_ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_rec call
if vdi_rec:
yield vdi_ref, vdi_rec
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
"""Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref):
yield vdi_ref
except session.XenAPI.Failure:
continue
def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None):
if vdi_rec is None:
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
if 'vhd-parent' not in vdi_rec['sm_config']:
return None
parent_uuid = vdi_rec['sm_config']['vhd-parent']
vdi_uuid = vdi_rec['uuid']
LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s',
{'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid})
return parent_uuid
def _walk_vdi_chain(session, vdi_uuid):
"""Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
yield vdi_rec
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec)
if not parent_uuid:
break
vdi_uuid = parent_uuid
def _is_vdi_a_snapshot(vdi_rec):
"""Ensure VDI is a snapshot, and not cached image."""
is_a_snapshot = vdi_rec['is_a_snapshot']
image_id = vdi_rec['other_config'].get('image-id')
return is_a_snapshot and not image_id
def _child_vhds(session, sr_ref, vdi_uuid_list, old_snapshots_only=False):
"""Return the immediate children of a given VHD.
This is not recursive, only the immediate children are returned.
"""
children = set()
for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
rec_uuid = rec['uuid']
if rec_uuid in vdi_uuid_list:
continue
parent_uuid = _get_vhd_parent_uuid(session, ref, rec)
if parent_uuid not in vdi_uuid_list:
continue
if old_snapshots_only and not _is_vdi_a_snapshot(rec):
continue
children.add(rec_uuid)
return list(children)
def _count_children(session, parent_vdi_uuid, sr_ref):
# Search for any other vdi which has the same parent as us to work out
# whether we have siblings and therefore if coalesce is possible
children = 0
for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
if (rec['sm_config'].get('vhd-parent') == parent_vdi_uuid):
children = children + 1
return children
def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
vdi_uuid_list):
"""Spin until the parent VHD is coalesced into one of the VDIs in the list
vdi_uuid_list is a list of acceptable final parent VDIs for vdi_ref; once
    the parent of vdi_ref is in vdi_uuid_list we consider the coalesce over.
The use case is there are any number of VDIs between those in
vdi_uuid_list and vdi_ref that we expect to be coalesced, but any of those
in vdi_uuid_list may also be coalesced (except the base UUID - which is
guaranteed to remain)
"""
# If the base disk was a leaf node, there will be no coalescing
# after a VDI snapshot.
if len(vdi_uuid_list) == 1:
LOG.debug("Old chain is single VHD, coalesce not possible.",
instance=instance)
return
# If the parent of the original disk has other children,
# there will be no coalesce because of the VDI snapshot.
# For example, the first snapshot for an instance that has been
# spawned from a cached image, will not coalesce, because of this rule.
parent_vdi_uuid = vdi_uuid_list[1]
if _count_children(session, parent_vdi_uuid, sr_ref) > 1:
LOG.debug("Parent has other children, coalesce is unlikely.",
instance=instance)
return
# When the VDI snapshot is taken, a new parent is created.
# Assuming it is not one of the above cases, that new parent
# can be coalesced, so we need to wait for that to happen.
max_attempts = CONF.xenserver.vhd_coalesce_max_attempts
# Remove the leaf node from list, to get possible good parents
# when the coalesce has completed.
    # It's possible that other coalesce operations happen, so we need
# to consider the full chain, rather than just the most recent parent.
good_parent_uuids = vdi_uuid_list[1:]
for i in range(max_attempts):
# NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
# matches the underlying VHDs.
# This can also kick XenServer into performing a pending coalesce.
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
if parent_uuid and (parent_uuid not in good_parent_uuids):
LOG.debug("Parent %(parent_uuid)s not yet in parent list"
" %(good_parent_uuids)s, waiting for coalesce...",
{'parent_uuid': parent_uuid,
'good_parent_uuids': good_parent_uuids},
instance=instance)
else:
LOG.debug("Coalesce detected, because parent is: %s" % parent_uuid,
instance=instance)
return
greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval)
msg = (_("VHD coalesce attempts exceeded (%d)"
", giving up...") % max_attempts)
raise exception.NovaException(msg)
def _remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
fixed in future versions:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
should_remap = CONF.xenserver.remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
new_prefix = CONF.xenserver.remap_vbd_dev_prefix
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
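# Illustrative example (hypothetical values): with remap_vbd_dev_prefix set to
# 'sd', _remap_vbd_dev('xvdb') returns 'sdb'; with remapping disabled, the device
# name is returned unchanged.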
def _wait_for_device(dev):
"""Wait for device node to appear."""
for i in range(0, CONF.xenserver.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return
time.sleep(1)
raise exception.StorageError(
reason=_('Timeout waiting for device %s to be created') % dev)
def cleanup_attached_vdis(session):
"""Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
except session.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
if 'nova_instance_uuid' in vdi_rec['other_config']:
# Belongs to an instance and probably left over after an
# unclean restart
LOG.info(_LI('Disconnecting stale VDI %s from compute domU'),
vdi_rec['uuid'])
unplug_vbd(session, vbd_ref, this_vm_ref)
destroy_vbd(session, vbd_ref)
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
this_vm_ref = _get_this_vm_ref(session)
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
read_only=read_only, bootable=False)
try:
LOG.debug('Plugging VBD %s ... ', vbd_ref)
session.VBD.plug(vbd_ref, this_vm_ref)
try:
LOG.debug('Plugging VBD %s done.', vbd_ref)
orig_dev = session.call_xenapi("VBD.get_device", vbd_ref)
LOG.debug('VBD %(vbd_ref)s plugged as %(orig_dev)s',
{'vbd_ref': vbd_ref, 'orig_dev': orig_dev})
dev = _remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s',
{'vbd_ref': vbd_ref, 'dev': dev})
_wait_for_device(dev)
yield dev
finally:
utils.execute('sync', run_as_root=True)
LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
unplug_vbd(session, vbd_ref, this_vm_ref)
finally:
try:
destroy_vbd(session, vbd_ref)
except exception.StorageError:
# destroy_vbd() will log error
pass
LOG.debug('Destroying VBD for VDI %s done.', vdi_ref)
def _get_sys_hypervisor_uuid():
    with open('/sys/hypervisor/uuid') as f:
return f.readline().strip()
def get_this_vm_uuid(session):
if session and session.is_local_connection:
# UUID is the control domain running on this host
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="true" and '
'field "resident_on"="%s"' %
session.host_ref)
return vms[vms.keys()[0]]['uuid']
try:
return _get_sys_hypervisor_uuid()
except IOError:
# Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
# cannot read from uuid after a reboot. Fall back to trying xenstore.
# See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True)
vm_key, _ = utils.execute('xenstore-read',
'/local/domain/%s/vm' % domid.strip(),
run_as_root=True)
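        # The xenstore value read above looks like '/vm/<uuid>'; the slice below
        # strips the leading '/vm/' so only the UUID is returned.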
return vm_key.strip()[4:]
def _get_this_vm_ref(session):
return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session))
def _get_partitions(dev):
"""Return partition information (num, size, type) for a device."""
dev_path = utils.make_dev_path(dev)
out, _err = utils.execute('parted', '--script', '--machine',
dev_path, 'unit s', 'print',
run_as_root=True)
lines = [line for line in out.split('\n') if line]
partitions = []
LOG.debug("Partitions:")
for line in lines[2:]:
line = line.rstrip(';')
num, start, end, size, fstype, name, flags = line.split(':')
num = int(num)
start = int(start.rstrip('s'))
end = int(end.rstrip('s'))
size = int(size.rstrip('s'))
LOG.debug(" %(num)s: %(fstype)s %(size)d sectors",
{'num': num, 'fstype': fstype, 'size': size})
partitions.append((num, start, size, fstype, name, flags))
return partitions
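# Illustrative sample of the 'parted --script --machine ... unit s print' output
# parsed above (device and sizes are hypothetical); the first two lines are
# skipped and each remaining line is split on ':' after stripping the ';':
#   BYT;
#   /dev/xvdb:41943040s:xen:512:512:msdos:Xen Virtual Block Device;
#   1:2048s:41943039s:41940992s:ext3::boot;
# which would yield partitions == [(1, 2048, 41940992, 'ext3', '', 'boot')]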
def _stream_disk(session, image_service_func, image_type, virtual_size, dev):
offset = 0
if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(session, virtual_size, dev)
dev_path = utils.make_dev_path(dev)
with utils.temporary_chown(dev_path):
with open(dev_path, 'wb') as f:
f.seek(offset)
image_service_func(f)
def _write_partition(session, virtual_size, dev):
dev_path = utils.make_dev_path(dev)
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
LOG.debug('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dev_path)s...',
{'primary_first': primary_first, 'primary_last': primary_last,
'dev_path': dev_path})
def execute(*cmd, **kwargs):
return utils.execute(*cmd, **kwargs)
_make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
LOG.debug('Writing partition table %s done.', dev_path)
def _repair_filesystem(partition_path):
# Exit Code 1 = File system errors corrected
# 2 = File system errors corrected, system needs a reboot
utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
check_exit_code=[0, 1, 2])
def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags):
"""Resize partition and fileystem.
This assumes we are dealing with a single primary partition and using
ext3 or ext4.
"""
size = new_sectors - start
end = new_sectors - 1
dev_path = utils.make_dev_path(dev)
partition_path = utils.make_dev_path(dev, partition=1)
# Replay journal if FS wasn't cleanly unmounted
_repair_filesystem(partition_path)
# Remove ext3 journal (making it ext2)
utils.execute('tune2fs', '-O ^has_journal', partition_path,
run_as_root=True)
if new_sectors < old_sectors:
# Resizing down, resize filesystem before partition resize
try:
utils.execute('resize2fs', partition_path, '%ds' % size,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(six.text_type(exc))
reason = _("Shrinking the filesystem down with resize2fs "
"has failed, please check if you have "
"enough free space on your disk.")
raise exception.ResizeError(reason=reason)
utils.execute('parted', '--script', dev_path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', dev_path, 'mkpart',
'primary',
'%ds' % start,
'%ds' % end,
run_as_root=True)
if "boot" in flags.lower():
utils.execute('parted', '--script', dev_path,
'set', '1', 'boot', 'on',
run_as_root=True)
if new_sectors > old_sectors:
# Resizing up, resize filesystem after partition resize
utils.execute('resize2fs', partition_path, run_as_root=True)
# Add back journal
utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
def _log_progress_if_required(left, last_log_time, virtual_size):
if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS):
last_log_time = timeutils.utcnow()
complete_pct = float(virtual_size - left) / virtual_size * 100
LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": complete_pct, "left": left})
return last_log_time
def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
"""Copy data, skipping long runs of zeros to create a sparse file."""
start_time = last_log_time = timeutils.utcnow()
EMPTY_BLOCK = '\0' * block_size
bytes_read = 0
skipped_bytes = 0
left = virtual_size
LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d",
{'src_path': src_path, 'dst_path': dst_path,
'virtual_size': virtual_size, 'block_size': block_size})
# NOTE(sirp): we need read/write access to the devices; since we don't have
# the luxury of shelling out to a sudo'd command, we temporarily take
# ownership of the devices.
with utils.temporary_chown(src_path):
with utils.temporary_chown(dst_path):
with open(src_path, "r") as src:
with open(dst_path, "w") as dst:
data = src.read(min(block_size, left))
while data:
if data == EMPTY_BLOCK:
dst.seek(block_size, os.SEEK_CUR)
left -= block_size
bytes_read += block_size
skipped_bytes += block_size
else:
dst.write(data)
data_len = len(data)
left -= data_len
bytes_read += data_len
if left <= 0:
break
data = src.read(min(block_size, left))
greenthread.sleep(0)
last_log_time = _log_progress_if_required(
left, last_log_time, virtual_size)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
compression_pct = float(skipped_bytes) / bytes_read * 100
LOG.debug("Finished sparse_copy in %(duration).2f secs, "
"%(compression_pct).2f%% reduction in size",
{'duration': duration, 'compression_pct': compression_pct})
def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
# Part of disk taken up by MBR
virtual_size -= MBR_SIZE_BYTES
with vdi_attached_here(session, src_ref, read_only=True) as src:
src_path = utils.make_dev_path(src, partition=partition)
with vdi_attached_here(session, dst_ref, read_only=False) as dst:
dst_path = utils.make_dev_path(dst, partition=partition)
_write_partition(session, virtual_size, dst)
if CONF.xenserver.sparse_copy:
_sparse_copy(src_path, dst_path, virtual_size)
else:
num_blocks = virtual_size / SECTOR_SIZE
utils.execute('dd',
'if=%s' % src_path,
'of=%s' % dst_path,
'count=%d' % num_blocks,
'iflag=direct,sync',
'oflag=direct,sync',
run_as_root=True)
def _mount_filesystem(dev_path, dir):
"""mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
dev_path, dir, run_as_root=True)
except processutils.ProcessExecutionError as e:
err = six.text_type(e)
return err
def _mounted_processing(device, key, net, metadata):
"""Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
# Mount only Linux filesystems, to avoid disturbing NTFS images
err = _mount_filesystem(dev_path, tmpdir)
if not err:
try:
# This try block ensures that the umount occurs
if not agent.find_guest_agent(tmpdir):
vfs = vfsimpl.VFSLocalFS(imgfile=None,
imgfmt=None,
imgdir=tmpdir)
LOG.info(_LI('Manipulating interface files directly'))
# for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we
# support injecting arbitrary files here.
disk.inject_data_into_fs(vfs,
key, net, metadata, None, None)
finally:
utils.execute('umount', dev_path, run_as_root=True)
else:
LOG.info(_LI('Failed to mount filesystem (expected for '
'non-linux instances): %s'), err)
def ensure_correct_host(session):
"""Ensure we're connected to the host we're running on. This is the
required configuration for anything that uses vdi_attached_here.
"""
this_vm_uuid = get_this_vm_uuid(session)
try:
session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
except session.XenAPI.Failure as exc:
if exc.details[0] != 'UUID_INVALID':
raise
raise Exception(_('This domU must be running on the host '
'specified by connection_url'))
def import_all_migrated_disks(session, instance, import_root=True):
root_vdi = None
if import_root:
root_vdi = _import_migrated_root_disk(session, instance)
eph_vdis = _import_migrate_ephemeral_disks(session, instance)
return {'root': root_vdi, 'ephemerals': eph_vdis}
def _import_migrated_root_disk(session, instance):
chain_label = instance['uuid']
vdi_label = instance['name']
return _import_migrated_vhds(session, instance, chain_label, "root",
vdi_label)
def _import_migrate_ephemeral_disks(session, instance):
ephemeral_vdis = {}
instance_uuid = instance['uuid']
ephemeral_gb = instance["ephemeral_gb"]
disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb)
for chain_number, _size in enumerate(disk_sizes, start=1):
chain_label = instance_uuid + "_ephemeral_%d" % chain_number
vdi_label = "%(name)s ephemeral (%(number)d)" % dict(
name=instance['name'], number=chain_number)
ephemeral_vdi = _import_migrated_vhds(session, instance,
chain_label, "ephemeral",
vdi_label)
userdevice = 3 + chain_number
ephemeral_vdis[str(userdevice)] = ephemeral_vdi
return ephemeral_vdis
def _import_migrated_vhds(session, instance, chain_label, disk_type,
vdi_label):
"""Move and possibly link VHDs via the XAPI plugin."""
# TODO(johngarbutt) tidy up plugin params
imported_vhds = session.call_plugin_serialized(
'migration', 'move_vhds_into_sr', instance_uuid=chain_label,
sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack())
# Now we rescan the SR so we find the VHDs
scan_default_sr(session)
vdi_uuid = imported_vhds['root']['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
# Set name-label so we can find if we need to clean up a failed migration
_set_vdi_info(session, vdi_ref, disk_type, vdi_label,
disk_type, instance)
return {'uuid': vdi_uuid, 'ref': vdi_ref}
def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
ephemeral_number=0):
LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d",
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
chain_label = instance['uuid']
if ephemeral_number:
chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number
try:
# TODO(johngarbutt) tidy up plugin params
session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except session.XenAPI.Failure:
msg = "Failed to transfer vhd to new host"
LOG.debug(msg, instance=instance, exc_info=True)
raise exception.MigrationError(reason=msg)
def vm_ref_or_raise(session, instance_name):
vm_ref = lookup(session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
return vm_ref
def handle_ipxe_iso(session, instance, cd_vdi, network_info):
"""iPXE ISOs are a mechanism to allow the customer to roll their own
image.
To use this feature, a service provider needs to configure the
appropriate Nova flags, roll an iPXE ISO, then distribute that image
to customers via Glance.
NOTE: `mkisofs` is not present by default in the Dom0, so the service
provider can either add that package manually to Dom0 or include the
`mkisofs` binary in the image itself.
"""
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
if not boot_menu_url:
LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...'), instance=instance)
return
network_name = CONF.xenserver.ipxe_network_name
if not network_name:
LOG.warning(_LW('ipxe_network_name not set, user will have to'
' enter IP manually...'), instance=instance)
return
network = None
for vif in network_info:
if vif['network']['label'] == network_name:
network = vif['network']
break
if not network:
LOG.warning(_LW("Unable to find network matching '%(network_name)s', "
"user will have to enter IP manually..."),
{'network_name': network_name}, instance=instance)
return
sr_path = get_sr_path(session)
# Unpack IPv4 network info
subnet = [sn for sn in network['subnets']
if sn['version'] == 4][0]
ip = subnet['ips'][0]
ip_address = ip['address']
netmask = network_model.get_netmask(ip, subnet)
gateway = subnet['gateway']['address']
dns = subnet['dns'][0]['address']
try:
session.call_plugin_serialized("ipxe", "inject", sr_path,
cd_vdi['uuid'], boot_menu_url, ip_address, netmask,
gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd)
except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'CommandNotFound':
LOG.warning(_LW("ISO creation tool '%s' does not exist."),
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
else:
raise
def set_other_config_pci(session, vm_ref, params):
"""Set the pci key of other-config parameter to params."""
other_config = session.call_xenapi("VM.get_other_config", vm_ref)
other_config['pci'] = params
session.call_xenapi("VM.set_other_config", vm_ref, other_config)
| {
"content_hash": "fb7cd1bc5aea56f5343f8f8100135323",
"timestamp": "",
"source": "github",
"line_count": 2626,
"max_line_length": 79,
"avg_line_length": 37.613480578827115,
"alnum_prop": 0.5895538254381258,
"repo_name": "thomasem/nova",
"id": "eee943f9fdbb1934c53064b508610817f3a786eb",
"size": "99474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/xenapi/vm_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16035461"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "321702"
}
],
"symlink_target": ""
} |
DNSREDIR_CONFIGDIR_PREFIX = "/etc/dnsredir/"
DNSREDIR_LOGDIR_PREFIX = "/var/log/dnsredir/"
DNSREDIR_PIDFILE_PREFIX = "/var/run/dnsredir."
REDIR_USER = "princeton_coredirect"
#REDIR_USER = "arizona_tools1"
#DEMUX_USER = "arizona_tools1"
DEMUX_USER = "princeton_codnsdemux"
HEARTBEAT_PORT = 9000
#not required as of now, as there will be only one frontend dnsdemux for all the services
#DNSDEMUX_CONFIGDIR_PREFIX = "/etc/dnsdemux/"
#DNSDEMUX_LOGDIR_PREFIX = "/var/log/dnsdemux/"
#DNSDEMUX_PIDFILE_PREFIX = "/var/run/dnsdemux."
TBD = "TBD"
| {
"content_hash": "18d7dad28606a81e890aa8bbd8b424fd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 89,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.7434944237918215,
"repo_name": "wathsalav/xos",
"id": "d0c00c24ff67a28bfaa1ef9944eb271812c0bcaf",
"size": "561",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "xos/rr_observer/rrlib_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "370"
},
{
"name": "CSS",
"bytes": "37088"
},
{
"name": "HTML",
"bytes": "636864"
},
{
"name": "JavaScript",
"bytes": "760492"
},
{
"name": "Makefile",
"bytes": "2717"
},
{
"name": "Python",
"bytes": "1160110"
},
{
"name": "Shell",
"bytes": "10483"
}
],
"symlink_target": ""
} |
import pytest
from ate.tags import CompileStatement
from ate.ate import ParseContext, Template
from ate.exceptions import ParseError
class TestErrorHandling:
def test_simple(self):
tpl = ParseContext("{% nonexisting and unclosed")
with pytest.raises(ParseError) as e:
res, skip = CompileStatement(tpl)
exc = e.value
assert exc.pc.offset == 0
assert exc.pc.code.startswith("{% nonexisting")
line, col = exc.pc.position()
assert line == 1
assert col == 1
def test_nested(self):
tpl = ParseContext("""{% for i in 'abc' %}
{%if } {{i}} {%endif%}
""")
with pytest.raises(ParseError) as e:
res, skip = CompileStatement(tpl)
exc = e.value
assert exc.pc.offset == 25
assert exc.pc.code.startswith("{%if")
line, col = exc.pc.position()
assert line == 2
assert col == 5
def test_multi_line_indent(self):
tpl = """Hello World
this is a test
a {%if }
more noise
...
"""
with pytest.raises(ParseError) as e:
Template(tpl)
exc = e.value
assert exc.pc.offset == 39
assert exc.pc.code.startswith("{%if")
line, col = exc.pc.position()
assert line == 4
assert col == 11
def test_be_smart_about_closed(self):
tpl = ParseContext("""{% for i in 'abc' %}
{%if } {{i}} {%endif%}
{%endfor%}
""")
with pytest.raises(ParseError) as e:
res, skip = CompileStatement(tpl)
exc = e.value
assert exc.pc.offset == 48
assert exc.pc.code.startswith("{%endfor")
# import pdb; pdb.set_trace()
line, col = exc.pc.position()
assert line == 3
assert col == 1
def test_complex(self):
tpl = """Hello World
{% for i in abc%}
x {{i}}
{% endfor %}
That's all!"""
t = Template(tpl)
t.render(abc="123")
| {
"content_hash": "714de4f4f4c213b1357f7799b518a05f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 57,
"avg_line_length": 24.7375,
"alnum_prop": 0.5411824153612936,
"repo_name": "iivvoo/ate",
"id": "351580b38987389bb4ad6a9d97d12fd534fc847d",
"size": "1979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_errorhandling.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41721"
}
],
"symlink_target": ""
} |
import decimal
from django.core.management.color import no_style
from django.db import NotSupportedError, connection, transaction
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import DurationField, Value
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
override_settings,
skipIfDBFeature,
)
from django.utils import timezone
from ..models import Author, Book
class SimpleDatabaseOperationTests(SimpleTestCase):
may_require_msg = "subclasses of BaseDatabaseOperations may require a %s() method"
def setUp(self):
self.ops = BaseDatabaseOperations(connection=connection)
def test_deferrable_sql(self):
self.assertEqual(self.ops.deferrable_sql(), "")
def test_end_transaction_rollback(self):
self.assertEqual(self.ops.end_transaction_sql(success=False), "ROLLBACK;")
def test_no_limit_value(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "no_limit_value"
):
self.ops.no_limit_value()
def test_quote_name(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "quote_name"
):
self.ops.quote_name("a")
def test_regex_lookup(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "regex_lookup"
):
self.ops.regex_lookup(lookup_type="regex")
def test_set_time_zone_sql(self):
self.assertEqual(self.ops.set_time_zone_sql(), "")
def test_sql_flush(self):
msg = "subclasses of BaseDatabaseOperations must provide an sql_flush() method"
with self.assertRaisesMessage(NotImplementedError, msg):
self.ops.sql_flush(None, None)
def test_pk_default_value(self):
self.assertEqual(self.ops.pk_default_value(), "DEFAULT")
def test_tablespace_sql(self):
self.assertEqual(self.ops.tablespace_sql(None), "")
def test_sequence_reset_by_name_sql(self):
self.assertEqual(self.ops.sequence_reset_by_name_sql(None, []), [])
def test_adapt_unknown_value_decimal(self):
value = decimal.Decimal("3.14")
self.assertEqual(
self.ops.adapt_unknown_value(value),
self.ops.adapt_decimalfield_value(value),
)
def test_adapt_unknown_value_date(self):
value = timezone.now().date()
self.assertEqual(
self.ops.adapt_unknown_value(value), self.ops.adapt_datefield_value(value)
)
def test_adapt_unknown_value_time(self):
value = timezone.now().time()
self.assertEqual(
self.ops.adapt_unknown_value(value), self.ops.adapt_timefield_value(value)
)
def test_adapt_timefield_value_none(self):
self.assertIsNone(self.ops.adapt_timefield_value(None))
def test_adapt_timefield_value_expression(self):
value = Value(timezone.now().time())
self.assertEqual(self.ops.adapt_timefield_value(value), value)
def test_adapt_datetimefield_value_none(self):
self.assertIsNone(self.ops.adapt_datetimefield_value(None))
def test_adapt_datetimefield_value_expression(self):
value = Value(timezone.now())
self.assertEqual(self.ops.adapt_datetimefield_value(value), value)
def test_adapt_timefield_value(self):
msg = "Django does not support timezone-aware times."
with self.assertRaisesMessage(ValueError, msg):
self.ops.adapt_timefield_value(timezone.make_aware(timezone.now()))
@override_settings(USE_TZ=False)
def test_adapt_timefield_value_unaware(self):
now = timezone.now()
self.assertEqual(self.ops.adapt_timefield_value(now), str(now))
def test_format_for_duration_arithmetic(self):
msg = self.may_require_msg % "format_for_duration_arithmetic"
with self.assertRaisesMessage(NotImplementedError, msg):
self.ops.format_for_duration_arithmetic(None)
def test_date_extract_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "date_extract_sql"
):
self.ops.date_extract_sql(None, None)
def test_time_extract_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "date_extract_sql"
):
self.ops.time_extract_sql(None, None)
def test_date_trunc_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "date_trunc_sql"
):
self.ops.date_trunc_sql(None, None)
def test_time_trunc_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "time_trunc_sql"
):
self.ops.time_trunc_sql(None, None)
def test_datetime_trunc_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "datetime_trunc_sql"
):
self.ops.datetime_trunc_sql(None, None, None)
def test_datetime_cast_date_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "datetime_cast_date_sql"
):
self.ops.datetime_cast_date_sql(None, None)
def test_datetime_cast_time_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "datetime_cast_time_sql"
):
self.ops.datetime_cast_time_sql(None, None)
def test_datetime_extract_sql(self):
with self.assertRaisesMessage(
NotImplementedError, self.may_require_msg % "datetime_extract_sql"
):
self.ops.datetime_extract_sql(None, None, None)
class DatabaseOperationTests(TestCase):
def setUp(self):
self.ops = BaseDatabaseOperations(connection=connection)
@skipIfDBFeature("supports_over_clause")
def test_window_frame_raise_not_supported_error(self):
msg = "This backend does not support window expressions."
with self.assertRaisesMessage(NotSupportedError, msg):
self.ops.window_frame_rows_start_end()
@skipIfDBFeature("can_distinct_on_fields")
def test_distinct_on_fields(self):
msg = "DISTINCT ON fields is not supported by this database backend"
with self.assertRaisesMessage(NotSupportedError, msg):
self.ops.distinct_sql(["a", "b"], None)
@skipIfDBFeature("supports_temporal_subtraction")
def test_subtract_temporals(self):
duration_field = DurationField()
duration_field_internal_type = duration_field.get_internal_type()
msg = (
"This backend does not support %s subtraction."
% duration_field_internal_type
)
with self.assertRaisesMessage(NotSupportedError, msg):
self.ops.subtract_temporals(duration_field_internal_type, None, None)
class SqlFlushTests(TransactionTestCase):
available_apps = ["backends"]
def test_sql_flush_no_tables(self):
self.assertEqual(connection.ops.sql_flush(no_style(), []), [])
def test_execute_sql_flush_statements(self):
with transaction.atomic():
author = Author.objects.create(name="George Orwell")
Book.objects.create(author=author)
author = Author.objects.create(name="Harper Lee")
Book.objects.create(author=author)
Book.objects.create(author=author)
self.assertIs(Author.objects.exists(), True)
self.assertIs(Book.objects.exists(), True)
sql_list = connection.ops.sql_flush(
no_style(),
[Author._meta.db_table, Book._meta.db_table],
reset_sequences=True,
allow_cascade=True,
)
connection.ops.execute_sql_flush(sql_list)
with transaction.atomic():
self.assertIs(Author.objects.exists(), False)
self.assertIs(Book.objects.exists(), False)
if connection.features.supports_sequence_reset:
author = Author.objects.create(name="F. Scott Fitzgerald")
self.assertEqual(author.pk, 1)
book = Book.objects.create(author=author)
self.assertEqual(book.pk, 1)
| {
"content_hash": "e37c2ed72637571e739ab284d7622b97",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 87,
"avg_line_length": 37.43693693693694,
"alnum_prop": 0.6531103356996751,
"repo_name": "dsanders11/django",
"id": "b19b7ee5586f1326fa64ebbd151b481e4da51fdd",
"size": "8311",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tests/backends/base/test_operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87587"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146496"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "15995318"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
"""
Created on Mar 8, 2014
@author: tjoneslo
"""
import os
import logging
from pypdflite import PDFLite
from pypdflite import PDFCursor
from pypdflite.pdfobjects.pdfline import PDFLine
from pypdflite.pdfobjects.pdfellipse import PDFEllipse
from pypdflite.pdfobjects.pdftext import PDFText
from Galaxy import Sector, Galaxy
from Star import Star
from StatCalculation import StatCalculation
class HexMap(object):
"""
Draw the trade routes as calculated, sector by sector onto PDF files.
Used pypdflite to directly generate the PDF files.
"""
def __init__(self, galaxy, routes, min_btn=8):
self.galaxy = galaxy
self.routes = routes
self.ym = 9 # half a hex height
self.xm = 6 # half the length of one side
self.colorStart = 0
self.min_btn = min_btn
self.y_start = 43
self.x_start = 15
def write_maps(self):
"""
Starting point for writing PDF files.
Call this to output the trade maps
"""
logging.getLogger("PyRoute.HexMap").info("writing {:d} sector maps...".format(len(self.galaxy.sectors)))
for sector in self.galaxy.sectors.values():
pdf = self.document(sector)
self.write_base_map(pdf, sector)
self.draw_borders(pdf, sector)
comm_routes = [star for star in self.galaxy.stars.edges(sector.worlds, True) \
if star[2].get('xboat', False) or star[2].get('comm', False)]
for (star, neighbor, data) in comm_routes:
self.comm_line(pdf, [star, neighbor])
sector_trade = [star for star in self.galaxy.stars.edges(sector.worlds, True) \
if star[2]['trade'] > 0 and StatCalculation.trade_to_btn(star[2]['trade']) >= self.min_btn]
logging.getLogger('PyRoute.HexMap').debug("Worlds with trade: {}".format(len(sector_trade)))
sector_trade.sort(key=lambda line: line[2]['trade'])
for (star, neighbor, data) in sector_trade:
self.galaxy.stars[star][neighbor]['trade btn'] = StatCalculation.trade_to_btn(data['trade'])
self.trade_line(pdf, [star, neighbor], data)
# Get all the worlds in this sector
# for (star, neighbor, data) in self.galaxy.stars.edges(sector.worlds, True):
# if star.sector != sector:
# continue#
# if data['trade'] > 0 and self.trade_to_btn(data['trade']) >= self.min_btn:
# self.galaxy.stars[star][neighbor]['trade btn'] = self.trade_to_btn(data['trade'])
# self.trade_line(pdf, [star, neighbor], data)
# elif star.sector != neighbor.sector:
# data = self.galaxy.stars.get_edge_data(neighbor, star)
# if data is not None and \
# data['trade'] > 0 and \
# self.trade_to_btn(data['trade']) >= self.min_btn:
# self.trade_line(pdf, [star, neighbor], data)
for star in sector.worlds:
self.system(pdf, star)
if sector.coreward:
self.coreward_sector(pdf, sector.coreward.name)
if sector.rimward:
self.rimward_sector(pdf, sector.rimward.name)
if sector.spinward:
self.spinward_sector(pdf, sector.spinward.name)
if sector.trailing:
self.trailing_sector(pdf, sector.trailing.name)
self.writer.close()
def write_base_map(self, pdf, sector):
self.sector_name(pdf, sector.name)
self.subsector_grid(pdf)
self.hex_grid(pdf, self._draw_all, 0.5)
def sector_name(self, pdf, name):
cursor = PDFCursor(5, -5, True)
def_font = pdf.get_font()
pdf.set_font('times', size=30)
width = pdf.get_font()._string_width(name)
cursor.x = 306 - (width / 2)
pdf.add_text(name, cursor)
pdf.set_font(font=def_font)
def coreward_sector(self, pdf, name):
cursor = PDFCursor(5, self.y_start - 15, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
width = pdf.get_font()._string_width(name) / 2
cursor.x = 306 - width
pdf.add_text(name, cursor)
pdf.set_font(font=def_font)
def rimward_sector(self, pdf, name):
cursor = PDFCursor(306, 767, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
cursor.x_plus(-pdf.get_font()._string_width(name) / 2)
pdf.add_text(name, cursor)
pdf.set_font(font=def_font)
def spinward_sector(self, pdf, name):
cursor = PDFCursor(self.x_start - 5, 390, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
cursor.y_plus(pdf.get_font()._string_width(name) / 2)
text = PDFText(pdf.session, pdf.page, None, cursor=cursor)
text.text_rotate(90)
text._text(name)
pdf.set_font(font=def_font)
def trailing_sector(self, pdf, name):
cursor = PDFCursor(598, 390, True)
def_font = pdf.get_font()
pdf.set_font('times', size=10)
cursor.y_plus(-(pdf.get_font()._string_width(name) / 2))
text = PDFText(pdf.session, pdf.page, None, cursor=cursor)
text.text_rotate(-90)
text._text(name)
pdf.set_font(font=def_font)
def subsector_grid(self, pdf):
color = pdf.get_color()
color.set_color_by_name('lightgray')
pdf.set_draw_color(color)
vlineStart = PDFCursor(0, self.y_start + self.xm)
vlineEnd = PDFCursor(0, self.y_start + self.xm + (180 * 4))
for x in range(self.x_start, 595, 144):
vlineStart.x = x
vlineEnd.x = x
pdf.add_line(cursor1=vlineStart, cursor2=vlineEnd)
hlineStart = PDFCursor(self.x_start, 0)
hlineEnd = PDFCursor(591, 0)
for y in range(self.y_start + self.xm, 780, 180):
hlineStart.y = y
hlineEnd.y = y
pdf.add_line(cursor1=hlineStart, cursor2=hlineEnd)
def _hline(self, pdf, width, colorname):
hlineStart = PDFCursor(0, 0)
hlineStart.x = 3
hlineStart.y = self.y_start - self.ym
hlineStart.dx = self.xm * 3
hlineStart.dy = self.ym * 2
hlineEnd = PDFCursor(0, 0)
hlineEnd.x = self.xm * 2.5
hlineEnd.y = self.y_start - self.ym
hlineEnd.dx = self.xm * 3
hlineEnd.dy = self.ym * 2
color = pdf.get_color()
color.set_color_by_name(colorname)
hline = PDFLine(pdf.session, pdf.page, hlineStart, hlineEnd, stroke='solid', color=color, size=width)
return (hlineStart, hlineEnd, hline)
def _hline_restart_y(self, x, hlineStart, hlineEnd):
if (x & 1):
hlineStart.y = self.y_start - self.ym
hlineEnd.y = self.y_start - self.ym
else:
hlineStart.y = self.y_start - 2 * self.ym
hlineEnd.y = self.y_start - 2 * self.ym
def _lline(self, pdf, width, colorname):
llineStart = PDFCursor(-10, 0)
llineStart.x = self.x_start
llineStart.dx = self.xm * 3
llineStart.dy = self.ym * 2
llineEnd = PDFCursor(-10, 0)
llineEnd.x = self.x_start + self.xm
llineEnd.dx = self.xm * 3
llineEnd.dy = self.ym * 2
color = pdf.get_color()
color.set_color_by_name(colorname)
lline = PDFLine(pdf.session, pdf.page, llineStart, llineEnd, stroke='solid', color=color, size=width)
return (llineStart, llineEnd, lline)
def _lline_restart_y(self, x, llineStart, llineEnd):
if (x & 1):
llineStart.y = self.y_start - 2 * self.ym
llineEnd.y = self.y_start - self.ym
else:
llineStart.y = self.y_start - self.ym
llineEnd.y = self.y_start - 2 * self.ym
def _rline(self, pdf, width, colorname):
rlineStart = PDFCursor(0, 0)
rlineStart.x = self.x_start + self.xm
rlineStart.dx = self.xm * 3
rlineStart.dy = self.ym * 2
rlineEnd = PDFCursor(0, 0)
rlineEnd.x = self.x_start
rlineEnd.dx = self.xm * 3
rlineEnd.dy = self.ym * 2
color = pdf.get_color()
color.set_color_by_name(colorname)
rline = PDFLine(pdf.session, pdf.page, rlineStart, rlineEnd, stroke='solid', color=color, size=width)
return (rlineStart, rlineEnd, rline)
def _rline_restart_y(self, x, rlineStart, rlineEnd):
if (x & 1):
rlineStart.y = self.y_start - 3 * self.ym
rlineEnd.y = self.y_start - 2 * self.ym
else:
rlineStart.y = self.y_start - 2 * self.ym
rlineEnd.y = self.y_start - 3 * self.ym
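    # _hline draws the horizontal hex edges, while _lline and _rline draw the
    # left- and right-leaning edges; hex_grid() below steps these three segments
    # across the sector to trace the full grid (or, via _draw_borders, only the
    # border edges).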
def hex_grid(self, pdf, draw, width, colorname='gray'):
hlineStart, hlineEnd, hline = self._hline(pdf, width, colorname)
llineStart, llineEnd, lline = self._lline(pdf, width, colorname)
rlineStart, rlineEnd, rline = self._rline(pdf, width, colorname)
for x in range(33):
hlineStart.x_plus()
hlineEnd.x_plus()
self._hline_restart_y(x, hlineStart, hlineEnd)
self._lline_restart_y(x, llineStart, llineEnd)
self._rline_restart_y(x, rlineStart, rlineEnd)
for y in range(41):
hlineStart.y_plus()
hlineEnd.y_plus()
llineStart.y_plus()
llineEnd.y_plus()
rlineStart.y_plus()
rlineEnd.y_plus()
draw(x, y, hline, lline, rline)
llineStart.x_plus()
llineEnd.x_plus()
rlineStart.x_plus()
rlineEnd.x_plus()
def _draw_all(self, x, y, hline, lline, rline):
if (x < 32):
hline._draw()
lline._draw()
if (y > 0):
rline._draw()
def _draw_borders(self, x, y, hline, lline, rline):
q, r = self.convert_hex_to_axial(x + self.sector.dx, y + self.sector.dy - 1)
if self.galaxy.borders.borders.get((q, r), False):
if self.galaxy.borders.borders[(q, r)] & 1:
hline._draw()
if self.galaxy.borders.borders[(q, r)] & 2 and y > 0:
rline._draw()
if self.galaxy.borders.borders[(q, r)] & 4:
lline._draw()
def draw_borders(self, pdf, sector):
self.sector = sector
self.hex_grid(pdf, self._draw_borders, 1.5, 'salmon')
@staticmethod
def convert_hex_to_axial(row, col):
x = row
z = col - (row - (row & 1)) / 2
return (x, z)
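    # Illustrative conversions from offset (row, col) to axial coordinates, as
    # computed by the formula above: (1, 0) -> (1, 0), (2, 3) -> (2, 2),
    # (3, 3) -> (3, 2). Note the division stays an integer under Python 2, which
    # this code targets.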
def system(self, pdf, star):
def_font = pdf.get_font()
pdf.set_font('times', size=4)
col = (self.xm * 3 * (star.col))
if (star.col & 1):
row = (self.y_start - self.ym * 2) + (star.row * self.ym * 2)
else:
row = (self.y_start - self.ym) + (star.row * self.ym * 2)
point = PDFCursor(col, row)
self.zone(pdf, star, point.copy())
width = self.string_width(pdf.get_font(), star.uwp)
point.y_plus(7)
point.x_plus(self.ym - (width // 2))
pdf.add_text(star.uwp, point)
if len(star.name) > 0:
for chars in range(len(star.name), 0, -1):
width = self.string_width(pdf.get_font(), star.name[:chars])
if width <= self.xm * 3.5:
break
point.y_plus(3.5)
point.x = col
point.x_plus(self.ym - (width // 2))
pdf.add_text(star.name[:chars], point)
added = star.alg_code
if star.tradeCode.subsector_capital:
added += '+'
elif star.tradeCode.sector_capital or star.tradeCode.other_capital:
added += '*'
else:
added += ' '
added += '{:d}'.format(star.ggCount)
point.y_plus(3.5)
point.x = col
width = pdf.get_font()._string_width(added)
point.x_plus(self.ym - (width // 2))
pdf.add_text(added, point)
added = ''
tradeIn = StatCalculation.trade_to_btn(star.tradeIn)
tradeThrough = StatCalculation.trade_to_btn(star.tradeIn + star.tradeOver)
if self.routes == 'trade':
added += "{:X}{:X}{:X}{:d}".format(star.wtn, tradeIn, tradeThrough, star.starportSize)
elif self.routes == 'comm':
added += "{}{} {}".format(star.baseCode, star.ggCount, star.importance)
elif self.routes == 'xroute':
added += " {}".format(star.importance)
width = pdf.get_font()._string_width(added)
point.y_plus(3.5)
point.x = col
point.x_plus(self.ym - (width // 2))
pdf.add_text(added, point)
pdf.set_font(def_font)
def trade_line(self, pdf, edge, data):
tradeColors = [(255, 0, 0), # Red
(224, 224, 16), # yellow - darker
(0, 255, 0), # green
(0, 255, 255), # Cyan
(96, 96, 255), # blue - lighter
(128, 0, 128), # purple
(148, 0, 211), # violet
]
start = edge[0]
end = edge[1]
trade = StatCalculation.trade_to_btn(data['trade']) - self.min_btn
if trade < 0:
return
if trade > 6:
            logging.getLogger('PyRoute.HexMap').warn("trade calculated over %d" % (self.min_btn + 6))
trade = 6
tradeColor = tradeColors[trade]
color = pdf.get_color()
color.set_color_by_number(tradeColor[0], tradeColor[1], tradeColor[2])
starty = self.y_start + (self.ym * 2 * (start.row)) - (self.ym * (1 if start.col & 1 else 0))
startx = (self.xm * 3 * (start.col)) + self.ym
endRow = end.row
endCol = end.col
endCircle = True
if (end.sector != start.sector):
endCircle = False
if end.sector.x < start.sector.x:
endCol -= 32
if end.sector.x > start.sector.x:
endCol += 32
if end.sector.y < start.sector.y:
endRow -= 40
if end.sector.y > start.sector.y:
endRow += 40
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
(startx, starty), (endx, endy) = self.clipping(startx, starty, endx, endy)
else:
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
lineStart = PDFCursor(startx, starty)
lineEnd = PDFCursor(endx, endy)
line = PDFLine(pdf.session, pdf.page, lineStart, lineEnd, stroke='solid', color=color, size=1)
line._draw()
radius = PDFCursor(2, 2)
circle = PDFEllipse(pdf.session, pdf.page, lineStart, radius, color, size=3)
circle._draw()
if endCircle:
circle = PDFEllipse(pdf.session, pdf.page, lineEnd, radius, color, size=3)
circle._draw()
def comm_line(self, pdf, edge):
start = edge[0]
end = edge[1]
color = pdf.get_color()
color.set_color_by_number(102, 178, 102)
starty = self.y_start + (self.ym * 2 * (start.row)) - (self.ym * (1 if start.col & 1 else 0))
startx = (self.xm * 3 * (start.col)) + self.ym
endRow = end.row
endCol = end.col
if (end.sector != start.sector):
if end.sector.x < start.sector.x:
endCol -= 32
if end.sector.x > start.sector.x:
endCol += 32
if end.sector.y < start.sector.y:
endRow -= 40
if end.sector.y > start.sector.y:
endRow += 40
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
(startx, starty), (endx, endy) = self.clipping(startx, starty, endx, endy)
else:
endy = self.y_start + (self.ym * 2 * (endRow)) - (self.ym * (1 if endCol & 1 else 0))
endx = (self.xm * 3 * endCol) + self.ym
lineStart = PDFCursor(startx, starty)
lineEnd = PDFCursor(endx, endy)
line = PDFLine(pdf.session, pdf.page, lineStart, lineEnd, stroke='solid', color=color, size=3)
line._draw()
def zone(self, pdf, star, point):
point.x_plus(self.ym)
point.y_plus(self.ym)
color = pdf.get_color()
if star.zone in ['R', 'F']:
color.set_color_by_name('crimson')
elif star.zone in ['A', 'U']:
color.set_color_by_name('goldenrod')
else: # no zone -> do nothing
return
radius = PDFCursor(self.xm, self.xm)
circle = PDFEllipse(pdf.session, pdf.page, point, radius, color, size=2)
circle._draw()
def document(self, sector):
path = os.path.join(self.galaxy.output_path, sector.sector_name() + " Sector.pdf")
self.writer = PDFLite(path)
title = "Sector %s" % sector
subject = "Trade route map generated by PyRoute for Traveller"
author = None
keywords = None
creator = "PyPDFLite"
self.writer.set_information(title, subject, author, keywords, creator)
self.writer.set_compression(True)
document = self.writer.get_document()
document.set_margins(4)
return document
@staticmethod
def string_width(font, string):
w = 0
for i in string:
w += font.character_widths[i] if i in font.character_widths else 600
return w * font.font_size / 1000.0
def clipping(self, startx, starty, endx, endy):
points_t = [0.0, 1.0]
line_pt_1 = [startx, starty]
line_pt_2 = [endx, endy]
if startx == endx:
if starty > endy:
return ((startx, min(max(starty, endy), 780)),
(startx, max(min(starty, endy), 42)))
else:
return ((startx, max(min(starty, endy), 42)),
(startx, min(max(starty, endy), 780)))
if starty == endy:
if startx > endx:
return ((min(max(startx, endx), 600), starty),
(max(min(startx, endx), 15), starty))
else:
return ((max(min(startx, endx), 15), starty),
(min(max(startx, endx), 600), starty))
points_t.append(float(15 - startx) / (endx - startx))
points_t.append(float(600 - startx) / (endx - startx))
points_t.append(float(780 - starty) / (endy - starty))
points_t.append(float(42 - starty) / (endy - starty))
points_t.sort()
result = [(pt_1 + t * (pt_2 - pt_1)) for t in (points_t[2], points_t[3]) for (pt_1, pt_2) in
zip(line_pt_1, line_pt_2)]
logging.getLogger("PyRoute.HexMap").debug(result)
return (result[0], result[1]), (result[2], result[3])
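    # clipping() above performs a parametric (Liang-Barsky style) clip of the
    # route line against the drawable map area (x in [15, 600], y in [42, 780]);
    # the two middle values of the sorted t list bound the visible segment.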
if __name__ == '__main__':
sector = Sector('# Core', '# 0,0')
    hexMap = HexMap(None, 'trade')
pdf = hexMap.document(sector)
hexMap.write_base_map(pdf, sector)
galaxy = Galaxy(0, 0)
star1 = Star(
"0102 Shana Ma E551112-7 Lo Po { -3 } (300-3) [1113] B - - 913 9 Im K2 IV M7 V ",
galaxy.starline, 0, 0)
star2 = Star(
"0405 Azimuth B847427-B Ni Pa { 1 } (634+1) [455B] Bc N - 200 13 Im M2 V M7 V ",
galaxy.starline, 0, 0)
    # The trade volume below is an arbitrary illustrative value.
    hexMap.trade_line(pdf, [star1, star2], {'trade': 1000000})
hexMap.system(pdf, star1)
hexMap.system(pdf, star2)
hexMap.writer.close()
| {
"content_hash": "3c7278cd4b4ea03d122f4a6661629ef3",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 125,
"avg_line_length": 36.57090239410682,
"alnum_prop": 0.5384228018934435,
"repo_name": "makhidkarun/traveller_pyroute",
"id": "3dfac1993f6fc3a8d37942e4bb8131d211df70cb",
"size": "19858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyRoute/HexMap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "348119"
}
],
"symlink_target": ""
} |
from turtle import *
cote = 150
angle = 60
begin_fill()
pencolor("black")
fillcolor("light blue")
left(angle)
forward(cote)
right(180 - angle)
forward(cote)
right(180 - angle)
forward(cote)
end_fill()
hideturtle()
done() | {
"content_hash": "d5ab4fea5d712ac83051cfcc2b87389d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 23,
"avg_line_length": 11.25,
"alnum_prop": 0.7066666666666667,
"repo_name": "TGITS/programming-workouts",
"id": "7762bdf0d306692d386de4e76b68272ac15472b1",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "erri/python/lesson_34/triangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "244"
},
{
"name": "C#",
"bytes": "175"
},
{
"name": "CSS",
"bytes": "57544"
},
{
"name": "Clojure",
"bytes": "145363"
},
{
"name": "D",
"bytes": "5141"
},
{
"name": "Dart",
"bytes": "80832"
},
{
"name": "Dockerfile",
"bytes": "811"
},
{
"name": "Elixir",
"bytes": "86418"
},
{
"name": "Elm",
"bytes": "2738"
},
{
"name": "F#",
"bytes": "4142"
},
{
"name": "Gherkin",
"bytes": "503"
},
{
"name": "Gnuplot",
"bytes": "2363"
},
{
"name": "Go",
"bytes": "65562"
},
{
"name": "Groovy",
"bytes": "2457"
},
{
"name": "HTML",
"bytes": "1536579"
},
{
"name": "Haskell",
"bytes": "157"
},
{
"name": "Java",
"bytes": "744052"
},
{
"name": "JavaScript",
"bytes": "79838"
},
{
"name": "Jinja",
"bytes": "362"
},
{
"name": "Julia",
"bytes": "1365"
},
{
"name": "Kotlin",
"bytes": "53565"
},
{
"name": "Lua",
"bytes": "3980"
},
{
"name": "PHP",
"bytes": "264599"
},
{
"name": "Pascal",
"bytes": "2952"
},
{
"name": "Perl",
"bytes": "927"
},
{
"name": "PowerShell",
"bytes": "397"
},
{
"name": "Prolog",
"bytes": "574"
},
{
"name": "Pug",
"bytes": "550"
},
{
"name": "Python",
"bytes": "550192"
},
{
"name": "R",
"bytes": "19071"
},
{
"name": "Raku",
"bytes": "5189"
},
{
"name": "Ruby",
"bytes": "27911"
},
{
"name": "Rust",
"bytes": "71504"
},
{
"name": "Scala",
"bytes": "136475"
},
{
"name": "Shell",
"bytes": "9158"
},
{
"name": "TypeScript",
"bytes": "64644"
}
],
"symlink_target": ""
} |
import os
from collections import OrderedDict
from nmt import train
from char_base import *
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'fff': ('param_init_ffflayer', 'ffflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'two_layer_gru_decoder': ('param_init_two_layer_gru_decoder',
'two_layer_gru_decoder'),
}
def main(job_id, params):
re_load = False
save_file_name = 'bpe2char_two_layer_gru_decoder_adam'
source_dataset = params['train_data_path'] + params['source_dataset']
target_dataset = params['train_data_path'] + params['target_dataset']
valid_source_dataset = params['dev_data_path'] + params['valid_source_dataset']
valid_target_dataset = params['dev_data_path'] + params['valid_target_dataset']
source_dictionary = params['train_data_path'] + params['source_dictionary']
target_dictionary = params['train_data_path'] + params['target_dictionary']
print params, params['save_path'], save_file_name
validerr = train(
max_epochs=int(params['max_epochs']),
patience=int(params['patience']),
dim_word=int(params['dim_word']),
dim_word_src=int(params['dim_word_src']),
save_path=params['save_path'],
save_file_name=save_file_name,
re_load=re_load,
enc_dim=int(params['enc_dim']),
dec_dim=int(params['dec_dim']),
n_words=int(params['n_words']),
n_words_src=int(params['n_words_src']),
decay_c=float(params['decay_c']),
lrate=float(params['learning_rate']),
optimizer=params['optimizer'],
maxlen=int(params['maxlen']),
maxlen_trg=int(params['maxlen_trg']),
maxlen_sample=int(params['maxlen_sample']),
batch_size=int(params['batch_size']),
valid_batch_size=int(params['valid_batch_size']),
sort_size=int(params['sort_size']),
validFreq=int(params['validFreq']),
dispFreq=int(params['dispFreq']),
saveFreq=int(params['saveFreq']),
sampleFreq=int(params['sampleFreq']),
clip_c=int(params['clip_c']),
datasets=[source_dataset, target_dataset],
valid_datasets=[valid_source_dataset, valid_target_dataset],
dictionaries=[source_dictionary, target_dictionary],
use_dropout=int(params['use_dropout']),
source_word_level=int(params['source_word_level']),
target_word_level=int(params['target_word_level']),
layers=layers,
save_every_saveFreq=1,
use_bpe=1,
init_params=init_params,
build_model=build_model,
build_sampler=build_sampler,
gen_sample=gen_sample
)
return validerr
if __name__ == '__main__':
import sys, time
if len(sys.argv) > 1:
config_file_name = sys.argv[-1]
else:
config_file_name = 'wmt15_deen_bpe2char_adam.txt'
f = open(config_file_name, 'r')
lines = f.readlines()
params = OrderedDict()
for line in lines:
line = line.split('\n')[0]
param_list = line.split(' ')
param_name = param_list[0]
param_value = param_list[1]
params[param_name] = param_value
main(0, params)
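    # The config file is parsed as one space-separated "name value" pair per
    # line; an illustrative (hypothetical) excerpt:
    #   max_epochs 100
    #   batch_size 60
    #   learning_rate 0.0001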
| {
"content_hash": "5c3f6475150e96f0893db95639b23726",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 83,
"avg_line_length": 36.93103448275862,
"alnum_prop": 0.6034858387799564,
"repo_name": "nyu-dl/dl4mt-cdec",
"id": "0ddcac79edf7f048c100db801aa5c67a7a5bb0b0",
"size": "3213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "character_base/train_wmt15_deen_bpe2char_adam.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "17034"
},
{
"name": "JavaScript",
"bytes": "835"
},
{
"name": "NewLisp",
"bytes": "1582"
},
{
"name": "Perl",
"bytes": "40544"
},
{
"name": "Python",
"bytes": "456260"
},
{
"name": "Ruby",
"bytes": "1649"
},
{
"name": "Shell",
"bytes": "2211"
},
{
"name": "Slash",
"bytes": "356"
},
{
"name": "Smalltalk",
"bytes": "1892"
},
{
"name": "SystemVerilog",
"bytes": "184"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12); | {
"content_hash": "8d1c6f1ba12e63dfa08d5f675be18677",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 166,
"avg_line_length": 38,
"alnum_prop": 0.706766917293233,
"repo_name": "antoinecarme/pyaf",
"id": "067c49ae64e56517e0a7b36a5bdee8cb4ba32cac",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Difference_Lag1Trend_30_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""Configuration for Cooking Basic Mini.
This is an extension of the cooking environments from:
meltingpot/python/configs/substrates/collaborative_cooking.py
The structure of this file (and how configs are handled for these environments)
follow the design patterns introduced there.
Same as Cooking Basic but a miniature version. See Cooking Basic for full
details.
This substrate is a pure common interest game. All players share all rewards.
Players have a `5 x 5` observation window.
"""
from concept_marl.experiments.meltingpot.substrates import cooking_basics as base_config
def get_config():
"""Default config for training on collaborative cooking."""
config = base_config.get_config("basic_mini")
return config
| {
"content_hash": "921bab903d1fe9e66d241c4049dfd910",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 31.782608695652176,
"alnum_prop": 0.7906976744186046,
"repo_name": "google-research/google-research",
"id": "666e8b1bcf74f31700247a774583c32cb4c2131b",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concept_marl/experiments/meltingpot/substrates/cooking_basics_basic_mini.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import cmd
import atexit
import subprocess
import readline
import os
from Zpy.Processor import Processor
from Zpy.Completer import Completer
import sys
class Cmd(cmd.Cmd):
def __init__(self):
super(Cmd, self).__init__()
self.prompt = '(Zpy) '
self.processor = Processor()
self.completer = Completer()
self.init_history()
self.init_env()
def init_env(self):
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
def init_history(self):
histfile = os.path.join(os.getcwd(), ".python_history")
try:
readline.read_history_file(histfile)
readline.set_history_length(1000)
readline.write_history_file(histfile)
except (FileNotFoundError, PermissionError):
readline.write_history_file(histfile)
pass
atexit.register(readline.write_history_file, histfile)
def default(self, line):
print(self.processor.forward(line))
def do_EOF(self, line):
print("Bye bye!")
sys.exit(0)
def do_help(self, arg):
readme_location = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")
print( open(readme_location).read())
def cmdloop(self, intro=None):
while True:
try:
super(Cmd, self).cmdloop(intro="")
self.postloop()
break
except KeyboardInterrupt as ex:
print("^C")
except Exception as ex:
print(ex)
def complete(self, text, index):
return self.completer.complete(text, index)
if __name__ == "__main__":
Cmd().complete("ls",0) | {
"content_hash": "c9da248f498213b6f98b1d43305f90bd",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 95,
"avg_line_length": 28.046875,
"alnum_prop": 0.5777158774373259,
"repo_name": "albertaleksieiev/zpy",
"id": "2ec515cf92922189169b71038a1777672ee0008e",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Zpy/Cmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87029"
}
],
"symlink_target": ""
} |
from flask import Blueprint, render_template, flash, request, abort, url_for, redirect
from flask_login.utils import login_required, login_user, logout_user
from modules.Auth.forms import LoginForm
from modules.Auth.models import User
Keys = Blueprint('Keys', __name__, template_folder='templates', static_folder='static')
@Keys.route('/show-used-keys', methods=['GET'])
@login_required
def login():
# user_keys =
    # TODO: create a base template in the app that inherits from the main one,
    # leaving only the content block to fill in here.
return render_template('key_list.html')
| {
"content_hash": "8c7ed46e108f3bf58d5db16f0f39702f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 98,
"avg_line_length": 40.285714285714285,
"alnum_prop": 0.75177304964539,
"repo_name": "srebrny/simple-rest-api",
"id": "80f7c9ade6a49cda22dda41b66546a610c7f19a1",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/modules/Keys/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "158659"
},
{
"name": "HTML",
"bytes": "171560"
},
{
"name": "JavaScript",
"bytes": "620882"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "9841"
}
],
"symlink_target": ""
} |
"""
Updated by Lin Xiong Jul-21, 2017
Modified by Lin Xiong Oct-30, 2017 (add SE building block)
Thanks to Cher Keng Heng
"""
import argparse,logging,os
import mxnet as mx
from symbol_se_inception_v4 import get_symbol
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
def multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90, 95, 110, 120], factor=0.1):
#def multi_factor_scheduler(begin_epoch, epoch_size, step=[15, 30, 45, 60, 75, 90, 115], factor=0.1):
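    # Convert epoch milestones into iteration counts relative to the resume epoch,
    # dropping milestones already passed; returns None when nothing is left to schedule.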
step_ = [epoch_size * (x-begin_epoch) for x in step if x-begin_epoch > 0]
return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if len(step_) else None
def main():
ratio_list = [0.25, 0.125, 0.0625, 0.03125] # 1/4, 1/8, 1/16, 1/32
if args.data_type == "cifar10":
args.aug_level = 1
args.num_classes = 10
symbol = get_symbol(ratio_list[2], args.num_classes)
elif args.data_type == "imagenet":
args.num_classes = 1000
symbol = get_symbol(ratio_list[2], args.num_classes)
else:
raise ValueError("do not support {} yet".format(args.data_type))
kv = mx.kvstore.create(args.kv_store)
devs = mx.cpu() if args.gpus is None else [mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = max(int(args.num_examples / args.batch_size / kv.num_workers), 1)
begin_epoch = args.model_load_epoch if args.model_load_epoch else 0
if not os.path.exists("./model"):
os.mkdir("./model")
model_prefix = "model/se-inceptionv4-{}-{}-{}".format(args.data_type, kv.rank, 0)
checkpoint = mx.callback.do_checkpoint(model_prefix)
arg_params = None
aux_params = None
if args.retrain:
_, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.model_load_epoch)
if args.memonger:
import memonger
symbol = memonger.search_plan(symbol, data=(args.batch_size, 3, 32, 32) if args.data_type=="cifar10"
else (args.batch_size, 3, 224, 224))
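    # Training data iterator: random crop/mirror always; scale, aspect and HSV
    # jitter require --aug-level >= 2, rotation/shear require --aug-level 3
    # (cifar10 is pinned to level 1 above).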
train = mx.io.ImageRecordIter(
path_imgrec = os.path.join(args.data_dir, "train.rec") if args.data_type == 'cifar10' else
os.path.join(args.data_dir, "train_256_q90.rec") if args.aug_level == 1
else os.path.join(args.data_dir, "train_480_q90.rec") ,
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
data_shape = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
batch_size = args.batch_size,
pad = 4 if args.data_type == "cifar10" else 0,
fill_value = 127, # only used when pad is valid
rand_crop = True,
max_random_scale = 1.0, # 480 with imagnet, 32 with cifar10
min_random_scale = 1.0 if args.data_type == "cifar10" else 1.0 if args.aug_level == 1 else 0.533, # 256.0/480.0=0.533, 256.0/384.0=0.667 256.0/256=1.0
max_aspect_ratio = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 0.25, # 0.25
random_h = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36, # 0.4*90
random_s = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127
random_l = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127
max_random_contrast = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36, # 0.4*90,
max_random_illumination = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127,
max_rotate_angle = 0 if args.aug_level <= 2 else 10,
max_shear_ratio = 0 if args.aug_level <= 2 else 0.1, #0.1 args.aug_level = 3
rand_mirror = True,
shuffle = True,
num_parts = kv.num_workers,
part_index = kv.rank)
val = mx.io.ImageRecordIter(
path_imgrec = os.path.join(args.data_dir, "val.rec") if args.data_type == 'cifar10' else
os.path.join(args.data_dir, "val_256_q90.rec"),
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
batch_size = args.batch_size,
data_shape = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
rand_crop = False,
rand_mirror = False,
num_parts = kv.num_workers,
part_index = kv.rank)
model = mx.model.FeedForward(
ctx = devs,
symbol = symbol,
arg_params = arg_params,
aux_params = aux_params,
num_epoch = 200 if args.data_type == "cifar10" else 125,
begin_epoch = begin_epoch,
learning_rate = args.lr,
momentum = args.mom,
wd = args.wd,
optimizer = 'nag',
# optimizer = 'sgd',
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
lr_scheduler = multi_factor_scheduler(begin_epoch, epoch_size, step=[220, 260, 280], factor=0.1)
if args.data_type=='cifar10' else
multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90, 95, 110, 120], factor=0.1),
)
model.fit(
X = train,
eval_data = val,
eval_metric = ['acc'] if args.data_type=='cifar10' else
['acc', mx.metric.create('top_k_accuracy', top_k = 5), mx.metric.create('rmse'), mx.metric.create('ce')],
kvstore = kv,
batch_end_callback = mx.callback.Speedometer(args.batch_size, args.frequent),
epoch_end_callback = checkpoint)
# logging.info("top-1 and top-5 acc is {}".format(model.score(X = val,
# eval_metric = ['acc', mx.metric.create('top_k_accuracy', top_k = 5)])))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="command for training se-inception-v4")
parser.add_argument('--gpus', type=str, default='0', help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--data-dir', type=str, default='./data/imagenet/', help='the input data directory')
parser.add_argument('--data-type', type=str, default='imagenet', help='the dataset type')
parser.add_argument('--list-dir', type=str, default='./',
help='the directory which contain the training list file')
    parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
    parser.add_argument('--bn-mom', type=float, default=0.9, help='momentum for batch normalization')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
parser.add_argument('--batch-size', type=int, default=256, help='the batch size')
    parser.add_argument('--workspace', type=int, default=512, help='memory space size (MB) used in convolution; if xpu '
                        'memory runs out (OOM), try a smaller value, such as --workspace 256')
parser.add_argument('--num-classes', type=int, default=1000, help='the class number of your task')
parser.add_argument('--aug-level', type=int, default=2, choices=[1, 2, 3],
help='level 1: use only random crop and random mirror\n'
'level 2: add scale/aspect/hsv augmentation based on level 1\n'
'level 3: add rotation/shear augmentation based on level 2')
parser.add_argument('--num-examples', type=int, default=1281167, help='the number of training examples')
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
parser.add_argument('--model-load-epoch', type=int, default=0,
help='load the model on an epoch using the model-load-prefix')
parser.add_argument('--frequent', type=int, default=50, help='frequency of logging')
parser.add_argument('--memonger', action='store_true', default=False,
                        help='true means using memonger to save memory, https://github.com/dmlc/mxnet-memonger')
parser.add_argument('--retrain', action='store_true', default=False, help='true means continue training')
args = parser.parse_args()
    if not os.path.exists("./log"):
        os.mkdir("./log")
    hdlr = logging.FileHandler('./log/log-se-inceptionv4-{}-{}.log'.format(args.data_type, 0))
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logging.info(args)
main()
| {
"content_hash": "bd2581dd5e7a71edc169afa3f0417605",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 178,
"avg_line_length": 59.91503267973856,
"alnum_prop": 0.5692156648849133,
"repo_name": "bruinxiong/SENet.mxnet",
"id": "c73def109d4ecb8d7f21319819e0769e4f3d9f58",
"size": "9167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train_se_inception_v4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98112"
},
{
"name": "Shell",
"bytes": "1703"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from decimal import Decimal
from mock import MagicMock, patch
from prices import Price
import pytest
from satchless.item import InsufficientStock
from . import Cart, SessionCart
from . import forms
from .forms import ReplaceCartLineForm, ReplaceCartLineFormSet
from ..cart.utils import (
contains_unavailable_products, remove_unavailable_products)
from ..checkout.core import Checkout
from ..checkout.views import details as checkout_details
from ..product.models import (ProductVariant, Product)
class BigShipVariant(ProductVariant):
class Meta:
app_label = 'product'
def get_price_per_item(self, discounted=True, **kwargs):
return Price(10, currency='USD')
def get_stock_quantity(self):
return 10
@property
def product(self):
return BigShip(name='Big Ship', price=Price(10, currency='USD'),
weight=Decimal(123))
def display_variant(self, attributes=None):
return 'BIG SHIP'
class BigShip(Product):
class Meta:
app_label = 'product'
def get_slug(self):
return 'bigship'
class ShipPhotoVariant(ProductVariant):
@property
def product(self):
return ShipPhoto(name='Ship Photo', price=Price(10, currency='USD'))
def check_quantity(self, quantity):
pass
def display_variant(self, attributes=None):
return 'SHIP PHOTO'
class Meta:
app_label = 'product'
class ShipPhoto(Product):
def get_slug(self):
return 'bigship-photo'
def is_shipping_required(self):
return False
class Meta:
app_label = 'product'
class AddToCartForm(forms.AddToCartForm):
def get_variant(self, cleaned_data):
return self.product
stocked_variant = BigShipVariant(name='Big Ship')
non_stocked_variant = ShipPhotoVariant(name='Ship Photo')
def test_cart_checks_quantity():
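    # Requesting more units (100) than BigShipVariant reports in stock (10) must
    # raise InsufficientStock and leave the cart empty.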
cart = Cart(session_cart=MagicMock())
with pytest.raises(InsufficientStock):
cart.add(stocked_variant, 100)
assert not cart
def test_cart_add_adds_to_session_cart():
cart = Cart(session_cart=SessionCart())
cart.add(stocked_variant, 10)
assert cart.session_cart.count() == 10
assert cart.session_cart.modified
assert cart.session_cart[0].product == stocked_variant.display_product()
def test_quantity_is_correctly_saved():
cart = Cart(session_cart=MagicMock())
data = {'quantity': 5}
form = AddToCartForm(data, cart=cart, product=stocked_variant)
assert form.is_valid()
assert not cart
form.save()
product_quantity = cart.get_line(stocked_variant).quantity
assert product_quantity == 5
def test_multiple_actions_result_in_combined_quantity():
cart = Cart(session_cart=MagicMock())
data = {'quantity': 5}
form = AddToCartForm(data, cart=cart, product=stocked_variant)
assert form.is_valid()
form.save()
form = AddToCartForm(data, cart=cart, product=stocked_variant)
assert form.is_valid()
form.save()
product_quantity = cart.get_line(stocked_variant).quantity
assert product_quantity == 10
def test_excessive_quantity_is_rejected():
cart = Cart(session_cart=MagicMock())
data = {'quantity': 15}
form = AddToCartForm(data, cart=cart, product=stocked_variant)
assert not form.is_valid()
assert not cart
def test_replace_form_replaces_quantity():
cart = Cart(session_cart=MagicMock())
data = {'quantity': 5}
form = ReplaceCartLineForm(data, cart=cart, product=stocked_variant)
assert form.is_valid()
form.save()
product_quantity = cart.get_line(stocked_variant).quantity
assert product_quantity == 5
form = ReplaceCartLineForm(data, cart=cart, product=stocked_variant)
assert form.is_valid()
form.save()
product_quantity = cart.get_line(stocked_variant).quantity
assert product_quantity == 5
def test_replace_form_rejects_excessive_quantity():
cart = Cart(session_cart=MagicMock())
data = {'quantity': 15}
form = ReplaceCartLineForm(data, cart=cart, product=stocked_variant)
assert not form.is_valid()
def test_replace_formset_works():
cart = Cart(session_cart=MagicMock())
cart.add(stocked_variant, 5)
cart.add(non_stocked_variant, 100)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-0-quantity': 5,
'form-1-quantity': 5}
form = ReplaceCartLineFormSet(data, cart=cart)
assert form.is_valid()
form.save()
product_quantity = cart.get_line(stocked_variant).quantity
assert product_quantity == 5
def test_session_cart_returns_correct_prices():
cart = Cart(session_cart=SessionCart())
cart.add(stocked_variant, quantity=10)
cart_price = cart[0].get_price_per_item()
sessioncart_price = cart.session_cart[0].get_price_per_item()
assert cart_price == sessioncart_price
def test_cart_contains_unavailable_products():
cart = Cart(session_cart=SessionCart())
cart.add(non_stocked_variant, quantity=100)
cart.add(stocked_variant, quantity=12, check_quantity=False)
assert contains_unavailable_products(cart)
def test_cart_contains_only_available_products():
cart = Cart(session_cart=SessionCart())
cart.add(non_stocked_variant, quantity=100)
cart.add(stocked_variant, quantity=10, check_quantity=False)
assert not contains_unavailable_products(cart)
def test_cart_contains_products_on_stock():
cart = Cart(session_cart=SessionCart())
cart.add(stocked_variant, quantity=12, check_quantity=False)
assert cart.count() == 12
remove_unavailable_products(cart)
assert cart.count() == 10
def test_cart_doesnt_contain_empty_products():
stocked_variant = BigShipVariant(name='Big Ship')
stocked_variant.get_stock_quantity = MagicMock(return_value=0)
cart = Cart(session_cart=SessionCart())
cart.add(stocked_variant, quantity=10, check_quantity=False)
remove_unavailable_products(cart)
assert len(cart) == 0
@patch.object(Cart, 'for_session_cart')
@patch('saleor.checkout.views.redirect')
def test_checkout_redirects_on_cart_page(mocked_redirect, mocked_cart):
cart = Cart(session_cart=SessionCart())
cart.add(stocked_variant, quantity=12, check_quantity=False)
mocked_cart.return_value = cart
checkout_details(request=MagicMock(), step=None)
mocked_redirect.assert_called_once_with('cart:index')
@patch.object(Checkout, 'get_next_step')
@patch.object(Cart, 'for_session_cart')
@patch('saleor.checkout.views.redirect')
def test_checkout_redirects_on_next_step(
mocked_redirect, mocked_cart, mocked_step):
next_step = 'next_step'
cart = Cart(session_cart=SessionCart())
cart.add(stocked_variant, quantity=1)
mocked_cart.return_value = cart
mocked_step.return_value = next_step
checkout_details(request=MagicMock(), step=None)
mocked_redirect.assert_called_once_with(next_step)
| {
"content_hash": "a7f66be305c5d137f1749bdcd83e879b",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 76,
"avg_line_length": 30.01298701298701,
"alnum_prop": 0.6978220106735901,
"repo_name": "spartonia/saleor",
"id": "d86586bc6925c5ace97982e841ffbe6e1dcd2250",
"size": "6933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/cart/test_cart.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27844"
},
{
"name": "HTML",
"bytes": "189431"
},
{
"name": "JavaScript",
"bytes": "18707"
},
{
"name": "Python",
"bytes": "292182"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
} |
import json
from anchore_engine import db
from anchore_engine.db import ObjectStorageRecord
def put(userId, bucket, key, data, metadata=None, session=None):
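    # Insert a new object record, or update an existing one in place. Passing
    # data=None leaves the stored content untouched so only the metadata is updated.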
if not session:
session = db.Session
our_result = (
session.query(ObjectStorageRecord)
.filter_by(userId=userId, bucket=bucket, key=key)
.one_or_none()
)
if not our_result:
if metadata:
meta = json.dumps(metadata)
else:
meta = None
        obj = ObjectStorageRecord(userId=userId, bucket=bucket, key=key, object_metadata=meta)
obj.content = data
session.add(obj)
return True
else:
# No way to remove data, use delete if that is desired. This allows updating metadata only if needed by not including data
if data is not None:
our_result.content = data
if metadata:
            our_result.object_metadata = json.dumps(metadata)
return True
def get(userId, bucket, key, session=None):
result = (
session.query(ObjectStorageRecord)
.filter_by(userId=userId, bucket=bucket, key=key)
.first()
)
return result.to_dict() if result else None
def get_metadata(userId, bucket, key, session=None):
ret = {}
result = (
session.query(
ObjectStorageRecord.userId,
ObjectStorageRecord.bucket,
ObjectStorageRecord.key,
ObjectStorageRecord.object_metadata,
ObjectStorageRecord.created_at,
ObjectStorageRecord.last_updated,
)
.filter_by(userId=userId, bucket=bucket, key=key)
.first()
)
if result:
for i in range(0, len(list(result.keys()))):
k = list(result.keys())[i]
if i == "object_metadata":
ret[k] = json.loads(result[i])
else:
ret[k] = result[i]
return ret
def exists(userId, bucket, key, session=None):
if not session:
session = db.Session
result = (
session.query(
ObjectStorageRecord.userId,
ObjectStorageRecord.bucket,
ObjectStorageRecord.key,
)
.filter_by(userId=userId, bucket=bucket, key=key)
.first()
)
return result is not None
def list_all(session=None, **dbfilter):
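    # Return listing entries (without the content column) for every record matching
    # the given filter kwargs, e.g. list_all(session, userId=uid, bucket='b').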
if not session:
session = db.Session
ret = []
results = session.query(
ObjectStorageRecord.bucket,
ObjectStorageRecord.key,
ObjectStorageRecord.userId,
ObjectStorageRecord.record_state_key,
ObjectStorageRecord.record_state_val,
ObjectStorageRecord.created_at,
ObjectStorageRecord.last_updated,
).filter_by(**dbfilter)
for result in results:
obj = {}
for i in range(0, len(list(result.keys()))):
k = list(result.keys())[i]
obj[k] = result[i]
if obj:
ret.append(obj)
return ret
def delete(userId, bucket, key, session=None):
if not session:
session = db.Session
result = (
session.query(ObjectStorageRecord)
.filter_by(userId=userId, bucket=bucket, key=key)
.first()
)
if result:
session.delete(result)
return True
| {
"content_hash": "0b50762e4a8aef7befa0e737737cbc5a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 130,
"avg_line_length": 25.928,
"alnum_prop": 0.5887071891391545,
"repo_name": "anchore/anchore-engine",
"id": "e29089321c4f2bda11f1bb3f1153b0f3ffcf1c98",
"size": "3241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anchore_engine/db/db_objectstorage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
from MEHI.serial.fusion import *
from MEHI.serial.IO import load_tiff
from test_utils import LocalTestCase
import numpy as np
import os,sys
L_pwd = os.path.abspath('.') + '/test_data/L_side/'
R_pwd = os.path.abspath('.') + '/test_data/R_side/'
class LocalTestFusionCase(LocalTestCase):
def setUp(self):
super(LocalTestFusionCase, self).setUp()
self.L_imgs = load_tiff(L_pwd)
self.R_imgs = load_tiff(R_pwd)
def tearDown(self):
super(LocalTestFusionCase, self).tearDown()
class TestSerailFusion(LocalTestFusionCase):
def test_content_fusion(self):
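        # Fuse paired left/right slices and check the result keeps the input
        # dtype and shape.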
img_stack = zip(self.L_imgs, self.R_imgs)
fused_img = content_fusion(img_stack)
assert (fused_img.dtype == self.L_imgs.dtype)
assert (fused_img.shape == self.L_imgs.shape)
def test_wavelet_fusion(self):
img_stack = zip(self.L_imgs, self.R_imgs)
fused_img = wavelet_fusion(img_stack)
assert (fused_img.dtype == self.L_imgs.dtype)
assert (fused_img.shape == self.L_imgs.shape)
| {
"content_hash": "508ded88ed5e898fcaeb31c4156d1907",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 53,
"avg_line_length": 32.625,
"alnum_prop": 0.6561302681992337,
"repo_name": "septicmk/MEHI",
"id": "374ca83de4476b44d656e950743d9421f22a5e6c",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_fusion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4087"
},
{
"name": "C++",
"bytes": "33417"
},
{
"name": "Makefile",
"bytes": "318"
},
{
"name": "Python",
"bytes": "97232"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gc
import os
import pickle
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
def testHandleDtypeShapeMatch(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
def testGPUInt64(self):
if not context.context().num_gpus():
return
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32."):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
copied_variable = copy.deepcopy(variable)
copied_variable.assign(4 * np.ones((4, 4, 4)))
# Copying the variable should create a new underlying tensor with distinct
# values.
self.assertFalse(np.allclose(variable.numpy(), copied_variable.numpy()))
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
v.assign(2.0) # Note: this fails if we run convert_to_tensor on not the
# variable graph.
def testFetchHandle(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertGreater(len(handle.eval()), 0)
def testCachedValueReadBeforeWrite(self):
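    # With a caching device set, reading the variable and running assign_add in
    # the same session.run must return the pre-assignment value.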
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
sess.run(v.initializer)
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to assign variable with wrong "
"dtype. Expected int32 got float."):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(10.0)
pickle.dump(v, f)
with open(fname, "rb") as f:
v = pickle.load(f)
self.assertAllEqual(v.numpy(), 10.0)
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
def testScatterUpdateString(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
def testScatterUpdateStringScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with self.test_session(use_gpu=True):
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testHandleNumpy(self):
with context.eager_mode():
with self.assertRaises(ValueError):
resource_variable_ops.ResourceVariable(
1.0, name="handle-numpy").handle.numpy()
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, sess.run(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
sess.run(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
sess.run(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
resource_variable_ops.ResourceVariable(
variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
resource_variable_ops.ResourceVariable(
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEquals(2, math_ops.add(w, 1).eval())
self.assertEquals(v._handle, w._handle)
self.assertEquals(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEager(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
variables.global_variables_initializer().run()
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError("Resource .*/var5/.* does not exist"):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
self.assertTrue(isinstance(v.handle, ops.EagerTensor))
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = var._handle
del var
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 1, 1, 1, 1, 1, 1, 1], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, 12, 1, 11, 10, 1, 1, 13])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time) and
# eager execution (where the error is realized during kernel execution).
with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
with self.assertRaisesRegexp(Exception, r"hapes must be equal"):
self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4])
@test_util.run_in_graph_and_eager_modes
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
with self.session(copy_to_graph) as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(copied.initializer)
class _MixedPrecisionVariableTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def test_dense_var_to_tensor_read_dtype_same_as_var_dtype(self):
# read_dtype is same as dtype
v = resource_variable_ops.ResourceVariable(1.0, dtype=dtypes.float32)
v = resource_variable_ops._MixedPrecisionVariable(v, dtypes.float32)
if not context.executing_eagerly():
v.initializer.run()
# dtype is not read_dtype, return NotImplemented
self.assertEqual(
NotImplemented, v._dense_var_to_tensor(dtype=dtypes.float16))
self.assertEqual(NotImplemented,
v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=True))
# as_ref is False
t = v._dense_var_to_tensor(as_ref=False)
self.assertTrue(isinstance(t, ops.Tensor))
self.assertEqual(t.dtype, dtypes.float32)
self.assertEqual(self.evaluate(t), 1.0)
t = v._dense_var_to_tensor(dtype=dtypes.float32, as_ref=False)
self.assertTrue(isinstance(t, ops.Tensor))
self.assertEqual(t.dtype, dtypes.float32)
self.assertEqual(self.evaluate(t), 1.0)
# as_ref is True
self.assertEqual(NotImplemented, v._dense_var_to_tensor(as_ref=True))
self.assertEqual(NotImplemented,
v._dense_var_to_tensor(dtype=dtypes.float32, as_ref=True))
@test_util.run_in_graph_and_eager_modes()
def test_dense_var_to_tensor_read_dtype_different_from_var_dtype(self):
# read_dtype is different from dtype
v = resource_variable_ops.ResourceVariable(1.0, dtype=dtypes.float32)
v = resource_variable_ops._MixedPrecisionVariable(v, dtypes.float16)
if not context.executing_eagerly():
v.initializer.run()
# as_ref is False
t = v._dense_var_to_tensor(as_ref=False)
self.assertTrue(isinstance(t, ops.Tensor))
self.assertEqual(t.dtype, dtypes.float16)
self.assertEqual(self.evaluate(t), 1.0)
t = v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=False)
self.assertTrue(isinstance(t, ops.Tensor))
self.assertEqual(t.dtype, dtypes.float16)
self.assertEqual(self.evaluate(t), 1.0)
# as_ref is True
self.assertEqual(NotImplemented, v._dense_var_to_tensor(as_ref=True))
self.assertEqual(NotImplemented,
v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=True))
if __name__ == "__main__":
test.main()
| {
"content_hash": "cb2db928ce166376401077cefc9e3819",
"timestamp": "",
"source": "github",
"line_count": 974,
"max_line_length": 88,
"avg_line_length": 42.5482546201232,
"alnum_prop": 0.6538777086047971,
"repo_name": "brchiu/tensorflow",
"id": "45b9ede813e1592fa28d1c8c6c9dc9aeea0a8d6d",
"size": "42131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/resource_variable_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""OpenID Provider Authentication interface unit test package
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "12/11/09"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
| {
"content_hash": "c8885333d750e84a0af306d8d3d2cc7b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 68,
"avg_line_length": 33.3,
"alnum_prop": 0.6846846846846847,
"repo_name": "philipkershaw/ndg_security_server",
"id": "bd3fae4aefff10dc2528034598f6105240ce064e",
"size": "333",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndg/security/server/test/unit/openid/provider/authninterface/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "206589"
},
{
"name": "JavaScript",
"bytes": "32078"
},
{
"name": "Python",
"bytes": "861854"
}
],
"symlink_target": ""
} |
from udsoncan.client import Client
from udsoncan import services
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestRoutineControl(ClientServerTest):
def __init__(self, *args, **kwargs):
ClientServerTest.__init__(self, *args, **kwargs)
def test_start_routine_success(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x31\x01\x00\x12\x45\x67\x89\xaa")
self.conn.fromuserqueue.put(b"\x71\x01\x00\x12\x99\x88") # Positive response
def _test_start_routine_success(self):
response = self.udsclient.start_routine(routine_id=0x12, data = b'\x45\x67\x89\xaa')
self.assertTrue(response.positive)
self.assertEqual(response.service_data.control_type_echo, 0x1)
self.assertEqual(response.service_data.routine_id_echo, 0x12)
self.assertEqual(response.service_data.routine_status_record, b'\x99\x88')
def test_start_routine_success_spr(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x31\x81\x00\x12\x45\x67\x89\xaa")
self.conn.fromuserqueue.put('wait') # Synchronize
def _test_start_routine_success_spr(self):
with self.udsclient.suppress_positive_response:
response = self.udsclient.start_routine(routine_id=0x12, data = b'\x45\x67\x89\xaa')
self.assertEqual(response, None)
self.conn.fromuserqueue.get(timeout=0.2) #Avoid closing connection prematurely
def test_stop_routine_success(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x31\x02\x12\x34\x45\x67\x89\xaa")
self.conn.fromuserqueue.put(b"\x71\x02\x12\x34\x99\x88") # Positive response
def _test_stop_routine_success(self):
response = self.udsclient.stop_routine(routine_id=0x1234, data = b'\x45\x67\x89\xaa')
self.assertTrue(response.positive)
self.assertEqual(response.service_data.control_type_echo, 0x2)
self.assertEqual(response.service_data.routine_id_echo, 0x1234)
self.assertEqual(response.service_data.routine_status_record, b'\x99\x88')
def test_get_routine_result_success(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x31\x03\x12\x34\x45\x67\x89\xaa")
self.conn.fromuserqueue.put(b"\x71\x03\x12\x34\x99\x88") # Positive response
def _test_get_routine_result_success(self):
response = self.udsclient.get_routine_result(routine_id=0x1234, data = b'\x45\x67\x89\xaa')
self.assertTrue(response.positive)
self.assertEqual(response.service_data.control_type_echo, 0x3)
self.assertEqual(response.service_data.routine_id_echo, 0x1234)
self.assertEqual(response.service_data.routine_status_record, b'\x99\x88')
def test_routine_control_denied_exception(self):
self.wait_request_and_respond(b"\x7F\x31\x72") #General Programming FAilure
def _test_routine_control_denied_exception(self):
with self.assertRaises(NegativeResponseException) as handle:
self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
response = handle.exception.response
self.assertTrue(response.valid)
self.assertFalse(response.positive)
self.assertTrue(issubclass(response.service, services.RoutineControl))
self.assertEqual(response.code, 0x72)
def test_routine_control_denied_no_exception(self):
self.wait_request_and_respond(b"\x7F\x31\x72") #General Programming FAilure
def _test_routine_control_denied_no_exception(self):
self.udsclient.config['exception_on_negative_response'] = False
response = self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
self.assertTrue(response.valid)
self.assertFalse(response.positive)
self.assertTrue(issubclass(response.service, services.RoutineControl))
self.assertEqual(response.code, 0x72)
def test_routine_control_invalidservice_exception(self):
self.wait_request_and_respond(b"\x00\x11\x12\x34") #Inexistent Service
def _test_routine_control_invalidservice_exception(self):
with self.assertRaises(InvalidResponseException) as handle:
self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
def test_routine_control_invalidservice_no_exception(self):
self.wait_request_and_respond(b"\x00\x11\x12\x34") #Inexistent Service
def _test_routine_control_invalidservice_no_exception(self):
self.udsclient.config['exception_on_invalid_response'] = False
response = self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
self.assertFalse(response.valid)
def test_routine_control_wrongservice_exception(self):
self.wait_request_and_respond(b"\x7E\x11\x12\x34") # Valid but wrong service (Tester Present)
def _test_routine_control_wrongservice_exception(self):
with self.assertRaises(UnexpectedResponseException) as handle:
self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
def test_routine_control_wrongservice_no_exception(self):
self.wait_request_and_respond(b"\x7E\x11\x12\x34") # Valid but wrong service (Tester Present)
def _test_routine_control_wrongservice_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_routine_control_bad_controltype_exception(self):
self.wait_request_and_respond(b"\x71\x12\x12\x34") # Valid but wrong service (Tester Present)
def _test_routine_control_bad_controltype_exception(self):
with self.assertRaises(UnexpectedResponseException) as handle:
self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
def test_routine_control_bad_controltype_no_exception(self):
self.wait_request_and_respond(b"\x71\x12\x12\x34") # Valid but wrong service (Tester Present)
def _test_routine_control_bad_controltype_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_routine_control_bad_routine_id_exception(self):
self.wait_request_and_respond(b"\x71\x11\x12\x35") # Valid but wrong service (Tester Present)
def _test_routine_control_bad_routine_id_exception(self):
with self.assertRaises(UnexpectedResponseException) as handle:
self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
def test_routine_control_bad_routine_id_no_exception(self):
self.wait_request_and_respond(b"\x71\x11\x12\x35") # Valid but wrong service (Tester Present)
def _test_routine_control_bad_routine_id_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.routine_control(routine_id=0x1234, control_type=0x11, data=b'\x99\x88')
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_bad_param(self):
pass
def _test_bad_param(self):
with self.assertRaises(ValueError):
self.udsclient.routine_control(routine_id=-1, control_type=1)
with self.assertRaises(ValueError):
self.udsclient.routine_control(routine_id=0x10000, control_type=1)
with self.assertRaises(ValueError):
self.udsclient.routine_control(routine_id=1, control_type=-1)
with self.assertRaises(ValueError):
self.udsclient.routine_control(routine_id=1, control_type=0x80)
with self.assertRaises(ValueError):
self.udsclient.routine_control(routine_id=1, control_type=1, data=123)
| {
"content_hash": "04047757341ff9a36f9adbdffd80d96b",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 105,
"avg_line_length": 50.19512195121951,
"alnum_prop": 0.7088192419825073,
"repo_name": "pylessard/python-udsoncan",
"id": "df1e212fb5c1bc48214a121b86466a12c4c0e856",
"size": "8232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/client/test_routine_control.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "835553"
}
],
"symlink_target": ""
} |
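For readers decoding the raw byte strings in the test above: per ISO 14229, a RoutineControl request is the service ID 0x31, a one-byte sub-function (the control type: 0x01 start, 0x02 stop, 0x03 get results), a big-endian 16-bit routine identifier, and then any routine-specific data; the positive response echoes the sub-function and routine ID under service ID 0x71. A minimal sketch of the layout (the helper name is illustrative and not part of udsoncan or of the original tests):
import struct
def routine_control_request(control_type: int, routine_id: int, data: bytes = b'') -> bytes:
    # 0x31 | control type | routine ID (big-endian, 2 bytes) | optional record data
    return bytes([0x31, control_type]) + struct.pack('>H', routine_id) + data
# Reproduces the first expected request in the test file above
assert routine_control_request(0x01, 0x0012, b'\x45\x67\x89\xaa') == b'\x31\x01\x00\x12\x45\x67\x89\xaa'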
cars = 100
# 4.0 is a floating-point number
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print "There are", cars, "cars available."
print "There are only", drivers, "drivers available."
print "There will be", cars_not_driven, "empty cars today."
print "We have transport", carpool_capacity, "people today."
print "We have", passengers, "to carpool today."
print "We need to put about", average_passengers_per_car, "in each car."
| {
"content_hash": "9cf585162884fdac421dbd40e0a7b33f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.726962457337884,
"repo_name": "CodeCatz/litterbox",
"id": "9005af9a69abb0747d52da84106b4f791526a0ee",
"size": "656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Krista/ex4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927158"
},
{
"name": "JavaScript",
"bytes": "796463"
},
{
"name": "Python",
"bytes": "192149"
},
{
"name": "Ruby",
"bytes": "54"
}
],
"symlink_target": ""
} |
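One detail worth noting in the exercise above: passengers / cars_driven is integer division under Python 2, which the print statements require, so the average comes out as 3 rather than 3.0. A worked check of the values (a sketch, not part of the original exercise):
cars, drivers, passengers = 100, 30, 90
space_in_a_car = 4.0
print(cars - drivers)            # 70 cars left empty
print(drivers * space_in_a_car)  # 120.0 seats of carpool capacity
print(passengers / drivers)      # 3 under Python 2, 3.0 under Python 3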
from CIM14.IEC61970.Generation.GenerationDynamics.SteamSupply import SteamSupply
class FossilSteamSupply(SteamSupply):
"""Fossil fueled boiler (e.g., coal, oil, gas)
"""
def __init__(self, boilerControlMode="following", superHeater2Capacity=0.0, auxPowerVersusFrequency=0.0, feedWaterIG=0.0, controlPED=0.0, throttlePressureSP=0.0, pressureCtrlIG=0.0, fuelSupplyDelay=0.0, controlPEB=0.0, controlTC=0.0, pressureFeedback=0, feedWaterPG=0.0, controlIC=0.0, controlPC=0.0, minErrorRateP=0.0, fuelSupplyTC=0.0, fuelDemandLimit=0.0, mechPowerSensorLag=0.0, pressureCtrlDG=0.0, maxErrorRateP=0.0, superHeaterPipePD=0.0, controlErrorBiasP=0.0, feedWaterTC=0.0, superHeater1Capacity=0.0, auxPowerVersusVoltage=0.0, pressureCtrlPG=0.0, *args, **kw_args):
"""Initialises a new 'FossilSteamSupply' instance.
@param boilerControlMode: The control mode of the boiler Values are: "following", "coordinated"
@param superHeater2Capacity: Secondary Superheater Capacity
@param auxPowerVersusFrequency: Off nominal frequency effect on auxiliary real power. Per unit active power variation versus per unit frequency variation.
@param feedWaterIG: Feedwater Integral Gain ratio
@param controlPED: Pressure Error Deadband
@param throttlePressureSP: Throttle Pressure Setpoint
@param pressureCtrlIG: Pressure Control Integral Gain ratio
@param fuelSupplyDelay: Fuel Delay
@param controlPEB: Pressure Error Bias ratio
@param controlTC: Time Constant
@param pressureFeedback: Pressure Feedback Indicator
@param feedWaterPG: Feedwater Proportional Gain ratio
@param controlIC: Integral Constant
@param controlPC: Proportional Constant
@param minErrorRateP: Active power Minimum Error Rate Limit
@param fuelSupplyTC: Fuel Supply Time Constant
@param fuelDemandLimit: Fuel Demand Limit
@param mechPowerSensorLag: Mechanical Power Sensor Lag
@param pressureCtrlDG: Pressure Control Derivative Gain ratio
@param maxErrorRateP: Active power Maximum Error Rate Limit
@param superHeaterPipePD: Superheater Pipe Pressure Drop Constant
@param controlErrorBiasP: Active power Error Bias ratio
@param feedWaterTC: Feedwater Time Constant ratio
@param superHeater1Capacity: Drum/Primary Superheater Capacity
@param auxPowerVersusVoltage: Off nominal voltage effect on auxiliary real power. Per unit active power variation versus per unit voltage variation.
@param pressureCtrlPG: Pressure Control Proportional Gain ratio
"""
#: The control mode of the boiler Values are: "following", "coordinated"
self.boilerControlMode = boilerControlMode
#: Secondary Superheater Capacity
self.superHeater2Capacity = superHeater2Capacity
#: Off nominal frequency effect on auxiliary real power. Per unit active power variation versus per unit frequency variation.
self.auxPowerVersusFrequency = auxPowerVersusFrequency
#: Feedwater Integral Gain ratio
self.feedWaterIG = feedWaterIG
#: Pressure Error Deadband
self.controlPED = controlPED
#: Throttle Pressure Setpoint
self.throttlePressureSP = throttlePressureSP
#: Pressure Control Integral Gain ratio
self.pressureCtrlIG = pressureCtrlIG
#: Fuel Delay
self.fuelSupplyDelay = fuelSupplyDelay
#: Pressure Error Bias ratio
self.controlPEB = controlPEB
#: Time Constant
self.controlTC = controlTC
#: Pressure Feedback Indicator
self.pressureFeedback = pressureFeedback
#: Feedwater Proportional Gain ratio
self.feedWaterPG = feedWaterPG
#: Integral Constant
self.controlIC = controlIC
#: Proportional Constant
self.controlPC = controlPC
#: Active power Minimum Error Rate Limit
self.minErrorRateP = minErrorRateP
#: Fuel Supply Time Constant
self.fuelSupplyTC = fuelSupplyTC
#: Fuel Demand Limit
self.fuelDemandLimit = fuelDemandLimit
#: Mechanical Power Sensor Lag
self.mechPowerSensorLag = mechPowerSensorLag
#: Pressure Control Derivative Gain ratio
self.pressureCtrlDG = pressureCtrlDG
#: Active power Maximum Error Rate Limit
self.maxErrorRateP = maxErrorRateP
#: Superheater Pipe Pressure Drop Constant
self.superHeaterPipePD = superHeaterPipePD
#: Active power Error Bias ratio
self.controlErrorBiasP = controlErrorBiasP
#: Feedwater Time Constant ratio
self.feedWaterTC = feedWaterTC
#: Drum/Primary Superheater Capacity
self.superHeater1Capacity = superHeater1Capacity
#: Off nominal voltage effect on auxiliary real power. Per unit active power variation versus per unit voltage variation.
self.auxPowerVersusVoltage = auxPowerVersusVoltage
#: Pressure Control Proportional Gain ratio
self.pressureCtrlPG = pressureCtrlPG
super(FossilSteamSupply, self).__init__(*args, **kw_args)
_attrs = ["boilerControlMode", "superHeater2Capacity", "auxPowerVersusFrequency", "feedWaterIG", "controlPED", "throttlePressureSP", "pressureCtrlIG", "fuelSupplyDelay", "controlPEB", "controlTC", "pressureFeedback", "feedWaterPG", "controlIC", "controlPC", "minErrorRateP", "fuelSupplyTC", "fuelDemandLimit", "mechPowerSensorLag", "pressureCtrlDG", "maxErrorRateP", "superHeaterPipePD", "controlErrorBiasP", "feedWaterTC", "superHeater1Capacity", "auxPowerVersusVoltage", "pressureCtrlPG"]
_attr_types = {"boilerControlMode": str, "superHeater2Capacity": float, "auxPowerVersusFrequency": float, "feedWaterIG": float, "controlPED": float, "throttlePressureSP": float, "pressureCtrlIG": float, "fuelSupplyDelay": float, "controlPEB": float, "controlTC": float, "pressureFeedback": int, "feedWaterPG": float, "controlIC": float, "controlPC": float, "minErrorRateP": float, "fuelSupplyTC": float, "fuelDemandLimit": float, "mechPowerSensorLag": float, "pressureCtrlDG": float, "maxErrorRateP": float, "superHeaterPipePD": float, "controlErrorBiasP": float, "feedWaterTC": float, "superHeater1Capacity": float, "auxPowerVersusVoltage": float, "pressureCtrlPG": float}
_defaults = {"boilerControlMode": "following", "superHeater2Capacity": 0.0, "auxPowerVersusFrequency": 0.0, "feedWaterIG": 0.0, "controlPED": 0.0, "throttlePressureSP": 0.0, "pressureCtrlIG": 0.0, "fuelSupplyDelay": 0.0, "controlPEB": 0.0, "controlTC": 0.0, "pressureFeedback": 0, "feedWaterPG": 0.0, "controlIC": 0.0, "controlPC": 0.0, "minErrorRateP": 0.0, "fuelSupplyTC": 0.0, "fuelDemandLimit": 0.0, "mechPowerSensorLag": 0.0, "pressureCtrlDG": 0.0, "maxErrorRateP": 0.0, "superHeaterPipePD": 0.0, "controlErrorBiasP": 0.0, "feedWaterTC": 0.0, "superHeater1Capacity": 0.0, "auxPowerVersusVoltage": 0.0, "pressureCtrlPG": 0.0}
_enums = {"boilerControlMode": "BoilerControlMode"}
_refs = []
_many_refs = []
| {
"content_hash": "cca9f96f838a78d19c9e7f961576cc29",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 677,
"avg_line_length": 57.73170731707317,
"alnum_prop": 0.7163779749331081,
"repo_name": "rwl/PyCIM",
"id": "96f3a90268f0051201abc2ca2678783fc1edacc5",
"size": "8201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/IEC61970/Generation/GenerationDynamics/FossilSteamSupply.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
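Because FossilSteamSupply is configured entirely through keyword arguments with the defaults listed in _defaults, constructing one is straightforward. A minimal usage sketch, assuming the PyCIM package is installed under the module path shown in the metadata; the parameter values are illustrative, not taken from any real boiler model:
from CIM14.IEC61970.Generation.GenerationDynamics.FossilSteamSupply import FossilSteamSupply
supply = FossilSteamSupply(
    boilerControlMode="coordinated",  # or "following", the default
    throttlePressureSP=16.5,          # illustrative setpoint value
    fuelDemandLimit=1.0,
    pressureFeedback=1,
)
print(supply.boilerControlMode, supply.throttlePressureSP)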
lines = []
try:
while True:
line = raw_input()
lines.append(line)
except EOFError:
pass
import re
pattern = re.compile('[aiueoAIUEO]')
for line in lines:
if pattern.search(line):
print(line.upper())
else:
print(line.lower()) | {
"content_hash": "aee2eb601fab04743ebdd5d2b4577874",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 36,
"avg_line_length": 16.6,
"alnum_prop": 0.6465863453815262,
"repo_name": "Fukayanegi/CodeIQ_py",
"id": "3a45859f63abbe24c84eae9f9f8ae8eba775b02a",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tb/big_mother_little_child.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4838"
}
],
"symlink_target": ""
} |
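The filter above is Python 2 code (raw_input was removed in Python 3). An equivalent Python 3 sketch that reads standard input instead of looping on raw_input; this reproduces the behaviour only and is not the original author's code:
import re
import sys
vowel = re.compile('[aiueoAIUEO]')
for line in sys.stdin.read().splitlines():
    # uppercase lines that contain a vowel, lowercase everything else
    print(line.upper() if vowel.search(line) else line.lower())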