import unittest
import maillib
class BasicTests(unittest.TestCase):
def test_body(self):
msg = maillib.Message.from_string(RAW_MESSAGE)
self.assertEqual(msg.body, u'test\n')
def test_html(self):
msg = maillib.Message.from_string(RAW_MESSAGE)
self.assertEqual(msg.html, u'test\n')
def test_header(self):
msg = maillib.Message.from_string(RAW_MESSAGE)
self.assertEqual(msg.subject, u'test')
RAW_MESSAGE = """\
MIME-Version: 1.0
Received: by 10.142.223.2 with HTTP; Mon, 2 Mar 2009 20:00:55 -0800 (PST)
Date: Mon, 2 Mar 2009 22:00:55 -0600
Delivered-To: jkocherhans@gmail.com
Message-ID: <c90890f0903022000g7ddb0965i2360bcbeda6c8e23@mail.gmail.com>
Subject: test
From: Joseph Kocherhans <jkocherhans@gmail.com>
To: joseph@jkocherhans.com
Content-Type: multipart/alternative; boundary=00032556403a5fd82604642efb44
--00032556403a5fd82604642efb44
Content-Type: text/plain; charset=ISO-8859-1
Content-Transfer-Encoding: 7bit
test
--00032556403a5fd82604642efb44
Content-Type: text/html; charset=ISO-8859-1
Content-Transfer-Encoding: 7bit
test
--00032556403a5fd82604642efb44--
"""
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "6bcff6b130f635d98bca401dbb9025b1",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 27.022727272727273,
"alnum_prop": 0.7325483599663583,
"repo_name": "jkocherhans/maillib",
"id": "3850083bd4cff9eb065e6d9dffb3ef94e0240191",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maillib/tests/multipart.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "44386"
}
],
"symlink_target": ""
} |
from os import mkdir
from os.path import join
from fsyscall.share import FSLAVE_SYSCALLS, SYSCALLS, Variable, \
bufsize_of_datatype, data_of_argument, \
concrete_datatype_of_abstract_datatype, \
drop_pointer, drop_prefix, make_cmd_name, \
make_decl, make_payload_size_expr, \
opt_of_syscall, partial_print, pickup_sources, \
print_caution, print_locals, write_c_footer, \
write_makefile
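# This module generates the fslave-side C sources: for each syscall listed in
# FSLAVE_SYSCALLS it writes fslave_<name>.c containing execute_call() (read the
# request payload and perform the syscall), execute_return() (serialize results
# back over the wire) and process_<name>() (the dispatch entry point), plus the
# dispatch table, the Makefile fragment and the prototypes header.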
def make_formal_arguments_of_execute_call(syscall, a):
datatype = a.datatype
asterisk = (1 if data_of_argument(syscall, a).is_array else 0) * "*"
name = a.name
return "{datatype}{asterisk}{name}".format(**locals())
def print_fslave_head(p, syscall):
args = ["struct slave_thread *slave_thread", "int *retval", "int *errnum"]
for a in syscall.output_args:
args.append(make_formal_arguments_of_execute_call(syscall, a))
name = drop_prefix(syscall.name)
print_caution(p)
p("""\
#include <sys/types.h>
#include <sys/event.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <fsyscall/private.h>
#include <fsyscall/private/command.h>
#include <fsyscall/private/encode.h>
#include <fsyscall/private/fslave.h>
#include <fsyscall/private/io.h>
#include <fsyscall/private/io_or_die.h>
static void
execute_call({args})
{{
""".format(args=", ".join(args)))
def make_fslave_payload_size_expr(syscall):
if len(syscall.input_args) == 0:
return "0"
terms = []
for a in syscall.args:
if data_of_argument(syscall, a).out:
continue
if a.datatype == "void *":
terms.append(SYSCALLS[syscall.name][a.name].size)
continue
if a.datatype == "struct iovec *":
terms.append("{name}_payload_size".format(**vars(a)))
continue
terms.append("{name}_len".format(**vars(a)))
return " + ".join(terms)
def print_fslave_call(p, print_newline, syscall):
local_vars = []
for datatype, name in (
("sigset_t", "oset"),
("payload_size_t", "payload_size"),
("payload_size_t", "actual_payload_size"),
("struct io *", "io")):
local_vars.append(Variable(datatype, name))
input_arguments = syscall.input_args
for a in input_arguments:
if a.datatype == "void *":
local_vars.append(Variable(a.datatype, a.name))
continue
if a.datatype == "struct iovec *":
for datatype, name in (
(a.datatype, a.name),
("payload_size_t *", "{name}_iov_len_len"),
("int", "i"),
("payload_size_t", "{name}_payload_size")):
local_vars.append(Variable(datatype, name.format(**vars(a))))
continue
for datatype, name in (
("payload_size_t", "{name}_len"),
("{datatype}", "{name}")):
d = vars(a)
v = Variable(datatype.format(**d), name.format(**d))
local_vars.append(v)
print_locals(p, local_vars)
print_newline()
p("""\
\tio = &slave_thread->fsth_io;
\tpayload_size = read_payload_size(io);
""")
print_newline()
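    # Emit code that deserializes each input argument from the I/O channel:
    # void* buffers and iovec arrays get alloca()ed storage, other types use
    # the matching read_* helper, optionally guarded when the argument is
    # conditional for this syscall.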
for a in syscall.input_args:
name = a.name
if a.datatype == "void *":
size = SYSCALLS[syscall.name][name].size
p("""\
\t{name} = alloca({size});
\tread_or_die(io, {name}, {size});
""".format(**locals()))
continue
if a.datatype == "struct iovec *":
name = a.name
size = data_of_argument(syscall, a).size
p("""\
\t{name} = (struct iovec *)alloca(sizeof(*{name}) * {size});
\t{name}_iov_len_len = (payload_size_t *)alloca(sizeof(int) * {size});
\tfor (i = 0; i < {size}; i++) {{
\t\t{name}[i].iov_len = read_uint64(io, &{name}_iov_len_len[i]);
\t\t{name}[i].iov_base = alloca({name}[i].iov_len);
\t\tread_or_die(io, {name}[i].iov_base, {name}[i].iov_len);
\t}}
""".format(**locals()))
continue
f = {
"char *": "read_string",
"long": "read_int64",
"int": "read_int32",
"u_int": "read_uint32",
"off_t": "read_int64",
"size_t": "read_uint64" }[a.datatype]
assignment = "{name} = {f}(io, &{name}_len)".format(**locals())
opt = opt_of_syscall(FSLAVE_SYSCALLS, syscall, a)
if opt is not None:
p("""\
\tif ({opt})
\t\t{assignment};
\telse
\t\t{name} = {name}_len = 0;
""".format(**locals()))
else:
p("""\
\t{assignment};
""".format(**locals()))
if 0 < len(syscall.args):
print_newline()
for a in [a for a in input_arguments if a.datatype == "struct iovec *"]:
name = a.name
size = data_of_argument(syscall, a).size
p("""\
\t{name}_payload_size = 0;
\tfor (i = 0; i < {size}; i++)
\t\t{name}_payload_size += {name}_iov_len_len[i] + {name}[i].iov_len;
""".format(**locals()))
continue
payload_size = make_fslave_payload_size_expr(syscall)
p("""\
\tactual_payload_size = {payload_size};
\tdie_if_payload_size_mismatched(payload_size, actual_payload_size);
""".format(**locals()))
print_newline()
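    # Emit malloc() calls for array-typed output arguments; the generated
    # execute_return() is responsible for freeing these buffers.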
malloced = False
out_arguments = syscall.output_args
for a in out_arguments:
data = data_of_argument(syscall, a)
if not data.is_array:
continue
name = a.name
datatype = a.datatype
size = data.size
p("""\
\t*{name} = ({datatype})malloc({size});
""".format(**locals()))
malloced = True
if malloced:
print_newline()
args = []
for a in syscall.args:
data = data_of_argument(syscall, a)
ast = (1 if data.out and data.is_array else 0) * "*"
args.append("{ast}{name}".format(ast=ast, name=a.name))
p("""\
\tsuspend_signal(slave_thread, &oset);
\t*retval = {name}({args});
\t*errnum = errno;
\tresume_signal(slave_thread, &oset);
""".format(name=drop_prefix(syscall.name), args=", ".join(args)))
for a in syscall.args:
if (a.datatype != "char *") or data_of_argument(syscall, a).out:
continue
p("""\
\tfree({name});
""".format(**vars(a)))
p("""\
}}
""".format(**locals()))
def get_fslave_return_func(syscall):
return "return_int" if syscall.rettype == "int" else "return_ssize"
def make_formal_arguments_of_execute_return(syscall, a):
data = data_of_argument(syscall, a)
dt = a.datatype
datatype = drop_pointer(dt) if data.is_atom else dt
name = a.name
return "{datatype} {name}".format(**locals())
def print_fslave_return(p, print_newline, syscall):
output_args = syscall.output_args
args = ", ".join([make_formal_arguments_of_execute_return(syscall, a)
for a in output_args])
p("""\
static void
execute_return(struct slave_thread *slave_thread, int retval, int errnum, {args})
{{
""".format(**locals()))
local_vars = [Variable(datatype, name, size) for datatype, name, size in (
("payload_size_t", "payload_size", None),
("char", "retval_buf", bufsize_of_datatype(syscall.rettype)),
("int", "retval_len", None),
("struct io *", "io", None))]
out_arguments = syscall.output_args
for a in out_arguments:
datatype = a.datatype
if a.datatype in ("char *", "void *"):
# in case of an array
continue
st = data_of_argument(syscall, a).struct
append = local_vars.append
if st is not None:
for datatype, name in st.expand_all_members(a.name):
fmt = "{name}_len"
append(Variable("int", fmt.format(**locals())))
fmt = "{name}_buf"
size = bufsize_of_datatype(datatype)
append(Variable("char", fmt.format(**locals()), size))
continue
name = a.name
size = bufsize_of_datatype(drop_pointer(datatype))
append(Variable("int", "{name}_len".format(**locals())))
append(Variable("char", "{name}_buf".format(**locals()), size))
print_locals(p, local_vars)
print_newline()
cmd_name = make_cmd_name(syscall.name)
return_func = get_fslave_return_func(syscall)
p("""\
\tif (retval == -1) {{
\t\t{return_func}(slave_thread, {cmd_name}_RETURN, retval, errnum);
\t\treturn;
\t}}
""".format(**locals()))
print_newline()
p("""\
\tretval_len = encode_{datatype}(retval, retval_buf, array_sizeof(retval_buf));
""".format(datatype=concrete_datatype_of_abstract_datatype(syscall.rettype)))
for a in out_arguments:
if a.datatype in ("char *", "void *"):
continue
st = data_of_argument(syscall, a).struct
if st is not None:
for datatype, var, name in st.zip_members(a.name, "->"):
struct_name = a.name
t = concrete_datatype_of_abstract_datatype(datatype)
p("""\
\t{var}_len = encode_{t}({name}, {var}_buf, array_sizeof({var}_buf));
""".format(**locals()))
continue
name = a.name
t = drop_pointer(a.datatype)
datatype = concrete_datatype_of_abstract_datatype(t)
p("""\
\t{name}_len = encode_{datatype}({name}, {name}_buf, sizeof({name}_buf));
""".format(**locals()))
payload_size = make_payload_size_expr(syscall, out_arguments, "retsize")
p("""\
\tpayload_size = retval_len + {payload_size};
\tio = &slave_thread->fsth_io;
\twrite_command(io, {cmd_name}_RETURN);
\twrite_payload_size(io, payload_size);
\twrite_or_die(io, retval_buf, retval_len);
""".format(**locals()))
for a in out_arguments:
if a.datatype in ("char *", "void *"):
name = a.name
size = data_of_argument(syscall, a).retsize
p("""\
\twrite_or_die(io, {name}, {size});
""".format(**locals()))
continue
st = data_of_argument(syscall, a).struct
if st is not None:
for _, name in st.expand_all_members(a.name):
p("""\
\twrite_or_die(io, {name}_buf, {name}_len);
""".format(**locals()))
continue
name = a.name
p("""\
\twrite_or_die(io, {name}_buf, {name}_len);
""".format(**locals()))
newlined = False
for a in out_arguments:
data = data_of_argument(syscall, a)
if not data.is_array:
continue
if not newlined:
print_newline()
newlined = True
p("""\
\tfree({name});
""".format(**vars(a)))
p("""\
}}
""".format(**locals()))
def make_execute_return_actual_arguments(syscall, args):
exprs = []
for a in args:
st = data_of_argument(syscall, a).struct
fmt = "&{name}" if st is not None else "{name}"
exprs.append(fmt.format(**vars(a)))
return ", ".join(exprs)
def print_fslave_main(p, print_newline, syscall):
name = drop_prefix(syscall.name)
p("""\
void
process_{name}(struct slave_thread *slave_thread)
{{
""".format(**locals()))
local_vars = []
for datatype, name in (("int", "retval"), ("int", "errnum")):
local_vars.append(Variable(datatype, name))
out_arguments = syscall.output_args
if len(out_arguments) == 0:
cmd_name = make_cmd_name(syscall.name)
print_locals(p, local_vars)
print_newline()
return_func = get_fslave_return_func(syscall)
p("""\
\texecute_call(slave_thread, &retval, &errnum);
\t{return_func}(slave_thread, {cmd_name}_RETURN, retval, errnum);
}}
""".format(**locals()))
return
for a in out_arguments:
data = data_of_argument(syscall, a)
datatype = a.datatype if data.is_array else drop_pointer(a.datatype)
local_vars.append(Variable(datatype, a.name))
print_locals(p, local_vars)
print_newline()
call_args = ", ".join(["&{name}".format(**vars(a)) for a in out_arguments])
ret_args = make_execute_return_actual_arguments(syscall, out_arguments)
p("""\
\texecute_call(slave_thread, &retval, &errnum, {call_args});
\texecute_return(slave_thread, retval, errnum, {ret_args});
}}
""".format(**locals()))
def write_fslave(dirpath, syscalls):
for syscall in [sc for sc in syscalls if sc.name in FSLAVE_SYSCALLS]:
name = drop_prefix(syscall.name)
path = join(dirpath, "fslave_{name}.c".format(**locals()))
with open(path, "w") as fp:
p, print_newline = partial_print(fp)
print_fslave_head(p, syscall)
print_fslave_call(p, print_newline, syscall)
print_newline()
if 0 < len(syscall.output_args):
print_fslave_return(p, print_newline, syscall)
print_newline()
print_fslave_main(p, print_newline, syscall)
def write_dispatch(dirpath, syscalls):
with open(join(dirpath, "dispatch.inc"), "w") as fp:
p, _ = partial_print(fp)
for syscall in syscalls:
if syscall.name not in SYSCALLS:
continue
cmd = make_cmd_name(syscall.name)
name = drop_prefix(syscall.name)
p("""\
\t\t\tcase {cmd}_CALL:
\t\t\t\tprocess_{name}(slave_thread);
\t\t\t\tbreak;
""".format(**locals()))
write_c_footer(p)
def write_fslave_makefile(path, syscalls):
write_makefile(path, pickup_sources(syscalls, "fslave_"))
def write_proto(dirpath, syscalls):
try:
mkdir(dirpath)
except OSError:
pass
with open(join(dirpath, "proto.h"), "w") as fp:
p, print_newline = partial_print(fp)
print_caution(p)
p("""\
#if !defined(FSYSCALL_PRIVATE_FSLAVE_PROTO_H_INCLUDED)
#define FSYSCALL_PRIVATE_FSLAVE_PROTO_H_INCLUDED
""")
print_newline()
for syscall in syscalls:
if syscall.name not in FSLAVE_SYSCALLS:
continue
p("""\
void process_{name}(struct slave_thread *);
""".format(name=drop_prefix(syscall.name)))
print_newline()
p("""\
#endif
""")
# vim: tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
| {
"content_hash": "475a0336dd35a0f8ce833e5889d36a17",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 81,
"avg_line_length": 32.47178329571106,
"alnum_prop": 0.5635036496350365,
"repo_name": "SumiTomohiko/fsyscall2",
"id": "8864f1bb827f6c0a4bf0c5667fac5754428f388b",
"size": "14386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/lib/python/fsyscall/slave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "20301"
},
{
"name": "C",
"bytes": "663318"
},
{
"name": "C++",
"bytes": "3059"
},
{
"name": "Groovy",
"bytes": "1472"
},
{
"name": "Java",
"bytes": "398186"
},
{
"name": "Makefile",
"bytes": "23866"
},
{
"name": "Python",
"bytes": "1320"
},
{
"name": "Shell",
"bytes": "32639"
}
],
"symlink_target": ""
} |
import importlib
import traceback
from pathlib import Path
from functools import partial
from multiprocessing import cpu_count
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import yaml
import requests
from pandas import DataFrame, concat
from .anomaly import detect_anomaly_all, detect_stale_columns
from .cast import column_converters
from .constants import SRC, CACHE_URL
from .concurrent import process_map
from .data_source import DataSource
from .error_logger import ErrorLogger
from .io import read_file, read_table, fuzzy_text, export_csv, parse_dtype, pbar
from .lazy_property import lazy_property
from .utils import combine_tables, drop_na_records, filter_output_columns
class DataPipeline(ErrorLogger):
"""
A data pipeline is a collection of individual [DataSource]s which produce a full table ready
for output. This is a very thin wrapper that pulls the data sources and combines their outputs.
A data pipeline is responsible for loading the auxiliary datasets that are passed to the
individual data sources. DataSource objects can load data themselves, but if the same auxiliary
dataset is used by many of them, then it is more efficient to load it here.
"""
def __init__(
self,
name: str,
schema: Dict[str, type],
auxiliary: Dict[str, Union[Path, str]],
data_sources: List[DataSource],
config: Dict[str, Any],
):
"""
        Arguments:
            name: The name of this module
            schema: Names and corresponding dtypes of output columns.
            auxiliary: Auxiliary datasets passed to the pipelines during processing
            data_sources: List of data sources (initialized with the appropriate config) executed
                in order.
            config: Parsed configuration for this pipeline, including the data source definitions.
        """
super().__init__()
self.name: str = name
self.schema: Dict[str, Any] = schema
self.data_sources: List[DataSource] = data_sources
self.table: str = name.replace("_", "-")
self._auxiliary: Dict[str, Union[Path, str]] = auxiliary
self.config = config
@lazy_property
def auxiliary_tables(self):
""" Auxiliary datasets passed to the pipelines during processing """
# Metadata table can be overridden but must always be present
auxiliary = {"metadata": SRC / "data" / "metadata.csv", **self._auxiliary}
# Load the auxiliary tables into memory
aux = {name: read_file(table) for name, table in auxiliary.items()}
# Precompute some useful transformations in the auxiliary input files
aux["metadata"]["match_string_fuzzy"] = aux["metadata"].match_string.apply(fuzzy_text)
for column in ("subregion1", "subregion2", "locality"):
# Apply fuzzy comparison for name fields
aux["metadata"][f"{column}_name_fuzzy"] = aux["metadata"][f"{column}_name"].apply(
fuzzy_text
)
return aux
@staticmethod
def load(name: str) -> "DataPipeline":
"""
Load a data pipeline by reading its configuration at the expected path from the given name.
Arguments:
            name: Name of the data pipeline, which is the same as the name of the output table
                but with dashes (`-`) replaced by underscores (`_`).
Returns:
DataPipeline: The DataPipeline object corresponding to the input name.
"""
# Read config from the yaml file
config_path = SRC / "pipelines" / name / "config.yaml"
with open(config_path, "r") as fd:
config_yaml = yaml.safe_load(fd)
# The pipeline's schema and auxiliary tables are part of the config
schema = {name: parse_dtype(dtype) for name, dtype in config_yaml["schema"].items()}
auxiliary = {name: SRC / path for name, path in config_yaml.get("auxiliary", {}).items()}
data_sources = []
for source_config in config_yaml["sources"]:
# Add the job group to all configs
automation_config = source_config.get("automation", {})
source_config["automation"] = automation_config
source_config["automation"]["job_group"] = automation_config.get("job_group", "default")
# Use reflection to create an instance of the corresponding DataSource class
module_tokens = source_config["class"].split(".")
class_name = module_tokens[-1]
module_name = ".".join(module_tokens[:-1])
module = importlib.import_module(module_name)
# Create the DataSource class with the appropriate config
data_sources.append(getattr(module, class_name)(source_config))
return DataPipeline(name, schema, auxiliary, data_sources, config_yaml)
def output_table(self, data: DataFrame) -> DataFrame:
"""
This function performs the following operations:
1. Filters out columns not in the output schema
2. Converts each column to the appropriate type
3. Sorts the values based on the column order
4. Outputs the resulting data
"""
output_columns = list(self.schema.keys())
# Make sure all columns are present and have the appropriate type
for column, converter in column_converters(self.schema).items():
if column not in data:
data[column] = None
data[column] = data[column].apply(converter)
# Filter only output columns and output the sorted data
return drop_na_records(data[output_columns], ["date", "key"]).sort_values(output_columns)
@staticmethod
def _run_wrapper(
output_folder: Path,
cache: Dict[str, str],
aux: Dict[str, DataFrame],
data_source: DataSource,
**source_opts,
) -> Optional[DataFrame]:
""" Workaround necessary for multiprocess pool, which does not accept lambda functions """
try:
return data_source.run(output_folder, cache, aux, **source_opts)
except Exception:
data_source_name = data_source.__class__.__name__
data_source.log_error(
"Error running data source.",
source_name=data_source_name,
config=data_source.config,
traceback=traceback.format_exc(),
)
return None
def parse(
self, output_folder: Path, process_count: int = None, **source_opts
) -> Iterable[Tuple[DataSource, DataFrame]]:
"""
Performs the fetch and parse steps for each of the data sources in this pipeline.
Arguments:
output_folder: Root path of the outputs where "snapshot", "intermediate" and "tables"
will be created and populated with CSV files.
process_count: Maximum number of processes to run in parallel.
Returns:
Iterable[Tuple[DataSource, DataFrame]]: Pairs of <data source, results> for each data
source, where the results are the output of `DataSource.parse()`.
"""
# Read the cache directory from our cloud storage
try:
cache = requests.get("{}/sitemap.json".format(CACHE_URL), timeout=60).json()
        except Exception:
            cache = {}
            self.log_error("Cache unavailable")
        # Make a copy of the auxiliary tables so that local modifications (which might be wanted
        # for optimization purposes) do not affect other pipelines
aux_copy = {name: df.copy() for name, df in self.auxiliary_tables.items()}
# Create a function to be used during mapping. The nestedness is an unfortunate outcome of
# the multiprocessing module's limitations when dealing with lambda functions, coupled with
# the "sandboxing" we implement to ensure resiliency.
map_func = partial(DataPipeline._run_wrapper, output_folder, cache, aux_copy, **source_opts)
# Default to using as many processes as CPUs
if process_count is None:
process_count = cpu_count()
# Used to display progress during processing
data_sources_count = len(self.data_sources)
progress_label = f"Run {self.name} pipeline"
map_opts = dict(total=data_sources_count, desc=progress_label)
# If the process count is less than one, run in series (useful to evaluate performance)
if process_count <= 1 or data_sources_count <= 1:
map_result = pbar(map(map_func, self.data_sources), **map_opts)
else:
map_opts.update(dict(max_workers=process_count))
map_result = process_map(map_func, self.data_sources, **map_opts)
# Get all the pipeline outputs
# This operation is parallelized but output order is preserved
yield from zip(self.data_sources, map_result)
def _save_intermediate_results(
self,
intermediate_folder: Path,
intermediate_results: Iterable[Tuple[DataSource, DataFrame]],
) -> None:
for data_source, result in intermediate_results:
if result is not None:
self.log_info(f"Exporting results from {data_source.__class__.__name__}")
file_name = f"{data_source.uuid(self.table)}.csv"
export_csv(result, intermediate_folder / file_name, schema=self.schema)
else:
data_source_name = data_source.__class__.__name__
self.log_error(
"No output while saving intermediate results",
source_name=data_source_name,
source_config=data_source.config,
)
def _load_intermediate_results(
self, intermediate_folder: Path
) -> Iterable[Tuple[DataSource, DataFrame]]:
for data_source in self.data_sources:
intermediate_path = intermediate_folder / f"{data_source.uuid(self.table)}.csv"
try:
yield (data_source, read_table(intermediate_path, schema=self.schema))
except Exception as exc:
data_source_name = data_source.__class__.__name__
self.log_error(
"Failed to load intermediate output",
source_name=data_source_name,
source_config=data_source.config,
exception=exc,
)
    def _split_combine_inputs(
        self, intermediate_results: Iterable[DataFrame], combine_chunk_size: int
    ) -> Iterable[DataFrame]:
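        # Concatenate all intermediate outputs, then re-chunk them by the
        # date/key index so that all rows sharing an index value land in the
        # same chunk; chunks of roughly `combine_chunk_size` rows can then be
        # combined in parallel without splitting a group across workers.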
combine_inputs = concat(intermediate_results)
combine_inputs_columns = combine_inputs.columns
idx_col = "date" if "date" in combine_inputs.columns else "key"
combine_inputs.dropna(subset=[idx_col], inplace=True)
combine_inputs.set_index(idx_col, inplace=True)
index = combine_inputs.index.unique()
new_chunk = lambda: DataFrame(columns=combine_inputs_columns).set_index(idx_col)
current_chunk = new_chunk()
current_chunk_len = 0
for idx in sorted(index):
idx_chunk = combine_inputs.loc[idx]
idx_chunk_len = len(idx_chunk)
if idx_chunk_len > 0 and current_chunk_len + idx_chunk_len > combine_chunk_size:
current_chunk_len = 0
yield current_chunk.reset_index()
current_chunk = new_chunk()
current_chunk_len += idx_chunk_len
current_chunk = current_chunk.append(idx_chunk)
if len(current_chunk) > 0:
yield current_chunk.reset_index()
def combine(
self,
intermediate_results: Iterable[Tuple[DataSource, DataFrame]],
process_count: int = None,
combine_chunk_size: int = 2 ** 16,
) -> DataFrame:
"""
Combine all the provided intermediate results into a single DataFrame, giving preference to
values coming from the latter results.
Arguments:
intermediate_results: collection of results from individual data sources.
process_count: Maximum number of processes to run in parallel.
combine_chunk_size: Maximum number of rows to send to combine grouping function.
"""
# Default to using as many processes as CPUs
if process_count is None:
process_count = cpu_count()
# Combine all intermediate outputs into a single DataFrame
output_chunk_count = 1
        # Materialize the iterable so the emptiness check also works for generators
        intermediate_results = list(intermediate_results)
        if not intermediate_results:
self.log_error("Empty result for data pipeline {}".format(self.name))
pipeline_output = [DataFrame(columns=self.schema.keys())]
else:
# Get rid of all columns which are not part of the output to speed up data combination
intermediate_tables = (
result[filter_output_columns(result.columns, self.schema)]
for _, result in intermediate_results
)
# Parallelize combine step into N processes
combine_inputs = self._split_combine_inputs(intermediate_tables, combine_chunk_size)
map_iter = [[chunk] for chunk in combine_inputs]
map_func = partial(combine_tables, index=["date", "key"])
map_opts = dict(max_workers=process_count, desc=f"Combining {self.name} outputs")
pipeline_output = process_map(map_func, map_iter, **map_opts)
output_chunk_count = len(map_iter)
del combine_inputs
# Return data using the pipeline's output parameters
map_opts = dict(
total=output_chunk_count,
max_workers=process_count,
desc=f"Cleaning {self.name} outputs",
)
return concat(process_map(self.output_table, pipeline_output, **map_opts))
def verify(
self, pipeline_output: DataFrame, level: str = "simple", process_count: int = cpu_count()
) -> DataFrame:
"""
Perform verification tasks on the data pipeline combined outputs.
        Arguments:
            pipeline_output: Output of `DataPipeline.combine()`.
            level: Level of anomaly detection to perform on outputs. Possible values are:
                None, "simple" and "full".
            process_count: Maximum number of processes to run in parallel.
Returns:
DataFrame: same as `pipeline_output`.
"""
# Skip anomaly detection unless requested
if level == "simple":
# Validate that the table looks good
detect_anomaly_all(self.schema, pipeline_output, [self.name])
if level == "full":
# Perform stale column detection for each known key
map_iter = pipeline_output.key.unique()
# TODO: convert into a regular function since lambdas cannot be pickled
map_func = lambda key: detect_stale_columns(
self.schema, pipeline_output[pipeline_output.key == key], (self.name, key)
)
progress_label = f"Verify {self.name} pipeline"
            if process_count <= 1 or len(map_iter) <= 1:
                map_result = pbar(map(map_func, map_iter), total=len(map_iter), desc=progress_label)
            else:
                map_result = process_map(map_func, map_iter, desc=progress_label)
            # Consume the results
            _ = list(map_result)
return pipeline_output
def run(
self,
output_folder: Path,
process_count: int = cpu_count(),
verify_level: str = "simple",
**source_opts,
) -> DataFrame:
"""
Main method which executes all the associated [DataSource] objects and combines their
outputs.
Arguments:
output_folder: Root path of the outputs where "snapshot", "intermediate" and "tables"
will be created and populated with CSV files.
process_count: Maximum number of processes to run in parallel.
verify_level: Level of anomaly detection to perform on outputs. Possible values are:
None, "simple" and "full".
source_opts: Options to relay to the DataSource.run() method.
Returns:
DataFrame: Processed and combined outputs from all the individual data sources into a
single table.
"""
# TODO: break out fetch & parse steps
intermediate_results = self.parse(output_folder, process_count=process_count, **source_opts)
# Save all intermediate results (to allow for reprocessing)
intermediate_folder = output_folder / "intermediate"
self._save_intermediate_results(intermediate_folder, intermediate_results)
# Re-load all intermediate results
intermediate_results = self._load_intermediate_results(intermediate_folder)
# Combine all intermediate results into a single dataframe
# NOTE: Limit the number of processes to avoid OOM in big datasets
pipeline_output = self.combine(intermediate_results, process_count=min(process_count, 4))
# Perform anomaly detection on the combined outputs
pipeline_output = self.verify(
pipeline_output, level=verify_level, process_count=process_count
)
return pipeline_output
| {
"content_hash": "b2889b405d0477e5bbaf70997bd13c96",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 100,
"avg_line_length": 43.04714640198511,
"alnum_prop": 0.6228960110675582,
"repo_name": "GoogleCloudPlatform/covid-19-open-data",
"id": "286644c21d73cc305a64013125ad68ae3ae21587",
"size": "17924",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/lib/pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1839"
},
{
"name": "Python",
"bytes": "901210"
},
{
"name": "Shell",
"bytes": "10370"
}
],
"symlink_target": ""
} |
from sklearn import neighbors, svm, linear_model
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
class ML:
def __init__(self, pred, min_shift = 0.01):
"""
Makes the ML class which contains the machine learning methods.
:param pred: pandas DataFrame of technical indicators given by the Predictors method
:param min_shift: float, the minimum price shift.
"""
self.pred = pred
self.indicators = {}
self.model = []
self.min_shift = min_shift
def randomForset_learn(self, n, ndays, p={'meanfractal', 'loc_vol'}, y_colums=None,forward_look=0):
"""
Creates an sklearn model and backtests it for given parameters in a training range.
:param n: int, Number of estimators on the random forest model
:param ndays: int, Number of days to use as training
:param p: The names of the technical indicators you want to use
:param y_colums: What does the output of the model check against.
:param forward_look: int, How many days to look forward in time
:return: pandas dataframe containing predictions.
"""
# Make predictors
pred = self.pred
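        # Label each row 1 when the ratio of consecutive (split-adjusted)
        # prices exceeds the threshold; 1.0025 / 0.9975 appears to model a
        # 0.25% fee in each direction, on top of the configured min_shift.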
if y_colums is None:
pred["switch"] = np.where((pred.make_splits(5, inplace=False).shift(1) / pred.make_splits(5, inplace=False))
> (1.0025 / 0.9975) + self.min_shift, 1, 0)
else:
pred["switch"] = pred[y_colums]
self.indicators = p
# Make model
clf = RandomForestClassifier(n_estimators=n)
results = pd.DataFrame()
accuracy = []
# Backtest all data using a rolling look forward method.
for i in range(ndays, len(pred.index)-forward_look):
# We perform a 80/20 split on the data
ind = int(np.round(ndays*0.8))
X_TRAIN = pred.ix[(i - ndays):(i - ndays + ind),p]
if forward_look > 0:
idx = pred.ix[(i - ndays):(i - ndays + ind+forward_look)].index
Y_TRAIN = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TRAIN = pred.switch.ix[X_TRAIN.index]
X_TEST = pred.ix[(i - ndays + ind):i,p]
if forward_look > 0:
idx = pred.ix[(i - ndays + ind):(i+forward_look),p].index
Y_TEST = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TEST = pred.switch.ix[X_TEST.index]
# Fit the model
clf.fit(X_TRAIN, Y_TRAIN)
# Predict
predicted = clf.predict(X_TEST)
# Accuracy
a = clf.score(X_TEST, Y_TEST)
accuracy.append(a)
# Make results
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
X_TEST["ACC"] = a
results = results.append(X_TEST.iloc[0])
self.model = clf
print("RandomForest model accuracy: " + str(np.mean(accuracy)))
return results
def knn_learn(self, n, ndays, p={'meanfractal', 'loc_vol'}, y_colums=None,forward_look=0):
"""
Creates an sklearn model and backtests it for given parameters in a training range.
:param n: int, Number of nearest neighbours in the KNN model
:param ndays: int, Number of days to use as training
:param p: The names of the technical indicators you want to use
:param y_colums: What does the output of the model check against.
:param forward_look: int, How many days to look forward in time
:return: pandas dataframe containing predictions.
"""
pred = self.pred
if y_colums is None:
pred["switch"] = np.where((pred.make_splits(5, inplace=False).shift(1) / pred.make_splits(5, inplace=False))
> (1.0025 / 0.9975) + self.min_shift, 1, 0)
else:
pred["switch"] = pred[y_colums]
self.indicators = p
clf = neighbors.KNeighborsClassifier(n_neighbors=n)
results = pd.DataFrame()
accuracy = []
for i in range(ndays, len(pred.index)-forward_look):
# We perform a 80/20 split on the data
ind = int(np.round(ndays*0.8))
X_TRAIN = pred.ix[(i - ndays):(i - ndays + ind),p]
if forward_look > 0:
idx = pred.ix[(i - ndays):(i - ndays + ind+forward_look)].index
Y_TRAIN = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TRAIN = pred.switch.ix[X_TRAIN.index]
X_TEST = pred.ix[(i - ndays + ind):i,p]
if forward_look > 0:
idx = pred.ix[(i - ndays + ind):(i+forward_look),p].index
Y_TEST = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TEST = pred.switch.ix[X_TEST.index]
clf.fit(X_TRAIN, Y_TRAIN)
predicted = clf.predict(X_TEST)
a = clf.score(X_TEST, Y_TEST)
accuracy.append(a)
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
X_TEST["ACC"] = a
results = results.append(X_TEST.iloc[0])
self.model = clf
print("KNN model accuracy: " + str(np.mean(accuracy)))
return results
def linear_learn(self, c, ndays, p={'meanfractal', 'loc_vol'},y_colums=None,forward_look=0):
"""
Creates an sklearn model and backtests it for given parameters in a training range.
:param c: float, linear tuning parameter
:param ndays: int, Number of days to use as training
:param p: The names of the technical indicators you want to use
:param y_colums: What does the output of the model check against.
:param forward_look: int, How many days to look forward in time
:return: pandas dataframe containing predictions.
"""
pred = self.pred
if y_colums is None:
pred["switch"] = np.where((pred.make_splits(5, inplace=False).shift(1) / pred.make_splits(5, inplace=False))
> (1.0025 / 0.9975) + self.min_shift, 1, 0)
else:
pred["switch"] = pred[y_colums]
self.indicators = p
clf = linear_model.LogisticRegression(C=c)
results = pd.DataFrame()
accuracy = []
for i in range(ndays, len(pred.index)-forward_look):
# We perform a 80/20 split on the data
ind = int(np.round(ndays*0.8))
X_TRAIN = pred.ix[(i - ndays):(i - ndays + ind),p]
if forward_look > 0:
idx = pred.ix[(i - ndays):(i - ndays + ind+forward_look)].index
Y_TRAIN = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TRAIN = pred.switch.ix[X_TRAIN.index]
X_TEST = pred.ix[(i - ndays + ind):i,p]
if forward_look > 0:
idx = pred.ix[(i - ndays + ind):(i+forward_look),p].index
Y_TEST = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TEST = pred.switch.ix[X_TEST.index]
clf.fit(X_TRAIN, Y_TRAIN)
predicted = clf.predict(X_TEST)
a = clf.score(X_TEST, Y_TEST)
accuracy.append(a)
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
X_TEST["ACC"] = a
results = results.append(X_TEST.iloc[0])
self.model = clf
print("Linear model accuracy: " + str(np.mean(accuracy)))
return results
def SVC_learn(self, ndays, c = 1, kernel='rbf', p={'meanfractal', 'loc_vol'},y_colums=None,forward_look=0):
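        """
        Creates an sklearn model and backtests it for given parameters in a training range.
        :param ndays: int, Number of days to use as training
        :param c: float, SVC regularization parameter
        :param kernel: str, kernel type used by the SVC model
        :param p: The names of the technical indicators you want to use
        :param y_colums: What does the output of the model check against.
        :param forward_look: int, How many days to look forward in time
        :return: pandas dataframe containing predictions.
        """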
pred = self.pred
if y_colums is None:
pred["switch"] = np.where((pred.make_splits(5, inplace=False).shift(1) / pred.make_splits(5, inplace=False))
> (1.0025 / 0.9975) + self.min_shift, 1, 0)
else:
pred["switch"] = pred[y_colums]
self.indicators = p
clf = svm.SVC(kernel=kernel, gamma=0.7, C=c,probability=True)
results = pd.DataFrame()
accuracy = []
for i in range(ndays, len(pred.index)-forward_look):
# We perform a 80/20 split on the data
ind = int(np.round(ndays*0.8))
X_TRAIN = pred.ix[(i - ndays):(i - ndays + ind),p]
if forward_look > 0:
idx = pred.ix[(i - ndays):(i - ndays + ind+forward_look)].index
Y_TRAIN = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TRAIN = pred.switch.ix[X_TRAIN.index]
X_TEST = pred.ix[(i - ndays + ind):i,p]
if forward_look > 0:
idx = pred.ix[(i - ndays + ind):(i+forward_look),p].index
Y_TEST = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TEST = pred.switch.ix[X_TEST.index]
clf.fit(X_TRAIN, Y_TRAIN)
predicted = clf.predict(X_TEST)
# proba = clf.predict_proba(X_TEST)
a = clf.score(X_TEST, Y_TEST)
accuracy.append(a)
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
# X_TEST["PROB"] = proba
X_TEST["ACC"] = a
results = results.append(X_TEST.iloc[0])
self.model = clf
print("SVC model accuracy: " + str(np.mean(accuracy)))
return results
    def GaussianNB(self, ndays, p={'meanfractal', 'loc_vol'}, y_colums=None, forward_look=0):
        """
        Creates an sklearn model and backtests it for given parameters in a training range.
        :param ndays: int, Number of days to use as training
        :param p: The names of the technical indicators you want to use
        :param y_colums: What does the output of the model check against.
        :param forward_look: int, How many days to look forward in time
        :return: pandas dataframe containing predictions.
        """
pred = self.pred
if y_colums is None:
pred["switch"] = np.where((pred.make_splits(5, inplace=False).shift(1) / pred.make_splits(5, inplace=False))
> (1.0025 / 0.9975) + self.min_shift, 1, 0)
else:
pred["switch"] = pred[y_colums]
        self.indicators = p
        clf = GaussianNB()
results = pd.DataFrame()
accuracy = []
for i in range(ndays, len(pred.index)-forward_look):
# We perform a 80/20 split on the data
ind = int(np.round(ndays*0.8))
X_TRAIN = pred.ix[(i - ndays):(i - ndays + ind),p]
if forward_look > 0:
idx = pred.ix[(i - ndays):(i - ndays + ind+forward_look)].index
Y_TRAIN = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TRAIN = pred.switch.ix[X_TRAIN.index]
X_TEST = pred.ix[(i - ndays + ind):i,p]
if forward_look > 0:
idx = pred.ix[(i - ndays + ind):(i+forward_look),p].index
Y_TEST = pred.switch.ix[idx].shift(-1*forward_look)[:(-1*forward_look)]
else:
Y_TEST = pred.switch.ix[X_TEST.index]
clf.fit(X_TRAIN, Y_TRAIN)
predicted = clf.predict(X_TEST)
a = clf.score(X_TEST, Y_TEST)
accuracy.append(a)
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
X_TEST["ACC"] = a
results = results.append(X_TEST.iloc[0])
self.model = clf
print("GaussianNB model accuracy: " + str(np.mean(accuracy)))
return results
def short_xgboost_model(self, startdate, ndays, actual,forward_look=0):
"""
        Creates an xgboost model and backtests it for given parameters in a training range. NOTE that this will
        not backtest the entire dataset as it would take too long.
:param ndays: int, Number of days to use as training
:param startdate: pandas datetime to start the lookback
:param actual: What does the output of the model check against.
:param forward_look: int, How many days to look forward in time
:return: pandas dataframe containing predictions.
"""
pred = self.pred
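        # Two-stage grid search: first tune the tree structure (max_depth,
        # min_child_weight), then the learning rate and subsample ratio using
        # the best structure found.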
cv_params = {'max_depth': [3, 5, 7, 9, 11], 'min_child_weight': [1, 3, 5, 7, 9]}
ind_params = {'learning_rate': 0.1, 'n_estimators': 1000, 'seed': 0, 'subsample': 0.8, 'colsample_bytree': 0.8,
'objective': 'binary:logistic'}
optimized_GBM = GridSearchCV(xgb.XGBClassifier(**ind_params),
cv_params,
scoring='accuracy', cv=5, n_jobs=-1)
ALL_X = pred.ix[startdate:].iloc[0:ndays]
if forward_look > 0:
ALL_Y = actual.shift(-1*forward_look)
ALL_Y = ALL_Y.ix[startdate:].iloc[0:ndays]
else:
ALL_Y = actual.ix[startdate:].iloc[0:ndays]
ind = int(np.round(ndays * 0.8))
X_TRAIN = ALL_X.iloc[0:ind]
Y_TRAIN = ALL_Y.iloc[0:ind]
X_TEST = ALL_X.iloc[ind:]
Y_TEST = ALL_Y.iloc[ind:]
optimized_GBM.fit(X_TRAIN, Y_TRAIN)
best = sorted(optimized_GBM.grid_scores_, key = lambda x: (x[1], -np.std(x[2]), -x.parameters['max_depth']))[-1].parameters
cv_params = {'learning_rate': [0.1, 0.01, 0.005], 'subsample': [0.7, 0.8, 0.9]}
ind_params = {'n_estimators': 1000, 'seed': 0, 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth': best["max_depth"],
'min_child_weight': best["min_child_weight"]}
optimized_GBM = GridSearchCV(xgb.XGBClassifier(**ind_params),
cv_params,
scoring='accuracy', cv=5, n_jobs=-1)
optimized_GBM.fit(X_TRAIN, Y_TRAIN)
best = {**best, **sorted(optimized_GBM.grid_scores_, key = lambda x: (x[1], -np.std(x[2]), x.parameters['subsample']))[-1].parameters}
our_params = {'eta': best["learning_rate"], 'seed': 0, 'subsample': best["subsample"], 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth': best["max_depth"],
'min_child_weight': best["min_child_weight"]}
xgdmat = xgb.DMatrix(X_TRAIN, Y_TRAIN) # Grid Search CV optimized settings
cv_xgb = xgb.cv(params=our_params, dtrain=xgdmat, num_boost_round=3000, nfold=5,
metrics=['error'], # Make sure you enter metrics inside a list or you may encounter issues!
early_stopping_rounds=100) # Look for early stopping that minimizes error
final_gb = xgb.train(our_params, xgdmat, num_boost_round=432)
testdmat = xgb.DMatrix(X_TEST)
y_pred = final_gb.predict(testdmat) # Predict using our testdmat
predicted = y_pred
predicted[predicted > 0.5] = 1
predicted[predicted <= 0.5] = 0
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
X_TEST["PROB"] = y_pred
ret = accuracy_score(predicted, Y_TEST), 1 - accuracy_score(predicted, Y_TEST)
X_TEST["ACC"] = ret[0]
self.model = our_params
print("Xgboost model accuracy: %s" % np.round(ret[0],4))
        return X_TEST
| {
"content_hash": "835857bbbcb6580460e6906a5338fd47",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 142,
"avg_line_length": 48.98738170347003,
"alnum_prop": 0.5498744284886342,
"repo_name": "simonward86/MySJcLqwwx",
"id": "ea0136912eb68a59512420a1e10fe7ebf0de2564",
"size": "15529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Methods/ML.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1716768"
},
{
"name": "Jupyter Notebook",
"bytes": "1029270"
},
{
"name": "Python",
"bytes": "81118"
}
],
"symlink_target": ""
} |
import os
import signal
import subprocess
from unittest import mock
from django.db.backends.postgresql.client import DatabaseClient
from django.test import SimpleTestCase
class PostgreSqlDbshellCommandTestCase(SimpleTestCase):
def _run_it(self, dbinfo):
"""
        This function invokes the runshell command, while mocking
subprocess.run(). It returns a 2-tuple with:
- The command line list
- The dictionary of PG* environment variables, or {}.
"""
def _mock_subprocess_run(*args, env=os.environ, **kwargs):
self.subprocess_args = list(*args)
# PostgreSQL environment variables.
self.pg_env = {key: env[key] for key in env if key.startswith('PG')}
return subprocess.CompletedProcess(self.subprocess_args, 0)
with mock.patch('subprocess.run', new=_mock_subprocess_run):
DatabaseClient.runshell_db(dbinfo)
return self.subprocess_args, self.pg_env
def test_basic(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'password': 'somepassword',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
{'PGPASSWORD': 'somepassword'},
)
)
def test_nopass(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
{},
)
)
def test_ssl_certificate(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'host': 'somehost',
'port': '444',
'sslmode': 'verify-ca',
'sslrootcert': 'root.crt',
'sslcert': 'client.crt',
'sslkey': 'client.key',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
{
'PGSSLCERT': 'client.crt',
'PGSSLKEY': 'client.key',
'PGSSLMODE': 'verify-ca',
'PGSSLROOTCERT': 'root.crt',
},
)
)
def test_column(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'some:user',
'password': 'some:password',
'host': '::1',
'port': '444',
}), (
['psql', '-U', 'some:user', '-h', '::1', '-p', '444', 'dbname'],
{'PGPASSWORD': 'some:password'},
)
)
def test_accent(self):
username = 'rôle'
password = 'sésame'
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': username,
'password': password,
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', username, '-h', 'somehost', '-p', '444', 'dbname'],
{'PGPASSWORD': password},
)
)
def test_sigint_handler(self):
"""SIGINT is ignored in Python and passed to psql to abort quries."""
def _mock_subprocess_run(*args, **kwargs):
handler = signal.getsignal(signal.SIGINT)
self.assertEqual(handler, signal.SIG_IGN)
sigint_handler = signal.getsignal(signal.SIGINT)
# The default handler isn't SIG_IGN.
self.assertNotEqual(sigint_handler, signal.SIG_IGN)
with mock.patch('subprocess.run', new=_mock_subprocess_run):
DatabaseClient.runshell_db({})
# dbshell restores the original handler.
self.assertEqual(sigint_handler, signal.getsignal(signal.SIGINT))
| {
"content_hash": "eaa87aec92b225df09a6c531279899c4",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 84,
"avg_line_length": 34.621848739495796,
"alnum_prop": 0.4766990291262136,
"repo_name": "fenginx/django",
"id": "40d2deae6235f8b76a4f988470ba4adc6285adbb",
"size": "4122",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/dbshell/test_postgresql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48399"
},
{
"name": "HTML",
"bytes": "175296"
},
{
"name": "JavaScript",
"bytes": "238848"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11137863"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
frenchFries, salads, sandwiches, wraps, totalPrice = 10, 6, 4, 5, 118.5
| {
"content_hash": "28c2a5cf1c30af95e0eb8aa6882527b3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 71,
"avg_line_length": 72,
"alnum_prop": 0.7083333333333334,
"repo_name": "the-zebulan/CodeWars",
"id": "3e91bc1cf95439437802b275759357cc9483f802",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_8/grasshopper_shopping_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
MAX_UNCHANGE_COUNT = 100
STATE_INIT_PULL_DOWN = 1
STATE_INIT_PULL_UP = 2
STATE_DATA_FIRST_PULL_DOWN = 3
STATE_DATA_PULL_UP = 4
STATE_DATA_PULL_DOWN = 5
def read_dht11_dat(params):
    GPIO.setup(params['pin'], GPIO.OUT)
    GPIO.output(params['pin'], GPIO.HIGH)
    time.sleep(0.05)
    GPIO.output(params['pin'], GPIO.LOW)
    time.sleep(0.02)
    GPIO.setup(params['pin'], GPIO.IN, GPIO.PUD_UP)
unchanged_count = 0
last = -1
data = []
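    # Sample the data line as fast as possible; the handshake and the 40 data
    # bits show up as runs of HIGH/LOW readings. Stop once the level has been
    # stable for MAX_UNCHANGE_COUNT consecutive reads (end of transmission).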
while True:
        current = GPIO.input(params['pin'])
data.append(current)
if last != current:
unchanged_count = 0
last = current
else:
unchanged_count += 1
if unchanged_count > MAX_UNCHANGE_COUNT:
break
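    # Walk the samples with a small state machine: skip the initial
    # request/response pulses, then record the length of every HIGH pulse,
    # each of which encodes one data bit.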
state = STATE_INIT_PULL_DOWN
lengths = []
current_length = 0
for current in data:
current_length += 1
if state == STATE_INIT_PULL_DOWN:
if current == GPIO.LOW:
state = STATE_INIT_PULL_UP
else:
continue
if state == STATE_INIT_PULL_UP:
if current == GPIO.HIGH:
state = STATE_DATA_FIRST_PULL_DOWN
else:
continue
if state == STATE_DATA_FIRST_PULL_DOWN:
if current == GPIO.LOW:
state = STATE_DATA_PULL_UP
else:
continue
if state == STATE_DATA_PULL_UP:
if current == GPIO.HIGH:
current_length = 0
state = STATE_DATA_PULL_DOWN
else:
continue
if state == STATE_DATA_PULL_DOWN:
if current == GPIO.LOW:
lengths.append(current_length)
state = STATE_DATA_PULL_UP
else:
continue
if len(lengths) != 40:
return False
shortest_pull_up = min(lengths)
longest_pull_up = max(lengths)
halfway = (longest_pull_up + shortest_pull_up) / 2
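    # A bit is 1 when its HIGH pulse is longer than the midpoint between the
    # shortest and longest pulses observed (DHT11: ~28us for 0, ~70us for 1).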
bits = []
the_bytes = []
byte = 0
for length in lengths:
bit = 0
if length > halfway:
bit = 1
bits.append(bit)
for i in range(0, len(bits)):
byte = byte << 1
if (bits[i]):
byte = byte | 1
else:
byte = byte | 0
if ((i + 1) % 8 == 0):
the_bytes.append(byte)
byte = 0
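    # the_bytes = [humidity int, humidity dec, temperature int, temperature dec,
    # checksum]; the checksum is the low byte of the sum of the first four.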
checksum = (the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3]) & 0xFF
if the_bytes[4] != checksum:
return False
return the_bytes[0], the_bytes[2]
def GetValues(params):
    result = read_dht11_dat(params)
    if not result:
        # Bad bit count or checksum mismatch: skip this reading
        return None
    humidity, temperature = result
newReading = {
'sensor': {
'uuid': params['uuid'],
'poller': 'DHT11Poller'
},
'reading': {
'timestamp': time.time() * 1000
}
}
if params['meta'] == 'temperature':
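        # DHT11 reports Celsius; convert to Fahrenheit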
newReading['reading']['value'] = temperature * 9 / 5 + 32
elif params['meta'] == 'humidity':
newReading['reading']['value'] = humidity
return newReading
| {
"content_hash": "0ddceb864ed62acae5a0d08937ffd9a7",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 81,
"avg_line_length": 26.35593220338983,
"alnum_prop": 0.5157556270096463,
"repo_name": "cyrillegin/Aquaponics",
"id": "a87b039634054935ae2d6e7cfe4d5d82cabf2599",
"size": "3168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/sensorPlugins/DHT11Poller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10384"
},
{
"name": "C++",
"bytes": "98707"
},
{
"name": "CSS",
"bytes": "1575"
},
{
"name": "HTML",
"bytes": "8140"
},
{
"name": "JavaScript",
"bytes": "39613"
},
{
"name": "Python",
"bytes": "36932"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
} |
"""
State changes for accounts.
Also, the account is the aggregate root for invoices and charges,
so the creation of those is managed here.
"""
from collections import defaultdict
from datetime import date
from decimal import Decimal
from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple
from uuid import UUID
from django.db import transaction
from django.db.models import Count, Sum
from moneyed import Money
from structlog import get_logger
from billing.signals import invoice_ready
from . import invoices
from .invoices import PreconditionError
from ..models import (
Account, CARRIED_FORWARD, CREDIT_REMAINING, Charge, CreditCard,
EventLog, Invoice, ProductProperty, Transaction, total_amount,
)
from ..signals import new_compliant_account, new_delinquent_account
logger = get_logger()
def close(account_id: str) -> None:
"""
Closes the account.
:param account_id: the account to close
:return: Nothing
"""
logger.info('closing-account', account_id=account_id)
with transaction.atomic():
account = Account.objects.get(pk=account_id)
account.close()
account.save()
def reopen(account_id: str) -> None:
"""
Reopens the account.
:param account_id: the account to reopen
:return: Nothing
"""
logger.info('reopening-account', account_id=account_id)
with transaction.atomic():
account = Account.objects.get(pk=account_id)
account.reopen()
account.save()
def create_invoices(account_id: str, due_date: date) -> Sequence[Invoice]:
"""
Creates the invoices for any due positive charges in the account.
If there are due positive charges in different currencies, one invoice is created for each currency.
:param account_id: The account to invoice.
:param due_date: The due date for any invoice that gets created.
:return: A possibly-empty list of Invoices.
"""
invoices = []
with transaction.atomic():
due_charges = Charge.objects \
.uninvoiced(account_id=account_id) \
.charges()
total = total_amount(due_charges)
for amount_due in total.monies():
if amount_due.amount > 0:
invoice = Invoice.objects.create(account_id=account_id, due_date=due_date)
Charge.objects \
.uninvoiced(account_id=account_id) \
.charges() \
.in_currency(currency=amount_due.currency) \
.update(invoice=invoice)
invoices.append(invoice)
logger.info('created-invoices', account_id=str(account_id), invoice_ids=[i.pk for i in invoices])
for invoice in invoices:
invoice_ready.send(sender=create_invoices, invoice=invoice)
return invoices
def add_charge(account_id: str,
amount: Money,
reverses_id: Optional[str] = None,
product_code: Optional[str] = None,
product_properties: Optional[Dict[str, str]] = None) -> Charge:
"""
Add a charge to the account.
:param account_id: The account on which to add the charge
:param amount: The amount of the charge
:param reverses_id: Set this if this charge reverses another one
    :param product_code: A code identifying the type of product charged
    :param product_properties: A dict of names and values.
:return: The newly created charge.
"""
logger.info('adding-charge', account_id=account_id, amount=amount, product_code=product_code,
product_properties=product_properties)
with transaction.atomic():
charge = Charge(account_id=account_id,
amount=amount)
if reverses_id:
charge.reverses_id = reverses_id
if product_code:
charge.product_code = product_code
charge.full_clean(exclude=['id', 'account']) # Exclude to avoid unnecessary db queries
charge.save(force_insert=True)
if product_properties:
objs = [ProductProperty(charge=charge, name=k, value=v) for k, v in product_properties.items()]
for o in objs:
o.full_clean(exclude=['id', 'charge']) # Exclude to avoid unnecessary db queries
ProductProperty.objects.bulk_create(objs)
return charge
def assign_funds_to_account_pending_invoices(account_id: str) -> Sequence[str]:
"""
Tries to pay pending account invoices (starting from the oldest) with available funds.
:param account_id: the account on which to perform the operation
:return: The ids of the invoices that were paid (possibly empty list).
"""
logger.info('assign-funds-to-pending-invoices', account_id=str(account_id))
paid_invoice_ids = []
for invoice in Invoice.objects.filter(status=Invoice.PENDING, account_id=account_id).order_by('due_date'):
invoice_was_paid = assign_funds_to_invoice(invoice.pk)
if invoice_was_paid:
paid_invoice_ids.append(invoice.id)
else:
break # Bail even though there may be funds in another currency to pay more recent invoices.
logger.info('assign-funds-to-pending-invoices.end', account_id=str(account_id),
paid_invoice_count=len(paid_invoice_ids))
return paid_invoice_ids
def assign_funds_to_invoice(invoice_id: str) -> bool:
"""
Uses the available funds on the account (credits and payments) to pay the given invoice.
:param invoice_id: The id of the invoice.
:return: True if the invoice status is paid.
A lot of side effects may occur in the database:
- Funds (either payments or credits) may get assigned to the invoice.
- The invoice status may change.
- Credits entities may be created.
"""
logger.info('assign-funds-to-invoice', invoice_id=invoice_id)
invoice = Invoice.objects.get(pk=invoice_id)
account_id = invoice.account_id
#
# Precondition. Don't touch invoices that are not PENDING
#
if invoice.status != Invoice.PENDING:
logger.info('assign-funds-to-invoice.status-is-not-pending', invoice_id=invoice_id)
return False
#
# Precondition: Only handle invoices in a single currency
#
invoice_due_monies = invoice.due().monies()
if len(invoice_due_monies) != 1:
logger.info('assign-funds-to-invoice.more-than-one-currency', invoice_id=invoice_id)
return False
invoice_due_amount = invoice_due_monies[0].amount
invoice_due_currency = invoice_due_monies[0].currency
#
    # 1. Collect funds for as long as we need them
#
if invoice_due_amount > 0:
payments = Transaction.successful \
.payments() \
.uninvoiced(account_id=account_id) \
.in_currency(invoice_due_currency) \
.order_by('created')
credits = Charge.objects \
.credits() \
.uninvoiced(account_id=account_id) \
.in_currency(invoice_due_currency) \
.order_by('created')
funds = list(credits) + list(payments)
for fund in funds:
contributed_amount = abs(fund.amount.amount) # 'abs' because credits have a negative value
logger.info('assign-funds-to-invoice.assigning-fund',
invoice_id=invoice_id,
fund_type=type(fund).__name__,
fund_id=str(fund.pk),
contributed_amount=contributed_amount)
fund.invoice_id = invoice_id
fund.save()
invoice_due_amount -= contributed_amount
if invoice_due_amount <= 0:
break
#
# 2. Mark invoice paid if nothing is due.
#
if invoice_due_amount <= 0:
logger.info('assign-funds-to-invoice.mark-paid', invoice_id=invoice_id, invoice_due_amount=invoice_due_amount)
invoice.status = Invoice.PAID
invoice.save()
#
# 3. Carry forward any overpaid money.
#
if invoice_due_amount < 0:
overpayment = Money(abs(invoice_due_amount), invoice_due_currency)
logger.info('assign-funds-to-invoice.handling-overpayment',
invoice_id=invoice_id,
overpayment=overpayment)
with transaction.atomic():
Charge.objects.create(account_id=account_id, amount=overpayment, product_code=CARRIED_FORWARD,
invoice_id=invoice_id)
Charge.objects.create(account_id=account_id, amount=-overpayment, product_code=CREDIT_REMAINING)
return invoice.status == Invoice.PAID
def get_accounts_which_delinquent_status_has_to_change(
account_ids: List[UUID]
) -> Tuple[List[UUID], List[UUID]]:
accounts = Account.objects.filter(id__in=account_ids)
new_delinquent_account_ids = []
new_compliant_account_ids = []
for account in accounts:
reasons = get_reasons_account_is_violating_delinquent_criteria(account.id)
if reasons and not account.delinquent:
new_delinquent_account_ids.append(account.id)
if not reasons and account.delinquent:
new_compliant_account_ids.append(account.id)
return new_delinquent_account_ids, new_compliant_account_ids
def get_reasons_account_is_violating_delinquent_criteria(
account_id: UUID
) -> List[str]:
reasons = []
account = Account.objects.get(id=account_id)
if account.invoices.filter(status=Invoice.PENDING).count() > 0:
reasons.append('Account has pending invoices')
if not CreditCard.objects.filter(account=account).valid().exists():
        reasons.append('Account has no valid credit card registered')
return reasons
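# Minimal sketch (account_id is hypothetical) of how the predicate above is
# meant to combine with the two marking helpers defined below:
#
#   reasons = get_reasons_account_is_violating_delinquent_criteria(account_id)
#   if reasons:
#       mark_account_as_delinquent(account_id, reason='; '.join(reasons))
#   else:
#       mark_account_as_compliant(account_id, reason='No violations found')
#
# An empty list means the account satisfies both criteria.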
def mark_account_as_delinquent(account_id: UUID, reason: str):
account = Account.objects.get(id=account_id)
if not account.delinquent:
logger.info('mark-account-as-delinquent', account_id=account_id, reason=reason)
account.delinquent = True
account.save()
EventLog.objects.create(
account_id=account_id,
type=EventLog.NEW_DELINQUENT,
text=reason,
)
new_delinquent_account.send(sender=mark_account_as_delinquent, account=account)
def mark_account_as_compliant(account_id: UUID, reason: str):
account = Account.objects.get(id=account_id)
if account.delinquent:
logger.info('mark-account-as-compliant', account_id=account_id, reason=reason)
account.delinquent = False
account.save()
EventLog.objects.create(
account_id=account_id,
type=EventLog.NEW_COMPLIANT,
text=reason,
)
new_compliant_account.send(sender=mark_account_as_compliant, account=account)
def charge_pending_invoices(account_id: UUID) -> Dict[str, int]:
account = Account.objects.get(id=account_id)
pending_invoices = account.invoices.payable().only('pk')
logger.info('charge-pending-invoices', pending_invoices=pending_invoices)
payment_transactions = []
for invoice in pending_invoices:
try:
payment_transaction = invoices.pay_with_account_credit_cards(invoice.pk)
if payment_transaction:
payment_transactions.append(payment_transaction)
except PreconditionError:
continue
reasons = get_reasons_account_is_violating_delinquent_criteria(account.id)
if not reasons:
mark_account_as_compliant(account.id, reason='Pending invoices have been paid')
num_paid_invoices = len(payment_transactions)
return {
'num_paid_invoices': num_paid_invoices,
'num_failed_invoices': len(pending_invoices) - num_paid_invoices
}
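# Example of the summary returned above, assuming three payable invoices of
# which two could be charged (counts are hypothetical):
#
#   charge_pending_invoices(account_id)
#   # -> {'num_paid_invoices': 2, 'num_failed_invoices': 1}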
def get_account_valid_credit_card_map(
billing_account_ids: List[UUID]
) -> DefaultDict[UUID, bool]:
"""
    Returns a map from account id to a boolean indicating whether the
    account has an active, valid credit card to pay with, e.g.
    {
        '<account_id_1>': True,
        '<account_id_2>': False,
        ...
    }
"""
    account_valid_cc_map = defaultdict(bool)
objs = CreditCard.objects.filter(
account_id__in=billing_account_ids,
status=CreditCard.ACTIVE
).valid().values('account_id').annotate(n_valid_credit_cards=Count('id'))
for obj in objs:
        account_valid_cc_map[obj['account_id']] = bool(obj['n_valid_credit_cards'])
    return account_valid_cc_map
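# Because the map is a defaultdict(bool), an account without any valid active
# card simply reads back as False (ids are hypothetical):
#
#   cc_map = get_account_valid_credit_card_map([id_a, id_b])
#   cc_map[id_a]        # True iff id_a has at least one valid active card
#   cc_map[unknown_id]  # False, no KeyError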
def get_account_balance_map(
billing_account_ids: List[UUID]
) -> DefaultDict[UUID, DefaultDict[str, Decimal]]:
"""
Returns a map with the currency balances for each account
{
'<account_id_1>': {
'CHF': 10.,
'EUR': -20.,
'NOK': 0.
},
...
}
"""
account_charges_map = defaultdict(lambda: defaultdict(Decimal)) # type: DefaultDict
for obj in Transaction.successful.filter(
account_id__in=billing_account_ids
).values('account_id', 'amount_currency').annotate(sum=Sum('amount')):
account_charges_map[obj['account_id']][obj['amount_currency']] += obj['sum']
for obj in Charge.objects.filter(
account_id__in=billing_account_ids
).values('account_id', 'amount_currency').annotate(sum=Sum('amount')):
account_charges_map[obj['account_id']][obj['amount_currency']] -= obj['sum']
return account_charges_map
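# Sign convention used above, on hypothetical data: successful transactions
# (payments) increase the balance, charges decrease it.
#
#   payments: 30 EUR           -> +30
#   charges:  10 EUR + 25 EUR  -> -35
#   get_account_balance_map([account_id])[account_id]['EUR']  # Decimal('-5')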
| {
"content_hash": "88a7566bd2f408ba5613d9dceddea0f6",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 118,
"avg_line_length": 36.52328767123288,
"alnum_prop": 0.6463881179206361,
"repo_name": "skioo/django-customer-billing",
"id": "32a57b38cc1cebbfd4104f7df2223224f34c2a98",
"size": "13331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "billing/actions/accounts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "508"
},
{
"name": "Python",
"bytes": "179383"
}
],
"symlink_target": ""
} |
from __future__ import division
from otree.common import Currency as c, currency_range, safe_json
from . import models
from ._builtin import Page, WaitPage
from .models import Constants
class Choice1(Page):
form_model = models.Player
form_fields = ['first_choice']
def vars_for_template(self):
return {
'player_in_previous_rounds': self.player.in_previous_rounds(),
'respondants_num': range(1, Constants.respondants_num+1),
'common_ranks': self.session.vars['common_ranks'],
'private_ranks': self.session.vars['prop_prefs'][self.player.id_in_group-1],
'power': self.session.vars['power'][self.player.id_in_group-1],
}
class Choice2(Page):
form_model = models.Player
form_fields = ['second_choice']
def is_displayed(self):
return self.player.matched == Constants.respondants_num
def vars_for_template(self):
return {
'player_in_previous_rounds': self.player.in_previous_rounds(),
'respondants_num': range(1, Constants.respondants_num+1),
'players_num': range(1, Constants.players_per_group+1),
'common_ranks': self.session.vars['common_ranks'],
'private_ranks': self.session.vars['prop_prefs'][self.player.id_in_group-1],
'power': self.session.vars['power'][self.player.id_in_group-1],
'players': self.subsession.get_players,
'match_pair': self.player.matched,
}
class Choice3(Page):
form_model = models.Player
form_fields = ['third_choice']
def is_displayed(self):
return self.player.matched == Constants.respondants_num
def vars_for_template(self):
return {
'player_in_previous_rounds': self.player.in_previous_rounds(),
'respondants_num': range(1, Constants.respondants_num+1),
'players_num': range(1, Constants.players_per_group+1),
'common_ranks': self.session.vars['common_ranks'],
'private_ranks': self.session.vars['prop_prefs'][self.player.id_in_group-1],
'power': self.session.vars['power'][self.player.id_in_group-1],
'players': self.subsession.get_players,
'match_pair': self.player.matched,
}
class ResultsWaitPage1(WaitPage):
def after_all_players_arrive(self):
self.group.set_payoffs1()
class ResultsWaitPage2(WaitPage):
def after_all_players_arrive(self):
self.group.set_payoffs2()
class ResultsWaitPage3(WaitPage):
def after_all_players_arrive(self):
self.group.set_payoffs3()
class NextRoundWaitPage(WaitPage):
def after_all_players_arrive(self):
self.subsession.before_session_starts()
class EachResults(Page):
def is_displayed(self):
return self.player.matched != Constants.respondants_num
def vars_for_template(self):
return {
'player_in_previous_rounds': self.player.in_previous_rounds(),
'respondants_num': range(1, Constants.respondants_num+1),
'players_num': range(1, Constants.players_per_group+1),
'common_ranks': self.session.vars['common_ranks'],
'private_ranks': self.session.vars['prop_prefs'][self.player.id_in_group-1],
'power': self.session.vars['power'][self.player.id_in_group-1],
'first_choice': self.player.first_choice,
'second_choice': self.player.second_choice,
'third_choice': self.player.third_choice,
'players': self.subsession.get_players,
'match_pair': self.player.matched,
'payoff': self.player.payoff,
}
class Results(Page):
def vars_for_template(self):
return {
'player_in_previous_rounds': self.player.in_previous_rounds(),
'respondants_num': range(1, Constants.respondants_num+1),
'players_num': range(1, Constants.players_per_group+1),
'common_ranks': self.session.vars['common_ranks'],
'private_ranks': self.session.vars['prop_prefs'][self.player.id_in_group-1],
'power': self.session.vars['power'][self.player.id_in_group-1],
'first_choice': self.player.first_choice,
'second_choice': self.player.second_choice,
'third_choice': self.player.third_choice,
'players': self.subsession.get_players,
'match_pair': self.player.matched,
'payoff': self.player.payoff,
}
class ResultsSummary(Page):
def is_displayed(self):
return self.subsession.round_number == Constants.num_rounds
def vars_for_template(self):
return {
'total_payoff': sum([p.payoff for p in self.player.in_all_rounds()]),
'player_in_all_rounds': self.player.in_all_rounds(),
}
page_sequence = [
Choice1,
ResultsWaitPage1,
Choice2,
EachResults,
ResultsWaitPage2,
Choice3,
EachResults,
ResultsWaitPage3,
Results,
NextRoundWaitPage,
ResultsSummary
]
| {
"content_hash": "9bb777d3c14741772fc6b06ab0e6a337",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 88,
"avg_line_length": 32.76729559748428,
"alnum_prop": 0.6024952015355086,
"repo_name": "ogaway/ExEcon",
"id": "2f97f9c892cffc621bcf0697002272f40536273b",
"size": "5235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matching_oto_bs/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26419"
},
{
"name": "Python",
"bytes": "24675"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('finance', '0006_auto_20141125_0121'),
]
operations = [
migrations.RenameModel(
old_name='WeekDay',
new_name='WeekNumber',
),
]
| {
"content_hash": "89918b267899fadbee0f26b8e0c1ec32",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 19.705882352941178,
"alnum_prop": 0.5940298507462687,
"repo_name": "junqueira/balance",
"id": "36c9bb0990c234e503213c84569227e73cd2d59b",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finance/migrations/0007_auto_20141125_2339.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1449"
},
{
"name": "Python",
"bytes": "95550"
}
],
"symlink_target": ""
} |
"""
Exception raised when this request depended on another request and that request failed.
"""
from rinzler.exceptions import RinzlerHttpException
__author__ = ["Rinzler<github.com/feliphebueno>", "4ndr<github.com/4ndr>"]
class FailedDependencyException(RinzlerHttpException):
"""
FailedDependencyException
"""
status_code = 424
exception_name = "Failed Dependency"
| {
"content_hash": "8ee6121348f3e2ed1736479db4520b02",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.7374005305039788,
"repo_name": "feliphebueno/Rinzler",
"id": "ff1d8195650b1161c3436c8fb4d3d084ba565277",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2",
"path": "rinzler/exceptions/failed_dependency_exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35437"
}
],
"symlink_target": ""
} |
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = TwilioRestClient(account_sid, auth_token)
client.sip \
.ip_addresses("AL32a3c49700934481addd5ce1659f04d2") \
.delete("IP32a3c49700934481addd5ce1659f04d2")
| {
"content_hash": "05de0c223957461d85abb849ef7cc5b1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 62,
"avg_line_length": 36,
"alnum_prop": 0.7972222222222223,
"repo_name": "teoreteetik/api-snippets",
"id": "9c70bab7bed57d8a1adcaa01d57c3b5cb6055bd4",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/sip-in/delete-address-instance/delete-address-instance.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton import Message
from system_test import Logger, TestCase, Qdrouterd, main_module, unittest, TIMEOUT, TestTimeout, PollTimeout
from proton.handlers import MessagingHandler
from proton.reactor import Container
from qpid_dispatch_internal.compat import BINARY
class RouterTest(TestCase):
inter_router_port = None
@classmethod
def setUpClass(cls):
"""Start a router"""
super(RouterTest, cls).setUpClass()
def router(name, connection):
config = [
('router', {'mode': 'interior', 'id': name, 'allowUnsettledMulticast': 'yes'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'role': 'route-container'}),
('linkRoute', {'prefix': 'link', 'direction': 'in', 'containerId': 'LRC'}),
('linkRoute', {'prefix': 'link', 'direction': 'out', 'containerId': 'LRC'}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'spread', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
connection
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
router('A', ('listener', {'role': 'inter-router', 'port': inter_router_port}))
router('B', ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port, 'verifyHostname': 'no'}))
cls.routers[0].wait_router_connected('B')
cls.routers[1].wait_router_connected('A')
def test_01_message_route_truncated_one_router(self):
test = MessageRouteTruncateTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
"addr_01")
test.run()
self.assertEqual(None, test.error)
def test_02_message_route_truncated_two_routers(self):
test = MessageRouteTruncateTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
"addr_02")
test.run()
self.assertEqual(None, test.error)
def test_03_link_route_truncated_one_router(self):
test = LinkRouteTruncateTest(self.routers[0].addresses[0],
self.routers[0].addresses[1],
"link.addr_03",
self.routers[0].addresses[0])
test.run()
self.assertEqual(None, test.error)
def test_04_link_route_truncated_two_routers(self):
test = LinkRouteTruncateTest(self.routers[1].addresses[0],
self.routers[0].addresses[1],
"link.addr_04",
self.routers[1].addresses[0])
test.run()
self.assertEqual(None, test.error)
def test_05_message_route_abort_one_router(self):
test = MessageRouteAbortTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
"addr_05")
test.run()
if test.error:
test.logger.dump()
self.assertEqual(None, test.error)
def test_06_message_route_abort_two_routers(self):
test = MessageRouteAbortTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
"addr_06")
test.run()
if test.error:
test.logger.dump()
self.assertEqual(None, test.error)
def test_07_multicast_truncate_one_router(self):
test = MulticastTruncateTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
self.routers[0].addresses[0],
"multicast.addr_07")
test.run()
self.assertEqual(None, test.error)
class Entity(object):
def __init__(self, status_code, status_description, attrs):
self.status_code = status_code
self.status_description = status_description
self.attrs = attrs
def __getattr__(self, key):
return self.attrs[key]
class RouterProxy(object):
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def response(self, msg):
ap = msg.properties
return Entity(ap['statusCode'], ap['statusDescription'], msg.body)
def read_address(self, name):
ap = {'operation': 'READ', 'type': 'org.apache.qpid.dispatch.router.address', 'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def query_addresses(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.address'}
return Message(properties=ap, reply_to=self.reply_addr)
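# Illustrative use of RouterProxy (names are hypothetical; the tests below do
# the same thing): build a management query, send it, and decode the reply.
#
#   proxy = RouterProxy(reply_receiver.remote_source.address)
#   agent_sender.send(proxy.read_address('Clink'))
#   ...
#   entity = proxy.response(event.message)   # in on_message()
#   if entity.status_code == 200:
#       print(entity.remoteCount)            # looked up via Entity.__getattr__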
class MessageRouteTruncateTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, address):
super(MessageRouteTruncateTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.address = address
self.sender_conn = None
self.receiver_conn = None
self.error = None
self.sender1 = None
self.sender2 = None
self.sender3 = None
self.receiver = None
self.streaming = False
self.delivery = None
self.data = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
self.long_data = ""
self.sent_stream = 0
self.program = ['Send_Short_1', 'Send_Long_Truncated', 'Send_Short_2', 'Send_Short_3']
self.result = []
self.expected_result = ['Send_Short_1', 'Aborted_Delivery', '2', '2', '2', '2', '2',
'2', '2', '2', '2', '2', 'Send_Short_2', '3', '3', '3', '3',
'3', '3', '3', '3', '3', '3', 'Send_Short_3']
def timeout(self):
self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (self.program, self.result)
self.sender_conn.close()
self.receiver_conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.sender1 = event.container.create_sender(self.sender_conn, self.address, name="S1")
self.sender2 = event.container.create_sender(self.sender_conn, self.address, name="S2")
self.sender3 = event.container.create_sender(self.sender_conn, self.address, name="S3")
self.receiver = event.container.create_receiver(self.receiver_conn, self.address)
def stream(self):
self.sender1.stream(BINARY(self.long_data))
self.sent_stream += len(self.long_data)
if self.sent_stream >= 1000000:
self.streaming = False
self.sender1.close()
self.send()
def send(self):
next_op = self.program.pop(0) if len(self.program) > 0 else None
if next_op == 'Send_Short_1':
m = Message(body="%s" % next_op)
self.sender1.send(m)
elif next_op == 'Send_Long_Truncated':
for i in range(100):
self.long_data += self.data
self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
self.streaming = True
self.stream()
elif next_op == 'Send_Short_2':
m = Message(body="2")
for i in range(10):
self.sender2.send(m)
m = Message(body="Send_Short_2")
self.sender2.send(m)
self.sender2.close()
elif next_op == 'Send_Short_3':
m = Message(body="3")
for i in range(10):
self.sender3.send(m)
m = Message(body="%s" % next_op)
self.sender3.send(m)
self.sender_conn.close()
def on_sendable(self, event):
if event.sender == self.sender1 and self.program[0] == 'Send_Short_1':
self.send()
if self.streaming:
self.stream()
def on_message(self, event):
m = event.message
self.result.append(m.body)
if m.body == 'Send_Short_1':
self.send()
elif m.body == 'Send_Short_2':
self.send()
elif m.body == 'Send_Short_3':
if self.result != self.expected_result:
self.error = "Expected: %r, Actual: %r" % (self.expected_result, self.result)
self.receiver_conn.close()
self.timer.cancel()
def on_aborted(self, event):
self.result.append('Aborted_Delivery')
self.send()
def run(self):
Container(self).run()
class LinkRouteTruncateTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, address, query_host):
super(LinkRouteTruncateTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.address = address
self.query_host = query_host
self.sender_conn = None
self.receiver_conn = None
self.query_conn = None
self.error = None
self.sender1 = None
self.receiver = None
self.poll_timer = None
self.streaming = False
self.delivery = None
self.data = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
self.long_data = ""
self.sent_stream = 0
self.program = ['Send_Short_1', 'Send_Long_Truncated']
self.result = []
self.expected_result = ['Send_Short_1', 'Aborted_Delivery']
def timeout(self):
self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (self.program, self.result)
self.sender_conn.close()
self.receiver_conn.close()
self.query_conn.close()
if self.poll_timer:
self.poll_timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.query_conn = event.container.connect(self.query_host)
self.reply_receiver = event.container.create_receiver(self.query_conn, dynamic=True)
self.agent_sender = event.container.create_sender(self.query_conn, "$management")
def setup_first_links(self, event):
self.sender1 = event.container.create_sender(self.sender_conn, self.address, name="S1")
def stream(self):
self.sender1.stream(BINARY(self.long_data))
self.sent_stream += len(self.long_data)
if self.sent_stream >= 1000000:
self.streaming = False
self.sender1.close()
def send(self):
next_op = self.program.pop(0) if len(self.program) > 0 else None
if next_op == 'Send_Short_1':
m = Message(body="%s" % next_op)
self.sender1.send(m)
elif next_op == 'Send_Long_Truncated':
for i in range(100):
self.long_data += self.data
self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
self.streaming = True
self.stream()
def poll_timeout(self):
self.poll()
def poll(self):
request = self.proxy.read_address('Clink')
self.agent_sender.send(request)
def on_sendable(self, event):
if event.sender == self.sender1 and len(self.program) > 0 and self.program[0] == 'Send_Short_1':
self.send()
if event.sender == self.sender1 and self.streaming:
self.stream()
def on_link_opening(self, event):
if event.receiver:
self.receiver = event.receiver
event.receiver.target.address = self.address
event.receiver.open()
def on_link_opened(self, event):
if event.receiver == self.reply_receiver:
self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
self.poll()
def on_message(self, event):
if event.receiver == self.reply_receiver:
response = self.proxy.response(event.message)
if response.status_code == 200 and (response.remoteCount + response.containerCount) > 0:
if self.poll_timer:
self.poll_timer.cancel()
self.poll_timer = None
self.setup_first_links(event)
else:
self.poll_timer = event.reactor.schedule(0.25, PollTimeout(self))
return
m = event.message
self.result.append(m.body)
if m.body == 'Send_Short_1':
self.send()
def on_aborted(self, event):
self.result.append('Aborted_Delivery')
if self.result != self.expected_result:
self.error = "Expected: %r, Actual: %r" % (self.expected_result, self.result)
self.sender_conn.close()
self.receiver_conn.close()
self.query_conn.close()
self.timer.cancel()
def run(self):
container = Container(self)
        container.container_id = "LRC"
container.run()
class MessageRouteAbortTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, address):
super(MessageRouteAbortTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.address = address
self.sender_conn = None
self.receiver_conn = None
self.error = None
self.sender1 = None
self.receiver = None
self.delivery = None
self.logger = Logger(title="MessageRouteAbortTest")
self.program = [('D', 10), ('D', 20), ('A', 30), ('A', 40), ('D', 50), ('D', 60),
('A', 100), ('D', 110),
('A', 1000), ('A', 1010), ('A', 1020), ('A', 1030), ('A', 1040), ('D', 1050),
('A', 10000), ('A', 10010), ('A', 10020), ('A', 10030), ('A', 10040), ('D', 10050),
('A', 100000), ('A', 100010), ('A', 100020), ('A', 100030), ('A', 100040), ('D', 100050), ('F', 10)]
self.result = []
self.expected_result = [10, 20, 50, 60, 110, 1050, 10050, 100050]
def timeout(self):
self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (self.program, self.result)
self.logger.log(self.error)
self.sender_conn.close()
self.receiver_conn.close()
def on_start(self, event):
self.logger.log("on_start")
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.sender1 = event.container.create_sender(self.sender_conn, self.address, name="S1")
self.receiver = event.container.create_receiver(self.receiver_conn, self.address)
def send(self):
if self.delivery:
self.logger.log("send(): Do not send - delivery to be aborted is in flight")
return
op, size = self.program.pop(0) if len(self.program) > 0 else (None, None)
self.logger.log("send - op=%s, size=%s" % (str(op), str(size)))
        if op is None:
return
body = ""
if op == 'F':
body = "FINISH"
else:
bod = str(size)
bod2 = "0000000000" + bod
bod3 = "." + bod2[-9:]
body = bod3 * (size // 10)
msg = Message(body=body)
if op in 'DF':
self.logger.log("send(): Send message size: %d" % (size))
delivery = self.sender1.send(msg)
if op == 'A':
self.logger.log("send(): Start aborted message size: %d" % (size))
self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
encoded = msg.encode()
self.sender1.stream(encoded)
def finish(self):
if self.result != self.expected_result:
self.error = "Expected: %r, Actual: %r" % (self.expected_result, self.result)
self.logger.log(self.error)
self.sender_conn.close()
self.receiver_conn.close()
self.timer.cancel()
def on_sendable(self, event):
self.logger.log("on_sendable")
if event.sender == self.sender1:
if self.delivery:
self.delivery.abort()
self.delivery = None
self.logger.log("on_sendable aborts delivery")
else:
self.send()
def on_message(self, event):
m = event.message
if m.body == "FINISH":
self.finish()
else:
self.logger.log("on_message receives len: %d" %(len(m.body)))
self.result.append(len(m.body))
self.send()
def run(self):
Container(self).run()
class MulticastTruncateTest(MessagingHandler):
def __init__(self, sender_host, receiver_host1, receiver_host2, address):
super(MulticastTruncateTest, self).__init__()
self.sender_host = sender_host
self.receiver_host1 = receiver_host1
self.receiver_host2 = receiver_host2
self.address = address
self.r_attach_count = 0
self.senders_created = False
self.sender_conn = None
self.receiver1_conn = None
self.receiver2_conn = None
self.error = None
self.sender1 = None
self.sender2 = None
self.sender3 = None
self.receiver1 = None
self.receiver2 = None
self.streaming = False
self.delivery = None
self.data = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
self.long_data = ""
self.completions = 0
self.sent_stream = 0
self.program = ['Send_Short_1', 'Send_Long_Truncated', 'Send_Short_2', 'Send_Short_3']
self.result1 = []
self.result2 = []
self.expected_result = ['Send_Short_1', 'Aborted_Delivery', '2', '2', '2', '2', '2',
'2', '2', '2', '2', '2', 'Send_Short_2', '3', '3', '3', '3',
'3', '3', '3', '3', '3', '3', 'Send_Short_3']
def timeout(self):
self.error = "Timeout Expired - Unprocessed Ops: %r, Result1: %r, Result2: %r" % (self.program, self.result1, self.result2)
self.sender_conn.close()
self.receiver1_conn.close()
self.receiver2_conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver1_conn = event.container.connect(self.receiver_host1)
self.receiver2_conn = event.container.connect(self.receiver_host2)
self.receiver1 = event.container.create_receiver(self.receiver1_conn, self.address)
self.receiver2 = event.container.create_receiver(self.receiver2_conn, self.address)
def stream(self):
self.sender1.stream(BINARY(self.long_data))
self.sent_stream += len(self.long_data)
if self.sent_stream >= 1000000:
self.streaming = False
self.sender1.close()
self.send()
def send(self):
if self.streaming:
self.stream()
return
next_op = self.program.pop(0) if len(self.program) > 0 else None
if next_op == 'Send_Short_1':
m = Message(body="%s" % next_op)
self.sender1.send(m)
elif next_op == 'Send_Long_Truncated':
for i in range(100):
self.long_data += self.data
self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
self.streaming = True
self.stream()
elif next_op == 'Send_Short_2':
m = Message(body="2")
for i in range(10):
self.sender2.send(m)
m = Message(body="Send_Short_2")
self.sender2.send(m)
self.sender2.close()
elif next_op == 'Send_Short_3':
m = Message(body="3")
for i in range(10):
self.sender3.send(m)
m = Message(body="%s" % next_op)
self.sender3.send(m)
self.sender_conn.close()
def on_sendable(self, event):
self.send()
def on_link_opened(self, event):
if event.receiver == self.receiver1:
self.r_attach_count += 1
if event.receiver == self.receiver2:
self.r_attach_count += 1
if self.r_attach_count == 2 and not self.senders_created:
self.senders_created = True
self.sender1 = event.container.create_sender(self.sender_conn,
self.address,
name="S1")
self.sender2 = event.container.create_sender(self.sender_conn,
self.address,
name="S2")
self.sender3 = event.container.create_sender(self.sender_conn,
self.address,
name="S3")
def on_message(self, event):
m = event.message
if event.receiver == self.receiver1:
self.result1.append(m.body)
elif event.receiver == self.receiver2:
self.result2.append(m.body)
if m.body == 'Send_Short_1':
self.send()
elif m.body == 'Send_Short_2':
self.send()
elif m.body == 'Send_Short_3':
self.completions += 1
if self.completions == 2:
if self.result1 != self.expected_result or self.result2 != self.expected_result:
self.error = "Expected: %r, Actuals: %r, %r" % (self.expected_result, self.result1, self.result2)
self.receiver1_conn.close()
self.receiver2_conn.close()
self.timer.cancel()
def on_aborted(self, event):
if event.receiver == self.receiver1:
self.result1.append('Aborted_Delivery')
elif event.receiver == self.receiver2:
self.result2.append('Aborted_Delivery')
self.send()
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
| {
"content_hash": "93ea151850cb7abbc6180385f8471f7e",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 135,
"avg_line_length": 39.62311557788945,
"alnum_prop": 0.5446628619742127,
"repo_name": "bhardesty/qpid-dispatch",
"id": "36d7a91ab6de37f9581f83e1c7afb5e87ed30ef9",
"size": "24445",
"binary": false,
"copies": "1",
"ref": "refs/heads/fix-xref",
"path": "tests/system_tests_delivery_abort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1933791"
},
{
"name": "C++",
"bytes": "58231"
},
{
"name": "CMake",
"bytes": "43336"
},
{
"name": "CSS",
"bytes": "57551"
},
{
"name": "Dockerfile",
"bytes": "3278"
},
{
"name": "HTML",
"bytes": "38799"
},
{
"name": "Java",
"bytes": "1940"
},
{
"name": "JavaScript",
"bytes": "1026704"
},
{
"name": "Python",
"bytes": "2085723"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from hy import HyString
from hy.models import HyObject
from hy.compiler import hy_compile
from hy.errors import HyCompileError, HyTypeError
from hy.lex.exceptions import LexException
from hy.lex import tokenize
from hy._compat import PY3
import ast
def _ast_spotcheck(arg, root, secondary):
if "." in arg:
local, full = arg.split(".", 1)
return _ast_spotcheck(full,
getattr(root, local),
getattr(secondary, local))
assert getattr(root, arg) == getattr(secondary, arg)
def can_compile(expr):
return hy_compile(tokenize(expr), "__main__")
def cant_compile(expr):
try:
hy_compile(tokenize(expr), "__main__")
assert False
except HyTypeError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.expression, HyObject)
assert e.message
return e
except HyCompileError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.exception, HyTypeError)
assert e.traceback
return e
def test_ast_bad_type():
"Make sure AST breakage can happen"
try:
hy_compile("foo", "__main__")
assert True is False
except HyCompileError:
pass
def test_ast_bad_if():
"Make sure AST can't compile invalid if*"
cant_compile("(if*)")
cant_compile("(if* foobar)")
cant_compile("(if* 1 2 3 4 5)")
def test_ast_valid_if():
"Make sure AST can compile valid if*"
can_compile("(if* foo bar)")
def test_ast_valid_unary_op():
"Make sure AST can compile valid unary operator"
can_compile("(not 2)")
can_compile("(~ 1)")
def test_ast_invalid_unary_op():
"Make sure AST can't compile invalid unary operator"
cant_compile("(not 2 3 4)")
cant_compile("(not)")
cant_compile("(not 2 3 4)")
cant_compile("(~ 2 2 3 4)")
cant_compile("(~)")
def test_ast_bad_while():
"Make sure AST can't compile invalid while"
cant_compile("(while)")
cant_compile("(while (True))")
def test_ast_good_do():
"Make sure AST can compile valid do"
can_compile("(do)")
can_compile("(do 1)")
def test_ast_good_raise():
"Make sure AST can compile valid raise"
can_compile("(raise)")
can_compile("(raise Exception)")
can_compile("(raise e)")
if PY3:
def test_ast_raise_from():
can_compile("(raise Exception :from NameError)")
def test_ast_bad_raise():
"Make sure AST can't compile invalid raise"
cant_compile("(raise Exception Exception)")
def test_ast_good_try():
"Make sure AST can compile valid try"
can_compile("(try)")
can_compile("(try 1)")
can_compile("(try 1 (except) (else 1))")
can_compile("(try 1 (else 1) (except))")
can_compile("(try 1 (finally 1) (except))")
can_compile("(try 1 (finally 1))")
can_compile("(try 1 (except) (finally 1))")
can_compile("(try 1 (except) (finally 1) (else 1))")
can_compile("(try 1 (except) (else 1) (finally 1))")
def test_ast_bad_try():
"Make sure AST can't compile invalid try"
cant_compile("(try 1 bla)")
cant_compile("(try 1 bla bla)")
cant_compile("(try (do) (else 1) (else 2))")
cant_compile("(try 1 (else 1))")
def test_ast_good_except():
"Make sure AST can compile valid except"
can_compile("(try 1 (except))")
can_compile("(try 1 (except []))")
can_compile("(try 1 (except [Foobar]))")
can_compile("(try 1 (except [[]]))")
can_compile("(try 1 (except [x FooBar]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
def test_ast_bad_except():
"Make sure AST can't compile invalid except"
cant_compile("(except 1)")
cant_compile("(try 1 (except 1))")
cant_compile("(try 1 (except [1 3]))")
cant_compile("(try 1 (except [x [FooBar] BarBar]))")
def test_ast_good_assert():
"""Make sure AST can compile valid asserts. Asserts may or may not
include a label."""
can_compile("(assert 1)")
can_compile("(assert 1 \"Assert label\")")
can_compile("(assert 1 (+ \"spam \" \"eggs\"))")
can_compile("(assert 1 12345)")
can_compile("(assert 1 None)")
can_compile("(assert 1 (+ 2 \"incoming eggsception\"))")
def test_ast_bad_assert():
"Make sure AST can't compile invalid assert"
cant_compile("(assert)")
cant_compile("(assert 1 2 3)")
cant_compile("(assert 1 [1 2] 3)")
def test_ast_good_global():
"Make sure AST can compile valid global"
can_compile("(global a)")
can_compile("(global foo bar)")
def test_ast_bad_global():
"Make sure AST can't compile invalid global"
cant_compile("(global)")
cant_compile("(global (foo))")
if PY3:
def test_ast_good_nonlocal():
"Make sure AST can compile valid nonlocal"
can_compile("(nonlocal a)")
can_compile("(nonlocal foo bar)")
def test_ast_bad_nonlocal():
"Make sure AST can't compile invalid nonlocal"
cant_compile("(nonlocal)")
cant_compile("(nonlocal (foo))")
def test_ast_good_defclass():
"Make sure AST can compile valid defclass"
can_compile("(defclass a)")
can_compile("(defclass a [])")
def test_ast_bad_defclass():
"Make sure AST can't compile invalid defclass"
cant_compile("(defclass)")
cant_compile("(defclass a None)")
cant_compile("(defclass a None None)")
def test_ast_good_lambda():
"Make sure AST can compile valid lambda"
can_compile("(lambda [])")
can_compile("(lambda [] 1)")
def test_ast_bad_lambda():
"Make sure AST can't compile invalid lambda"
cant_compile("(lambda)")
def test_ast_good_yield():
"Make sure AST can compile valid yield"
can_compile("(yield 1)")
def test_ast_bad_yield():
"Make sure AST can't compile invalid yield"
cant_compile("(yield 1 2)")
def test_ast_good_import_from():
"Make sure AST can compile valid selective import"
can_compile("(import [x [y]])")
def test_ast_require():
"Make sure AST respects (require) syntax"
can_compile("(require tests.resources.tlib)")
can_compile("(require [tests.resources.tlib [qplah parald]])")
can_compile("(require [tests.resources.tlib [*]])")
can_compile("(require [tests.resources.tlib :as foobar])")
can_compile("(require [tests.resources.tlib [qplah :as quiz]])")
can_compile("(require [tests.resources.tlib [qplah :as quiz parald]])")
cant_compile("(require [tests.resources.tlib])")
cant_compile("(require [tests.resources.tlib [* qplah]])")
cant_compile("(require [tests.resources.tlib [qplah *]])")
cant_compile("(require [tests.resources.tlib [* *]])")
def test_ast_good_get():
"Make sure AST can compile valid get"
can_compile("(get x y)")
def test_ast_bad_get():
"Make sure AST can't compile invalid get"
cant_compile("(get)")
cant_compile("(get 1)")
def test_ast_good_cut():
"Make sure AST can compile valid cut"
can_compile("(cut x)")
can_compile("(cut x y)")
can_compile("(cut x y z)")
can_compile("(cut x y z t)")
def test_ast_bad_cut():
"Make sure AST can't compile invalid cut"
cant_compile("(cut)")
cant_compile("(cut 1 2 3 4 5)")
def test_ast_good_take():
"Make sure AST can compile valid 'take'"
can_compile("(take 1 [2 3])")
def test_ast_good_drop():
"Make sure AST can compile valid 'drop'"
can_compile("(drop 1 [2 3])")
def test_ast_good_assoc():
"Make sure AST can compile valid assoc"
can_compile("(assoc x y z)")
def test_ast_bad_assoc():
"Make sure AST can't compile invalid assoc"
cant_compile("(assoc)")
cant_compile("(assoc 1)")
cant_compile("(assoc 1 2)")
cant_compile("(assoc 1 2 3 4)")
def test_ast_bad_with():
"Make sure AST can't compile invalid with"
cant_compile("(with*)")
cant_compile("(with* [])")
cant_compile("(with* [] (pass))")
def test_ast_valid_while():
"Make sure AST can't compile invalid while"
can_compile("(while foo bar)")
def test_ast_valid_for():
"Make sure AST can compile valid for"
can_compile("(for [a 2] (print a))")
def test_ast_invalid_for():
"Make sure AST can't compile invalid for"
cant_compile("(for* [a 1] (else 1 2))")
def test_ast_valid_let():
"Make sure AST can compile valid let"
can_compile("(let [a b])")
can_compile("(let [a 1])")
can_compile("(let [a 1 b None])")
def test_ast_invalid_let():
"Make sure AST can't compile invalid let"
cant_compile("(let 1)")
cant_compile("(let [1])")
cant_compile("(let [a 1 2])")
cant_compile("(let [a])")
cant_compile("(let [1])")
def test_ast_expression_basics():
""" Ensure basic AST expression conversion works. """
code = can_compile("(foo bar)").body[0]
tree = ast.Expr(value=ast.Call(
func=ast.Name(
id="foo",
ctx=ast.Load(),
),
args=[
ast.Name(id="bar", ctx=ast.Load())
],
keywords=[],
starargs=None,
kwargs=None,
))
_ast_spotcheck("value.func.id", code, tree)
def test_ast_anon_fns_basics():
""" Ensure anon fns work. """
code = can_compile("(fn (x) (* x x))").body[0]
assert type(code) == ast.FunctionDef
code = can_compile("(fn (x))").body[0]
cant_compile("(fn)")
def test_ast_non_decoratable():
""" Ensure decorating garbage breaks """
cant_compile("(with-decorator (foo) (* x x))")
def test_ast_lambda_lists():
"""Ensure the compiler chokes on invalid lambda-lists"""
cant_compile('(fn [&key {"a" b} &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional a &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional [a b c]] a)')
cant_compile('(fn [&optional [1 2]] (list 1 2))')
def test_ast_print():
code = can_compile("(print \"foo\")").body[0]
assert type(code.value) == ast.Call
def test_ast_tuple():
""" Ensure tuples work. """
code = can_compile("(, 1 2 3)").body[0].value
assert type(code) == ast.Tuple
def test_argument_destructuring():
""" Ensure argument destructuring compilers. """
can_compile("(fn [[a b]] (print a b))")
cant_compile("(fn [[]] 0)")
def test_lambda_list_keywords_rest():
""" Ensure we can compile functions with lambda list keywords."""
can_compile("(fn (x &rest xs) (print xs))")
cant_compile("(fn (x &rest xs &rest ys) (print xs))")
can_compile("(fn (&optional a &rest xs) (print xs))")
def test_lambda_list_keywords_key():
""" Ensure we can compile functions with &key."""
can_compile("(fn (x &key {foo True}) (list x foo))")
cant_compile("(fn (x &key {bar \"baz\"} &key {foo 42}) (list x bar foo))")
cant_compile("(fn (x &key {1 2 3 4}) (list x))")
def test_lambda_list_keywords_kwargs():
""" Ensure we can compile functions with &kwargs."""
can_compile("(fn (x &kwargs kw) (list x kw))")
cant_compile("(fn (x &kwargs xs &kwargs ys) (list x xs ys))")
can_compile("(fn (&optional x &kwargs kw) (list x kw))")
def test_lambda_list_keywords_kwonly():
"""Ensure we can compile functions with &kwonly if we're on Python
3, or fail with an informative message on Python 2."""
kwonly_demo = "(fn [&kwonly a [b 2]] (print a b))"
if PY3:
code = can_compile(kwonly_demo)
for i, kwonlyarg_name in enumerate(('a', 'b')):
assert kwonlyarg_name == code.body[0].args.kwonlyargs[i].arg
assert code.body[0].args.kw_defaults[0] is None
assert code.body[0].args.kw_defaults[1].n == 2
else:
exception = cant_compile(kwonly_demo)
assert isinstance(exception, HyTypeError)
message, = exception.args
assert message == ("keyword-only arguments are only "
"available under Python 3")
def test_lambda_list_keywords_mixed():
""" Ensure we can mix them up."""
can_compile("(fn (x &rest xs &kwargs kw) (list x xs kw))")
cant_compile("(fn (x &rest xs &fasfkey {bar \"baz\"}))")
if PY3:
can_compile("(fn [x &rest xs &kwargs kwxs &kwonly kwoxs]"
" (list x xs kwxs kwoxs))")
def test_missing_keyword_argument_value():
"""Ensure the compiler chokes on missing keyword argument values."""
try:
can_compile("((fn [x] x) :x)")
except HyTypeError as e:
assert(e.message == "Keyword argument :x needs a value.")
else:
assert(False)
def test_ast_unicode_strings():
"""Ensure we handle unicode strings correctly"""
def _compile_string(s):
hy_s = HyString(s)
hy_s.start_line = hy_s.end_line = 0
hy_s.start_column = hy_s.end_column = 0
code = hy_compile([hy_s], "__main__")
# code == ast.Module(body=[ast.Expr(value=ast.Str(s=xxx))])
return code.body[0].value.s
assert _compile_string("test") == "test"
assert _compile_string("\u03b1\u03b2") == "\u03b1\u03b2"
assert _compile_string("\xc3\xa9") == "\xc3\xa9"
def test_compile_error():
"""Ensure we get compile error in tricky cases"""
try:
can_compile("(fn [] (in [1 2 3]))")
except HyTypeError as e:
assert(e.message == "`in' needs at least 2 arguments, got 1.")
else:
assert(False)
def test_for_compile_error():
"""Ensure we get compile error in tricky 'for' cases"""
try:
can_compile("(fn [] (for)")
except LexException as e:
assert(e.message == "Premature end of input")
else:
assert(False)
try:
can_compile("(fn [] (for)))")
except LexException as e:
assert(e.message == "Ran into a RPAREN where it wasn't expected.")
else:
assert(False)
try:
can_compile("(fn [] (for [x] x))")
except HyTypeError as e:
assert(e.message == "`for' requires an even number of args.")
else:
assert(False)
try:
can_compile("(fn [] (for [x xx]))")
except HyTypeError as e:
assert(e.message == "`for' requires a body to evaluate")
else:
assert(False)
try:
can_compile("(fn [] (for [x xx] (else 1)))")
except HyTypeError as e:
assert(e.message == "`for' requires a body to evaluate")
else:
assert(False)
def test_attribute_access():
"""Ensure attribute access compiles correctly"""
can_compile("(. foo bar baz)")
can_compile("(. foo [bar] baz)")
can_compile("(. foo bar [baz] [0] quux [frob])")
can_compile("(. foo bar [(+ 1 2 3 4)] quux [frob])")
cant_compile("(. foo bar :baz [0] quux [frob])")
cant_compile("(. foo bar baz (0) quux [frob])")
cant_compile("(. foo bar baz [0] quux {frob})")
def test_attribute_empty():
"""Ensure using dot notation with a non-expression is an error"""
cant_compile(".")
cant_compile("foo.")
cant_compile(".foo")
cant_compile('"bar".foo')
cant_compile('[2].foo')
def test_cons_correct():
"""Ensure cons gets compiled correctly"""
can_compile("(cons a b)")
def test_invalid_list_comprehension():
"""Ensure that invalid list comprehensions do not break the compiler"""
cant_compile("(genexpr x [])")
cant_compile("(genexpr [x [1 2 3 4]] x)")
cant_compile("(list-comp None [])")
cant_compile("(list-comp [x [1 2 3]] x)")
def test_bad_setv():
"""Ensure setv handles error cases"""
cant_compile("(setv if* 1)")
cant_compile("(setv (a b) [1 2])")
def test_defn():
"""Ensure that defn works correctly in various corner cases"""
cant_compile("(defn if* [] 1)")
cant_compile("(defn \"hy\" [] 1)")
cant_compile("(defn :hy [] 1)")
can_compile("(defn &hy [] 1)")
def test_setv_builtins():
"""Ensure that assigning to a builtin fails, unless in a class"""
cant_compile("(setv None 42)")
cant_compile("(defn get [&rest args] 42)")
can_compile("(defclass A [] (defn get [self] 42))")
can_compile("""
(defclass A []
(defn get [self] 42)
(defclass B []
(defn get [self] 42))
(defn if* [self] 0))
""")
| {
"content_hash": "ddb426f586a01d7ca39acb856d4b08fa",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 78,
"avg_line_length": 28.430823117338004,
"alnum_prop": 0.5998521621288654,
"repo_name": "jakirkham/hy",
"id": "9a1a05e72485d38ee0580d9eb9bd9d8f3fd03e73",
"size": "17408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/compilers/test_ast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1999"
},
{
"name": "Hy",
"bytes": "167835"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "Python",
"bytes": "234550"
}
],
"symlink_target": ""
} |
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'deployments': reverse('deployment-list', request=request, format=format),
'images': reverse('image-list', request=request, format=format),
'nodes': reverse('node-list', request=request, format=format),
'projects': reverse('project-list', request=request, format=format),
'reservations': reverse('reservation-list', request=request, format=format),
'roles': reverse('role-list', request=request, format=format),
'serviceclasses': reverse('serviceclass-list', request=request, format=format),
'serviceresources': reverse('serviceresource-list', request=request, format=format),
'sites': reverse('site-list', request=request, format=format),
'slices': reverse('slice-list', request=request, format=format),
'slivers': reverse('sliver-list', request=request, format=format),
'tags': reverse('tag-list', request=request, format=format),
'users': reverse('user-list', request=request, format=format),
})
| {
"content_hash": "dab28470672bc5a03e96198cf1440b78",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 92,
"avg_line_length": 58.095238095238095,
"alnum_prop": 0.6901639344262295,
"repo_name": "wathsalav/xos",
"id": "4ac267a01dbf0ad888d886d0665a33ff478aa09f",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xos/core/api_root.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "370"
},
{
"name": "CSS",
"bytes": "37088"
},
{
"name": "HTML",
"bytes": "636864"
},
{
"name": "JavaScript",
"bytes": "760492"
},
{
"name": "Makefile",
"bytes": "2717"
},
{
"name": "Python",
"bytes": "1160110"
},
{
"name": "Shell",
"bytes": "10483"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trips', '0009_auto_20160924_0206'),
]
operations = [
migrations.AddField(
model_name='article',
name='domain',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| {
"content_hash": "2c7fdfd43aa71187be84e1d954c33def",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.5964912280701754,
"repo_name": "MianBot/site",
"id": "e4fccd7e4e1f319b47e1dbb888c935a8b75e76dd",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site/trips/migrations/0010_article_domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61395"
},
{
"name": "HTML",
"bytes": "15524"
},
{
"name": "JavaScript",
"bytes": "268812"
},
{
"name": "PHP",
"bytes": "10598"
},
{
"name": "Python",
"bytes": "102786"
},
{
"name": "Ruby",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from app.app_and_db import app, db
from app.startup.init_app import init_app
import os
from flask_sslify import SSLify
from OpenSSL import SSL
from flask_user import UserManager, SQLAlchemyAdapter
from app.users.models import User
init_app(app, db)
#sslify = SSLify(app)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == "__main__":
app.run(debug=False, port=4000)
| {
"content_hash": "bf11a6252c69cb5f145de1a0135cbb5a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 53,
"avg_line_length": 29.2,
"alnum_prop": 0.7488584474885844,
"repo_name": "TheRushingWookie/MotivateOpen",
"id": "e006c3345c2e0fc4e3cd16ff23410f5d169051dd",
"size": "686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "259"
},
{
"name": "HTML",
"bytes": "16777"
},
{
"name": "Python",
"bytes": "30671"
},
{
"name": "Shell",
"bytes": "24"
}
],
"symlink_target": ""
} |
import sys
import itertools, collections
import tree
try:
_, parsefilename, goldfilename = sys.argv
except ValueError:
sys.stderr.write("usage: evalb.py <parse-file> <gold-file>\n")
sys.exit(1)
def _brackets_helper(node, i, result):
i0 = i
if len(node.children) > 0:
for child in node.children:
i = _brackets_helper(child, i, result)
j0 = i
if len(node.children[0].children) > 0: # don't count preterminals
result[node.label, i0, j0] += 1
else:
j0 = i0 + 1
return j0
def brackets(t):
result = collections.defaultdict(int)
_brackets_helper(t.root, 0, result)
return result
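# Worked example on a hypothetical tree "(S (NP (DT the) (NN cat)) (VB sat))":
# preterminals (DT, NN, VB) are skipped, so brackets() returns
#   {('NP', 0, 2): 1, ('S', 0, 3): 1}
# where each key is (label, start, end) over word positions, end exclusive.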
matchcount = parsecount = goldcount = 0
for parseline, goldline in itertools.izip(open(parsefilename), open(goldfilename)):
gold = tree.Tree.from_str(goldline)
goldbrackets = brackets(gold)
goldcount += sum(goldbrackets.itervalues())
if parseline.strip() in ["0", ""]:
continue
parse = tree.Tree.from_str(parseline)
parsebrackets = brackets(parse)
parsecount += sum(parsebrackets.itervalues())
for bracket,count in parsebrackets.iteritems():
matchcount += min(count,goldbrackets[bracket])
print "%s\t%d brackets" % (parsefilename, parsecount)
print "%s\t%d brackets" % (goldfilename, goldcount)
print "matching\t%d brackets" % matchcount
print "precision\t%s" % (float(matchcount)/parsecount)
print "recall\t%s" % (float(matchcount)/goldcount)
print "F1\t%s" % (2./(goldcount/float(matchcount) + parsecount/float(matchcount)))
| {
"content_hash": "e862926d6491c2d40fd3600b2ea6320e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 83,
"avg_line_length": 30.9,
"alnum_prop": 0.6653721682847896,
"repo_name": "zaycev/pcfg_parser",
"id": "c62178128f7cadbde6fef1f147d9b575d858ba61",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evalb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21294"
},
{
"name": "Shell",
"bytes": "1217"
}
],
"symlink_target": ""
} |
import queue
ROOM_ENTERED = 'roomEntered'
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
    def registerEventHandler(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
            # check if any event handler wants to process this event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
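# Minimal usage sketch (assumed, not part of the original module):
#
#   events = EventSystem()
#
#   def on_room_entered(event):
#       print('entered:', event.userParam.get('room'))
#
#   handler_id = events.registerEventHandler(ROOM_ENTERED, on_room_entered)
#   events.createEvent(Event(ROOM_ENTERED, {'room': 'cellar'}))
#   events.processEvents()   # drains the queue, calls on_room_entered
#   events.unregisterEventHandler(ROOM_ENTERED, handler_id)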
| {
"content_hash": "9a4b3b47f3990bdf597d6ec2ea0d9484",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 31.138461538461538,
"alnum_prop": 0.6388339920948617,
"repo_name": "JeFaProductions/TextAdventure2",
"id": "706720f9584247fe1387ca09ddee93c7af47920a",
"size": "2024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tead/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "192"
},
{
"name": "Python",
"bytes": "22605"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0002_auto_20151025_1910'),
]
operations = [
migrations.CreateModel(
name='UserAddress',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('type', models.CharField(max_length=120, choices=[('billing', 'Billing'), ('shipping', 'Shipping')])),
('street', models.CharField(max_length=120)),
('city', models.CharField(max_length=120)),
('state', models.CharField(max_length=120)),
('zipcode', models.CharField(max_length=120)),
('user', models.ForeignKey(to='orders.UserCheckout')),
],
),
]
| {
"content_hash": "60e6b755eecd906ce585f4f396bdc20e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 119,
"avg_line_length": 35.48,
"alnum_prop": 0.5625704622322435,
"repo_name": "abhijitbangera/ecommerce",
"id": "2baafe5c77282356aa1a288da384462ad056e69e",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/orders/migrations/0003_useraddress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "902"
},
{
"name": "CSS",
"bytes": "44031"
},
{
"name": "HTML",
"bytes": "26473"
},
{
"name": "JavaScript",
"bytes": "79859"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "85112"
}
],
"symlink_target": ""
} |
import random
import engine
from network import server
from network import service
from protocols import gate_to_game
from protocols import game_to_gate
class Connection:
def __init__(self):
self.gameid = None
def connection_made(self):
print('game connection made')
def connection_lost(self):
print('game connection lost')
def connection_ready(self):
print('game connection ready')
def game_server_ready(self, gameid):
self.gameid = gameid
engine.server().game_mgr.game_ready(gameid, self.cid)
def create_player_client(self, cid, eid, name):
print('create client entity', cid, eid, name, self.gameid)
client = engine.server().get_client(cid)
client.remote.create_player_client(eid, name)
engine.server().on_entity_created(eid, cid, self.gameid)
def client_msg(self, cid, eid, data):
client = engine.server().get_client(cid)
client.remote.client_msg(eid, data)
def entity_msg_return(self, cid, eid, token, data):
client = engine.server().get_client(cid)
client.remote.entity_msg_return(eid, token, data)
def send_entity_defs(self, cid, client_defs, server_defs):
client = engine.server().get_client(cid)
client.remote.send_entity_defs(client_defs, server_defs)
class GameMgr:
def __init__(self):
self.games = {} # {gid: cid}
def game_ready(self, gameid, cid):
self.games[gameid] = cid
def serve(self):
gate_service = service.gen_service(game_to_gate.GameToGate())
game_service = service.gen_service(gate_to_game.GateToGame())
config = engine.config()
self.server = server.Server(Connection, config['net_option'], \
game_service, gate_service)
self.server.start(config['gport'])
def get_free_game(self):
games = list(self.server.connections.values())
return random.choice(games)
def get_game(self, gameid):
cid = self.games[gameid]
return self.server.get_connection(cid)
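# Rough flow (sketch; engine internals are assumed): a game process connects,
# its Connection calls game_server_ready(gameid), and GameMgr records
# gameid -> cid so that later lookups work:
#
#   game = engine.server().game_mgr.get_game(gameid)   # Connection instance
#   game = engine.server().game_mgr.get_free_game()    # any connected game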
| {
"content_hash": "db62da76cc6be46a5dcf682fbeea69e4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 65,
"avg_line_length": 25.602739726027398,
"alnum_prop": 0.7142857142857143,
"repo_name": "dennisding/ether",
"id": "2b39f3d550d5e20829a01ea7faeebf646389975b",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gate/game_mgr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "75989"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import OrderedDict
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.urls import NoReverseMatch, Resolver404, get_script_prefix, resolve
from django.utils import six
from django.utils.encoding import (
python_2_unicode_compatible, smart_text, uri_to_iri
)
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.fields import (
Field, empty, get_attribute, is_simple_callable, iter_options
)
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.utils import html
def method_overridden(method_name, klass, instance):
"""
Determine if a method has been overridden.
"""
method = getattr(klass, method_name)
default_method = getattr(method, '__func__', method) # Python 3 compat
return default_method is not getattr(instance, method_name).__func__
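# Usage sketch (hypothetical classes, for illustration only):
#     class Base(object):
#         def get(self): ...
#     class Child(Base):
#         def get(self): ...
# method_overridden('get', Base, Child()) returns True, while it returns
# False for an instance of a subclass that leaves `get` untouched.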
class Hyperlink(six.text_type):
"""
A string like object that additionally has an associated name.
We use this for hyperlinked URLs that may render as a named link
in some contexts, or render as a plain URL in others.
"""
def __new__(self, url, obj):
ret = six.text_type.__new__(self, url)
ret.obj = obj
return ret
def __getnewargs__(self):
        return (str(self), self.name,)
@property
def name(self):
# This ensures that we only called `__str__` lazily,
# as in some cases calling __str__ on a model instances *might*
# involve a database lookup.
return six.text_type(self.obj)
is_hyperlink = True
@python_2_unicode_compatible
class PKOnlyObject(object):
"""
This is a mock object, used for when we only need the pk of the object
instance, but still want to return an object with a .pk attribute,
in order to keep the same interface as a regular model instance.
"""
def __init__(self, pk):
self.pk = pk
def __str__(self):
return "%s" % self.pk
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
MANY_RELATION_KWARGS = (
'read_only', 'write_only', 'required', 'default', 'initial', 'source',
'label', 'help_text', 'style', 'error_messages', 'allow_empty',
'html_cutoff', 'html_cutoff_text'
)
class RelatedField(Field):
queryset = None
html_cutoff = None
html_cutoff_text = None
def __init__(self, **kwargs):
self.queryset = kwargs.pop('queryset', self.queryset)
cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF
if cutoff_from_settings is not None:
cutoff_from_settings = int(cutoff_from_settings)
self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)
self.html_cutoff_text = kwargs.pop(
'html_cutoff_text',
self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)
)
if not method_overridden('get_queryset', RelatedField, self):
assert self.queryset is not None or kwargs.get('read_only', None), (
'Relational field must provide a `queryset` argument, '
'override `get_queryset`, or set read_only=`True`.'
)
assert not (self.queryset is not None and kwargs.get('read_only', None)), (
'Relational fields should not provide a `queryset` argument, '
'when setting read_only=`True`.'
)
kwargs.pop('many', None)
kwargs.pop('allow_empty', None)
super(RelatedField, self).__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automagically create
# `ManyRelatedField` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super(RelatedField, cls).__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method handles creating a parent `ManyRelatedField` instance
when the `many=True` keyword argument is passed.
Typically you won't need to override this method.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomManyRelatedField(*args, **kwargs)
"""
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs.keys():
if key in MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return ManyRelatedField(**list_kwargs)
def run_validation(self, data=empty):
# We force empty strings to None values for relational fields.
if data == '':
data = None
return super(RelatedField, self).run_validation(data)
def get_queryset(self):
queryset = self.queryset
if isinstance(queryset, (QuerySet, Manager)):
# Ensure queryset is re-evaluated whenever used.
# Note that actually a `Manager` class may also be used as the
# queryset argument. This occurs on ModelSerializer fields,
# as it allows us to generate a more expressive 'repr' output
# for the field.
# Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'
queryset = queryset.all()
return queryset
def use_pk_only_optimization(self):
return False
def get_attribute(self, instance):
if self.use_pk_only_optimization() and self.source_attrs:
# Optimized case, return a mock object only containing the pk attribute.
try:
instance = get_attribute(instance, self.source_attrs[:-1])
value = instance.serializable_value(self.source_attrs[-1])
if is_simple_callable(value):
# Handle edge case where the relationship `source` argument
# points to a `get_relationship()` method on the model
value = value().pk
return PKOnlyObject(pk=value)
except AttributeError:
pass
# Standard case, return the object instance.
return get_attribute(instance, self.source_attrs)
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict([
(
self.to_representation(item),
self.display_value(item)
)
for item in queryset
])
@property
def choices(self):
return self.get_choices()
@property
def grouped_choices(self):
return self.choices
def iter_options(self):
return iter_options(
self.get_choices(cutoff=self.html_cutoff),
cutoff=self.html_cutoff,
cutoff_text=self.html_cutoff_text
)
def display_value(self, instance):
return six.text_type(instance)
class StringRelatedField(RelatedField):
"""
A read only field that represents its targets using their
plain string representation.
"""
def __init__(self, **kwargs):
kwargs['read_only'] = True
super(StringRelatedField, self).__init__(**kwargs)
def to_representation(self, value):
return six.text_type(value)
class PrimaryKeyRelatedField(RelatedField):
default_error_messages = {
'required': _('This field is required.'),
'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
}
def __init__(self, **kwargs):
self.pk_field = kwargs.pop('pk_field', None)
super(PrimaryKeyRelatedField, self).__init__(**kwargs)
def use_pk_only_optimization(self):
return True
def to_internal_value(self, data):
if self.pk_field is not None:
data = self.pk_field.to_internal_value(data)
try:
return self.get_queryset().get(pk=data)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
if self.pk_field is not None:
return self.pk_field.to_representation(value.pk)
return value.pk
class HyperlinkedRelatedField(RelatedField):
lookup_field = 'pk'
view_name = None
default_error_messages = {
'required': _('This field is required.'),
'no_match': _('Invalid hyperlink - No URL match.'),
'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),
'does_not_exist': _('Invalid hyperlink - Object does not exist.'),
'incorrect_type': _('Incorrect type. Expected URL string, received {data_type}.'),
}
def __init__(self, view_name=None, **kwargs):
if view_name is not None:
self.view_name = view_name
assert self.view_name is not None, 'The `view_name` argument is required.'
self.lookup_field = kwargs.pop('lookup_field', self.lookup_field)
self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field)
self.format = kwargs.pop('format', None)
# We include this simply for dependency injection in tests.
# We can't add it as a class attributes or it would expect an
# implicit `self` argument to be passed.
self.reverse = reverse
super(HyperlinkedRelatedField, self).__init__(**kwargs)
def use_pk_only_optimization(self):
return self.lookup_field == 'pk'
def get_object(self, view_name, view_args, view_kwargs):
"""
Return the object corresponding to a matched URL.
Takes the matched URL conf arguments, and should return an
object instance, or raise an `ObjectDoesNotExist` exception.
"""
lookup_value = view_kwargs[self.lookup_url_kwarg]
lookup_kwargs = {self.lookup_field: lookup_value}
return self.get_queryset().get(**lookup_kwargs)
def get_url(self, obj, view_name, request, format):
"""
Given an object, return the URL that hyperlinks to the object.
May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
attributes are not configured to correctly match the URL conf.
"""
# Unsaved objects will not yet have a valid URL.
if hasattr(obj, 'pk') and obj.pk in (None, ''):
return None
lookup_value = getattr(obj, self.lookup_field)
kwargs = {self.lookup_url_kwarg: lookup_value}
return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
def to_internal_value(self, data):
request = self.context.get('request', None)
try:
http_prefix = data.startswith(('http:', 'https:'))
except AttributeError:
self.fail('incorrect_type', data_type=type(data).__name__)
if http_prefix:
# If needed convert absolute URLs to relative path
data = urlparse.urlparse(data).path
prefix = get_script_prefix()
if data.startswith(prefix):
data = '/' + data[len(prefix):]
data = uri_to_iri(data)
try:
match = resolve(data)
except Resolver404:
self.fail('no_match')
try:
expected_viewname = request.versioning_scheme.get_versioned_viewname(
self.view_name, request
)
except AttributeError:
expected_viewname = self.view_name
if match.view_name != expected_viewname:
self.fail('incorrect_match')
try:
return self.get_object(match.view_name, match.args, match.kwargs)
except (ObjectDoesNotExist, TypeError, ValueError):
self.fail('does_not_exist')
def to_representation(self, value):
assert 'request' in self.context, (
"`%s` requires the request in the serializer"
" context. Add `context={'request': request}` when instantiating "
"the serializer." % self.__class__.__name__
)
request = self.context['request']
format = self.context.get('format', None)
# By default use whatever format is given for the current context
# unless the target is a different type to the source.
#
# Eg. Consider a HyperlinkedIdentityField pointing from a json
# representation to an html property of that representation...
#
# '/snippets/1/' should link to '/snippets/1/highlight/'
# ...but...
# '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
if format and self.format and self.format != format:
format = self.format
# Return the hyperlink, or error if incorrectly configured.
try:
url = self.get_url(value, self.view_name, request, format)
except NoReverseMatch:
msg = (
'Could not resolve URL for hyperlinked relationship using '
'view name "%s". You may have failed to include the related '
'model in your API, or incorrectly configured the '
'`lookup_field` attribute on this field.'
)
if value in ('', None):
value_string = {'': 'the empty string', None: 'None'}[value]
msg += (
" WARNING: The value of the field on the model instance "
"was %s, which may be why it didn't match any "
"entries in your URL conf." % value_string
)
raise ImproperlyConfigured(msg % self.view_name)
if url is None:
return None
return Hyperlink(url, value)
class HyperlinkedIdentityField(HyperlinkedRelatedField):
"""
A read-only field that represents the identity URL for an object, itself.
This is in contrast to `HyperlinkedRelatedField` which represents the
URL of relationships to other objects.
"""
def __init__(self, view_name=None, **kwargs):
assert view_name is not None, 'The `view_name` argument is required.'
kwargs['read_only'] = True
kwargs['source'] = '*'
super(HyperlinkedIdentityField, self).__init__(view_name, **kwargs)
def use_pk_only_optimization(self):
# We have the complete object instance already. We don't need
# to run the 'only get the pk for this relationship' code.
return False
class SlugRelatedField(RelatedField):
"""
A read-write field that represents the target of the relationship
by a unique 'slug' attribute.
"""
default_error_messages = {
'does_not_exist': _('Object with {slug_name}={value} does not exist.'),
'invalid': _('Invalid value.'),
}
def __init__(self, slug_field=None, **kwargs):
assert slug_field is not None, 'The `slug_field` argument is required.'
self.slug_field = slug_field
super(SlugRelatedField, self).__init__(**kwargs)
def to_internal_value(self, data):
try:
return self.get_queryset().get(**{self.slug_field: data})
except ObjectDoesNotExist:
self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))
except (TypeError, ValueError):
self.fail('invalid')
def to_representation(self, obj):
return getattr(obj, self.slug_field)
class ManyRelatedField(Field):
"""
Relationships with `many=True` transparently get coerced into instead being
a ManyRelatedField with a child relationship.
The `ManyRelatedField` class is responsible for handling iterating through
the values and passing each one to the child relationship.
This class is treated as private API.
You shouldn't generally need to be using this class directly yourself,
and should instead simply set 'many=True' on the relationship.
"""
initial = []
default_empty_html = []
default_error_messages = {
'not_a_list': _('Expected a list of items but got type "{input_type}".'),
'empty': _('This list may not be empty.')
}
html_cutoff = None
html_cutoff_text = None
def __init__(self, child_relation=None, *args, **kwargs):
self.child_relation = child_relation
self.allow_empty = kwargs.pop('allow_empty', True)
cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF
if cutoff_from_settings is not None:
cutoff_from_settings = int(cutoff_from_settings)
self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)
self.html_cutoff_text = kwargs.pop(
'html_cutoff_text',
self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)
)
assert child_relation is not None, '`child_relation` is a required argument.'
super(ManyRelatedField, self).__init__(*args, **kwargs)
self.child_relation.bind(field_name='', parent=self)
def get_value(self, dictionary):
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
# Don't return [] if the update is partial
if self.field_name not in dictionary:
if getattr(self.root, 'partial', False):
return empty
return dictionary.getlist(self.field_name)
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
if isinstance(data, type('')) or not hasattr(data, '__iter__'):
self.fail('not_a_list', input_type=type(data).__name__)
if not self.allow_empty and len(data) == 0:
self.fail('empty')
return [
self.child_relation.to_internal_value(item)
for item in data
]
def get_attribute(self, instance):
# Can't have any relationships if not created
if hasattr(instance, 'pk') and instance.pk is None:
return []
relationship = get_attribute(instance, self.source_attrs)
return relationship.all() if hasattr(relationship, 'all') else relationship
def to_representation(self, iterable):
return [
self.child_relation.to_representation(value)
for value in iterable
]
def get_choices(self, cutoff=None):
return self.child_relation.get_choices(cutoff)
@property
def choices(self):
return self.get_choices()
@property
def grouped_choices(self):
return self.choices
def iter_options(self):
return iter_options(
self.get_choices(cutoff=self.html_cutoff),
cutoff=self.html_cutoff,
cutoff_text=self.html_cutoff_text
)
| {
"content_hash": "e310c84dbc1c5a31f849622d6b49e99c",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 90,
"avg_line_length": 36.51865671641791,
"alnum_prop": 0.6163277817513028,
"repo_name": "jpadilla/django-rest-framework",
"id": "22078e64a4f620d3d8ab835ba6c065c2cb0a0d8d",
"size": "19590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework/relations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39386"
},
{
"name": "HTML",
"bytes": "83318"
},
{
"name": "JavaScript",
"bytes": "18201"
},
{
"name": "Python",
"bytes": "1193432"
}
],
"symlink_target": ""
} |
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
NO_REMOTES_MESSAGE = "You have not configured any remotes."
START_PUSH_MESSAGE = "Starting push..."
END_PUSH_MESSAGE = "Push complete."
PUSH_TO_BRANCH_NAME_PROMPT = "Enter remote branch name:"
SET_UPSTREAM_PROMPT = ("You have not set an upstream for the active branch. "
"Would you like to set one?")
CONFIRM_FORCE_PUSH = ("You are about to `git push --force`. Would you "
"like to proceed?")
class PushBase(GitCommand):
set_upstream = False
def do_push(self, remote, branch, force=False, local_branch=None):
"""
Perform `git push remote branch`.
"""
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if force and savvy_settings.get("confirm_force_push"):
if not sublime.ok_cancel_dialog(CONFIRM_FORCE_PUSH):
return
sublime.status_message(START_PUSH_MESSAGE)
self.push(
remote,
branch,
set_upstream=self.set_upstream,
force=force,
local_branch=local_branch
)
sublime.status_message(END_PUSH_MESSAGE)
util.view.refresh_gitsavvy(self.window.active_view())
class GsPushCommand(WindowCommand, PushBase):
"""
Perform a normal `git push`.
"""
def run(self, force=False):
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if savvy_settings.get("prompt_for_tracking_branch") and not self.get_upstream_for_active_branch():
if sublime.ok_cancel_dialog(SET_UPSTREAM_PROMPT):
self.window.run_command("gs_push_to_branch_name", {
"set_upstream": True,
"force": force
})
else:
sublime.set_timeout_async(lambda: self.do_push(None, None, force=force))
class GsPushToBranchCommand(WindowCommand, PushBase):
"""
Through a series of panels, allow the user to push to a specific remote branch.
"""
def run(self):
sublime.set_timeout_async(self.run_async)
def run_async(self):
"""
Display a panel of all remotes defined for the repo, then proceed to
`on_select_remote`. If no remotes are defined, notify the user and
proceed no further.
"""
self.remotes = list(self.get_remotes().keys())
if not self.remotes:
self.window.show_quick_panel([NO_REMOTES_MESSAGE], None)
else:
pre_selected_idx = (self.remotes.index(self.last_remote_used)
if self.last_remote_used in self.remotes
else 0)
self.window.show_quick_panel(
self.remotes,
self.on_select_remote,
flags=sublime.MONOSPACE_FONT,
selected_index=pre_selected_idx
)
def on_select_remote(self, remote_index):
"""
After the user selects a remote, display a panel of branches that are
present on that remote, then proceed to `on_select_branch`.
"""
# If the user pressed `esc` or otherwise cancelled.
if remote_index == -1:
return
self.selected_remote = self.remotes[remote_index]
self.last_remote_used = self.selected_remote
self.branches_on_selected_remote = self.list_remote_branches(self.selected_remote)
current_local_branch = self.get_current_branch_name()
try:
pre_selected_idx = self.branches_on_selected_remote.index(
self.selected_remote + "/" + current_local_branch)
except ValueError:
pre_selected_idx = 0
def deferred_panel():
self.window.show_quick_panel(
self.branches_on_selected_remote,
self.on_select_branch,
flags=sublime.MONOSPACE_FONT,
selected_index=pre_selected_idx
)
sublime.set_timeout(deferred_panel)
def on_select_branch(self, branch_index):
"""
Determine the actual branch name of the user's selection, and proceed
to `do_push`.
"""
# If the user pressed `esc` or otherwise cancelled.
if branch_index == -1:
return
selected_branch = self.branches_on_selected_remote[branch_index].split("/", 1)[1]
sublime.set_timeout_async(lambda: self.do_push(self.selected_remote, selected_branch))
class GsPushToBranchNameCommand(WindowCommand, PushBase):
"""
Prompt for remote and remote branch name, then push.
"""
def run(self, set_upstream=False, force=False):
self.set_upstream = set_upstream
self.force = force
sublime.set_timeout_async(self.run_async)
def run_async(self):
"""
Display a panel of all remotes defined for the repo, then proceed to
`on_select_remote`. If no remotes are defined, notify the user and
proceed no further.
"""
self.remotes = list(self.get_remotes().keys())
if not self.remotes:
self.window.show_quick_panel([NO_REMOTES_MESSAGE], None)
else:
pre_selected_idx = (self.remotes.index(self.last_remote_used)
if self.last_remote_used in self.remotes
else 0)
self.window.show_quick_panel(
self.remotes,
self.on_select_remote,
flags=sublime.MONOSPACE_FONT,
selected_index=pre_selected_idx
)
def on_select_remote(self, remote_index):
"""
After the user selects a remote, prompt the user for a branch name.
"""
# If the user pressed `esc` or otherwise cancelled.
if remote_index == -1:
return
self.selected_remote = self.remotes[remote_index]
self.last_remote_used = self.selected_remote
current_local_branch = self.get_current_branch_name()
self.window.show_input_panel(
PUSH_TO_BRANCH_NAME_PROMPT,
current_local_branch,
self.on_entered_branch_name,
None,
None
)
def on_entered_branch_name(self, branch):
"""
Push to the remote that was previously selected and provided branch
name.
"""
sublime.set_timeout_async(lambda: self.do_push(
self.selected_remote,
branch,
self.force,
local_branch=self.get_current_branch_name()))
| {
"content_hash": "1b2817a278768a66af8b6c468d414d48",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 106,
"avg_line_length": 33.984771573604064,
"alnum_prop": 0.5856609410007468,
"repo_name": "asfaltboy/GitSavvy",
"id": "a8934684840c80e8323c38c3bae7d1e2f0eaebc9",
"size": "6695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/commands/push.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118"
},
{
"name": "HTML",
"bytes": "5477"
},
{
"name": "JavaScript",
"bytes": "84855"
},
{
"name": "Python",
"bytes": "375976"
}
],
"symlink_target": ""
} |
__engines__ = {}
def get_engine_list():
return sorted(__engines__.keys())
def get_engine(name, default=None):
return __engines__.get(name, default)
def register_engine(name, engine):
__engines__[name] = engine
# maybe an engine should be a class that supports two methods:
# "process" and "documentation"?
def docutils_engine(content):
from docutils.core import publish_parts
meta = {}
pp = publish_parts(content, writer_name="html")
body = pp["html_body"]
meta['title'] = pp['title']
return body, meta
def textile_engine(content):
import textile
meta = {}
body = textile.textile(content.encode('utf-8'), encoding='utf-8', output='utf-8')
body = unicode(body, 'utf-8')
body = u'<div class="document">%s</div>' % body
return body, meta
def wrap_engine(content):
meta = {}
body = u'<div class="document">%s</div>' % content
return body, meta
def aether_engine(content):
import aether_markup
meta = {}
body = aether_markup.markup(content, meta, outermost=True)
return body, meta
def null_engine(content):
meta = {}
return content, meta
# This could be also done if all the markup parsers/generators supported a
# common interface and supported a setuptools 'entrypoint'.
register_engine('inline-text/x-rst', docutils_engine)
register_engine('inline-text/x-textile', textile_engine)
register_engine('inline-text/x-aether', aether_engine)
register_engine('inline-text/html', wrap_engine)
register_engine('text/plain', null_engine)
register_engine('text/html', null_engine)
__all__ = [ 'register_engine', 'get_engine_list', 'get_engine' ]
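# A minimal sketch of the class-based engine idea floated in the comment
# above (an assumed design, not part of this module): each engine exposes
# "process" and "documentation" methods, and registration stays unchanged.
class EngineBase(object):
    def documentation(self):
        return self.__doc__ or ''
    def process(self, content):
        raise NotImplementedError
class WrapEngine(EngineBase):
    """Wrap raw HTML content in a document div."""
    def process(self, content):
        meta = {}
        body = u'<div class="document">%s</div>' % content
        return body, meta
# e.g. register_engine('inline-text/html', WrapEngine().process)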
| {
"content_hash": "d5238b311281675160b4d7a7fe21b8d7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 85,
"avg_line_length": 29.321428571428573,
"alnum_prop": 0.6766138855054811,
"repo_name": "gdamjan/vezilka",
"id": "44402c2c30bb359ed57126ec92c59eca850dc802",
"size": "1667",
"binary": false,
"copies": "1",
"ref": "refs/heads/werkzeug",
"path": "vezilka/model/markup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2861"
},
{
"name": "Python",
"bytes": "39300"
}
],
"symlink_target": ""
} |
from ScrollText import ScrollText
from Tkinter import *
from Prolog import PrologException
from FastIndex import FastIndex
import ErrDialog
import re
import AnnotationColours
import sets
#import Colour
import parser
def startCompare(x, y):
(xLine, xCol) = x[1].split('.')
(yLine, yCol) = y[1].split('.')
if xLine == yLine:
return int(xCol) - int(yCol)
else:
return int(xLine) - int(yLine)
class PrologFrame(ScrollText):
menu = None
annotation_menu = None
ann_changed = None
ErrMsg = None
filter_tags = None
filter_colour = None
annotation_colour = None
app = None
def colours_changed(self, new_colours):
for t in new_colours:
self.text.tag_configure(t, foreground=new_colours[t])
def __init__(self,master=None, text="",readonly=True,app=None):
ScrollText.__init__(self,master=master,text=text,readonly=readonly)
self.app = app
app.pref.register_listener('syntax highlights', self.colours_changed)
self.colours_changed(app.colours.syn_colour)
self.text.tag_configure("hilite", background="grey")
## force the selection cursor to have higher priority than
## all annotations
self.text.tag_raise("sel",aboveThis="hilite")
def load_source(self, file, syntax=True):
if syntax:
syntax = self.app.logen.get_syntax(file)
self.load(file)
if syntax:
fast_idx = FastIndex(self.text.get("1.0", "end"))
self.highlight_list(syntax, fast_idx)
return fast_idx
def parse_check(self):
self.text.tag_remove("errorTag", "1.0", "end")
try:
self.app.logen.parse_file(self.filename)
except PrologException, (Type,Msg):
lineno = re.compile('.*lines::([0-9]+)-([0-9]+)\n.*')
m = lineno.search(Msg)
if m is not None:
line = m.group(1)
lineend = m.group(2)
self.text.tag_add("errorTag", "%s.0"%line,"%s.0 lineend"%lineend)
ErrDialog.complain_loudly(master=self,title="Parse Error:::",msg=Msg+"\nPlease fix errors before entering annotation mode")
return False
return True
def highlight_list(self, syntax, fast_idx, tcl_index=False):
#print syntax
self.ErrMsg = None
clp = True
if self.text.search("{","1.0") == "":
clp = False
for i in xrange(0, len(syntax), 3):
tag = syntax[i + 2]
if tcl_index:
### they are already in tcl form so leave them alone...
(index, index1) = (syntax[i], syntax[i+1])
else:
start = int(syntax[i]) - 1
end = int(syntax[i + 1]) - 1
(index, index1) = fast_idx.get_two_tk_indices(start, end)
if clp:
if self.text.get(index,index1).startswith('{'):
clpend = self.text.search("}",index1, fast_idx.line_end(index))
index1 = fast_idx.next_char(clpend)
self.text.tag_add(tag, index, index1)
def set_text_and_highlight(self, text, syntax, fast_idx, tcl_index=False):
self.set_text(text)
if syntax is not None:
self.highlight_list(syntax, fast_idx, tcl_index)
def save(self, filename):
#if not self.app.logen.check_string_syntax(self.text.get("1.0", "end")):
# return False
#steve remove parse check...
ScrollText.save(self, filename)
return True
def get_tag_position(self, ann, index):
newindex = self.text.index(index + " + 1 char")
return self.text.tag_prevrange(ann, newindex)
#steve refactored a bit and moved these into here (instead of Ann Frame)
def get_call_args(self, end, upto=None, tags=None):
if upto is None:
(uptoTest,upto, _) = self.get_next_ann(end, tags)
if uptoTest == "":
upto = "end"
posargs = self.text.get(end, upto)
return parser.get_arity(posargs)
def get_all_heads(self, text=None):
if (text is None):
text = self.text
all_heads = sets.Set()
heads = self.text.tag_ranges('head')
for i in xrange(0, len(heads), 2):
start = heads[i]
end = heads[i + 1]
head = text.get(start, end)
# we guess by doing upto the next ann (or end of file..)
if i < len(heads) - 2:
next_head = heads[i + 2]
else:
next_head = "end"
(arity, args) = self.get_call_args(end, upto=next_head)
all_heads.add("%s/%d" % (head, arity))
return all_heads
def get_prev_ann(self, cur_loc, tag_list):
text = self.text
next_start = text.index("1.0")
start = ""
for tag in tag_list:
i = text.tag_prevrange(tag, cur_loc)
if i != ():
(cur_start, cur_end) = i
if text.compare(cur_start, ">", next_start):
next_start = cur_start
start = (tag, cur_start, cur_end)
if start == "":
return ("", 0.0, 0.0)
else:
return start
def get_next_ann(self, cur_loc, tag_list):
text = self.text
next_start = text.index("end")
start = ""
for tag in tag_list:
i = text.tag_nextrange(tag, cur_loc)
if i != ():
(cur_start, cur_end) = i
if text.compare(cur_start, "<", next_start):
next_start = cur_start
start = (tag, cur_start, cur_end)
if start == "":
return ("", 0.0, 0.0)
else:
return start
def ann_iter(self,Annotation,Start="1.0", retval= "text"):
text = self.text
current = Start
i = text.tag_nextrange(Annotation, current)
while i != ():
(start, current) = i
if retval == "text":
yield text.get(start,current)
elif retval == "index":
yield i
elif retval == "text_index":
yield (text.get(start,current), start,current)
i = text.tag_nextrange(Annotation, current)
## HTML PRINTING...
def get_all_tags(self, text=None):
if text is None:
text = self.text
tags = []
for tag in text.tag_names():
tag_list = text.tag_ranges(tag)
for i in range(0, len(tag_list), 2):
tags.append((tag, tag_list[i], tag_list[i + 1]))
tags.sort(startCompare)
return tags
def get_next_tag(self, cur_loc, text=None):
if text is None:
text = self.text
next_start = text.index("end")
start = ""
for tag in text.tag_names():
i = text.tag_nextrange(tag, cur_loc)
if i != ():
(cur_start, cur_end) = i
if text.compare(cur_start, "<", next_start):
next_start = cur_start
start = (tag, cur_start, cur_end)
if start == "":
return ("", 0.0, 0.0)
else:
return start
def htmlstring(self,str):
str = str.replace("\t", " ")
        # use non-breaking spaces so the generated HTML preserves indentation
        str = str.replace(" ", "&nbsp;")
#str(" "," ")
#return "<tt>%s</tt>"%str
return str
def convertColour(self,colour):
if colour == "":
return "#000000"
rgb = self.text.winfo_rgb(colour)
red, green, blue = rgb[0]/256, rgb[1]/256, rgb[2]/256
return "#%02x%02x%02x" % (red,green,blue)
#if colour == "hotpink1":
# return "gray"
#return colour
def getHtmlTag(self, text, ann):
#openTag = '<FONT COLOR="%s">' % (self.convertColour(text.tag_cget(ann,"foreground")))
#closeTag = '</FONT>'
if text.tag_cget(ann,"underline") != "":
under = "text-decoration:underline;"
else:
under = ""
openTag = '<p class="code" style="color:%s;%s">'%(self.convertColour(text.tag_cget(ann,"foreground")),under)
closeTag = '</p>'
return (openTag,closeTag)
def line_to_html(self,text, line):
html = ""
cur_loc = line
lineend = text.index("%s lineend" % line)
#print "Processing line ", line, lineend
stop = "0.0"
start = "0.0"
(ann, start, stop) = self.get_next_tag(cur_loc)
while(start != "" and text.compare(start, "<=", lineend) and text.compare(start,">=", line) and text.compare(stop,"<=", lineend)):
html += '<p class="code">'+self.htmlstring(text.get(cur_loc, start))+ "</p>"
(openTag, closeTag) = self.getHtmlTag(text,ann)
html += openTag
html += self.htmlstring(text.get(start,stop))
cur_loc = stop
html += closeTag
(ann, start, stop) = self.get_next_tag(cur_loc)
if text.compare(stop,">=", lineend) and text.compare(start, "<=", lineend):
(openTag, closeTag) = self.getHtmlTag(text,ann)
html += openTag
html += self.htmlstring(text.get(cur_loc, lineend))
html += closeTag
else:
html += '<p class="code">'+self.htmlstring(text.get(cur_loc, lineend))+'</p>'
return html
def to_html(self, text =None, html_mode=True):
print "Converting to html"
html = ""
if text is None:
text = self.text
#remove non print tags
text.tag_remove("errorTag", "1.0", "end")
text.tag_remove("unsafe", "1.0", "end")
#start at begining
cur_loc = "1.0"
prev_loc = "0.0"
while cur_loc != prev_loc:
#print cur_loc
if html_mode:
html += self.line_to_html(text,cur_loc) + "\n"
else:
#html += self.line_to_tex(text, cur_loc) + "\n"
pass
prev_loc = cur_loc
cur_loc = text.index("%s +1 line linestart" % cur_loc)
return html
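# Rough usage sketch (assumed; `app` must provide the .pref, .colours and
# .logen attributes used above):
#     frame = PrologFrame(master=root, app=app)
#     frame.load_source("example.pl")
#     html = frame.to_html()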
| {
"content_hash": "1b10c6a7c400eb0399542eda66de778f",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 138,
"avg_line_length": 31.821958456973295,
"alnum_prop": 0.5014919806042522,
"repo_name": "leuschel/logen",
"id": "b8587cbd804fae40899011a96360a5b0916c3197",
"size": "10724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_logen/pylogen/PrologFrame.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "712"
},
{
"name": "CSS",
"bytes": "10574"
},
{
"name": "Groff",
"bytes": "3454"
},
{
"name": "HTML",
"bytes": "11971"
},
{
"name": "JavaScript",
"bytes": "3412"
},
{
"name": "Makefile",
"bytes": "4229"
},
{
"name": "OpenEdge ABL",
"bytes": "90269"
},
{
"name": "PHP",
"bytes": "103934"
},
{
"name": "Perl",
"bytes": "1525"
},
{
"name": "Perl6",
"bytes": "125"
},
{
"name": "Prolog",
"bytes": "2316571"
},
{
"name": "Python",
"bytes": "972523"
},
{
"name": "Shell",
"bytes": "15458"
},
{
"name": "Tcl",
"bytes": "63623"
},
{
"name": "TeX",
"bytes": "102325"
},
{
"name": "XSLT",
"bytes": "26887"
}
],
"symlink_target": ""
} |
import requests
# Do work within JSON
import json
# Set our Auth URL
auth_url = 'https://identity.api.rackspacecloud.com/v2.0/tokens'
# Set our Openstack User Name
user_name = '<SOMEUSERID>'
# Set our Openstack API Key
api_key = '<SOMERAXAPIKEY>'
# Create some basic Headers
basic_headers = {'Content-Type': 'application/json'}
# Build the POST Data
post_data = {
"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": "%s" % user_name,
"apiKey": "%s" % api_key
}
}
}
service_catalog = requests.post(
auth_url,
headers=basic_headers,
data=json.dumps(post_data)
)
service_catalog_dict = json.loads(service_catalog.content)
access = service_catalog_dict['access']
all_service_catalog_endpoints = access['serviceCatalog']
for service in all_service_catalog_endpoints:
if 'name' in service and service['name'] == 'cloudFiles':
print service
# print json.dumps(service_catalog.json(), indent=2)
print service_catalog.status_code
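# For reference, a typical Keystone v2 / Rackspace service catalog entry has
# roughly this shape (assumed from the API family, trimmed for brevity):
#   {"name": "cloudFiles", "type": "object-store",
#    "endpoints": [{"region": "DFW", "publicURL": "https://...", ...}]}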
| {
"content_hash": "370f330b85958ef754a43d09a72a87af",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 64,
"avg_line_length": 19.169811320754718,
"alnum_prop": 0.6692913385826772,
"repo_name": "os-cloud/intros",
"id": "38d171221f690219f7bf843cd4ae3ba6e6c9f5d9",
"size": "1037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "list-files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2435"
},
{
"name": "Shell",
"bytes": "10167"
}
],
"symlink_target": ""
} |
import io
import os
import re
from setuptools import setup, find_packages
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='django_auth_ldap3_ad_backend',
version=find_version('django_auth_ldap3_ad_backend', '__init__.py'),
description='Django authenticate backend for Windows Active Directory with LDAP3 library',
long_description='Django authenticate backend for Windows Active Directory with LDAP3 library',
url='https://github.com/thinkAmi/django-auth-ldap3-ad-backend',
author='thinkAmi',
author_email='dev.thinkami@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
],
keywords='django auth backend ldap activedirectory',
packages=find_packages(exclude=['test*']),
install_requires=[
'ldap3>=0.9.9',
'django>=1.8'
],
) | {
"content_hash": "0465ff8040dd672cebf19611474794c8",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 32.45454545454545,
"alnum_prop": 0.6211484593837535,
"repo_name": "thinkAmi/django-auth-ldap3-ad-backend",
"id": "67592aab6086a7d3386afb471a5d88ca57572ae6",
"size": "1428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1434"
},
{
"name": "Python",
"bytes": "7677"
}
],
"symlink_target": ""
} |
import itertools
import logging
from collections import defaultdict
import networkx as nx
import numpy as np
from scipy.spatial import ConvexHull
import matplotlib.pyplot as plt
from django.db import transaction
import qmpy
from qmpy.utils import *
from . import phase
from .reaction import Reaction
from .equilibrium import Equilibrium
logger = logging.getLogger(__name__)
if qmpy.FOUND_PULP:
import pulp
else:
logger.warn("Cannot import PuLP, cannot do GCLP")
class PhaseSpaceError(Exception):
pass
class Heap(dict):
    def add(self, seq):
        seq = sorted(seq)
        if len(seq) == 1:
            # don't clobber an existing subtree for this key
            if seq[0] not in self:
                self[seq[0]] = Heap()
            return
        e0 = seq[0]
        if e0 not in self:
            self[e0] = Heap()
        self[e0].add(seq[1:])
@property
def sequences(self):
seqs = []
for k, v in list(self.items()):
if not v:
seqs.append([k])
else:
for v2 in v.sequences:
seqs.append([k] + v2)
return seqs
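# Illustration of Heap (assumed usage, mirroring heap_structure_spaces below):
# sequences are stored as a trie keyed on their sorted elements, and
# .sequences walks them back out with shared prefixes merged:
#     h = Heap()
#     h.add(['Li', 'Fe', 'O'])
#     h.add(['Fe', 'Ni'])
#     h.sequences  # -> [['Fe', 'Li', 'O'], ['Fe', 'Ni']] (order not fixed)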
class PhaseSpace(object):
"""
A PhaseSpace object represents, naturally, a region of phase space.
The most fundamental property of a PhaseSpace is its bounds,
which are given as a hyphen-delimited list of compositions. These represent
the extent of the phase space, and determine which phases are within the
space.
Next, a PhaseSpace has an attribute, data, which is a PhaseData object,
and is a container for Phase objects, which are used when performing
thermodynamic analysis on this space.
The majority of attributes are lazy, that is, they are only computed when
they are requested, and how to get them (of which there are often several
ways) is decided based on the size and shape of the phase space.
"""
def __init__(self, bounds, mus=None, data=None, **kwargs):
"""
Arguments:
bounds:
Sequence of compositions. Can be comma-delimited ("Fe,Ni,O"),
an actual list (['Fe', 'Ni', 'O']) or any other python
sequence. The compositions need not be elements, if you want to
take a slice through the Fe-Ni-O phase diagram between Fe3O4
and NiO, just do "Fe3O4-NiO".
Keyword Arguments
mus:
define a dictionary of chemical potentials. Will adjust all
calculated formation energies accordingly.
data:
If supplied with a PhaseData instance, it will be used
instead of loading from the OQMD. Can be used to significantly
reduce the amount of time spent querying the database when looping
through many PhaseSpaces.
Examples::
>>> ps = PhaseSpace('Fe-Li-O', load="legacy.dat")
>>> ps2 = PhaseSpace(['Fe','Li','O'], data=ps.data)
>>> ps = PhaseSpace(set(['Li', 'Ni', 'O']))
>>> ps = PhaseSpace('Li2O-Fe2O3')
"""
self.clear_all()
self.set_mus(mus)
self.set_bounds(bounds)
if data is None:
self.data = phase.PhaseData()
if bounds:
self.load(**kwargs)
else:
self.data = data.get_phase_data(self.space)
def __repr__(self):
if self.bounds is None:
return "<unbounded PhaseSpace>"
names = [format_comp(reduce_comp(b)) for b in self.bounds]
bounds = "-".join(names)
if self.mus:
bounds += " " + format_mus(self.mus)
return "<PhaseSpace bound by %s>" % bounds
def __getitem__(self, i):
return self.phases[i]
def __len__(self):
return len(self.phases)
def set_bounds(self, bounds):
bounds = parse_space(bounds)
if bounds is None:
self.bounds = None
return
elements = sorted(set.union(*[set(b.keys()) for b in bounds]))
basis = []
for b in bounds:
basis.append([b.get(k, 0) for k in elements])
self.bounds = bounds
self.basis = np.array(basis)
def infer_formation_energies(self):
mus = {}
for elt in self.space:
if elt in self.phase_dict:
mus[elt] = self.phase_dict[elt].energy
else:
mus[elt] = 0.0
for phase in self.phases:
for elt in self.space:
phase.energy -= phase.unit_comp.get(elt, 0) * mus[elt]
def set_mus(self, mus):
self.mus = {}
if mus is None:
return
elif isinstance(mus, str):
mus = mus.replace(",", " ")
for mu in mus.split():
self.mus.update(parse_mu(mu))
elif isinstance(mus, dict):
self.mus = mus
def load(self, **kwargs):
"""
Loads oqmd data into the associated PhaseData object.
"""
target = kwargs.get("load", "oqmd")
if not target:
return
stable = kwargs.get("stable", False)
fit = kwargs.get("fit", "standard")
total = kwargs.get("total", (fit is None))
if target == "oqmd":
self.data.load_oqmd(self.space, fit=fit, stable=stable, total=total)
elif "legacy" in target:
self.data.load_library("legacy.dat")
        elif target == "icsd":
            self.data.load_oqmd(
                self.space,
                fit=fit,
                search={"entry__path__contains": "icsd"},
                stable=stable,
                total=total,
            )
        elif target == "prototypes":
            self.data.load_oqmd(
                space=self.space,
                fit=fit,
                search={"path__contains": "prototypes"},
                stable=stable,
                total=total,
            )
        elif target is None:
pass
else:
raise ValueError("Unknown load argument: %s" % target)
def get_subspace(self, space):
data = self.data.get_phase_data(space)
return PhaseSpace(space, data=data)
_phases = None
@property
def phases(self):
if self._phases:
return self._phases
phases = [p for p in self.data.phases if self.in_space(p) and p.use]
self._phases = phases
return self._phases
@phases.setter
def phases(self, phases):
self.clear_all()
self.data = phase.PhaseData()
self.data.phases = phases
_phase_dict = None
@property
def phase_dict(self):
if self._phase_dict:
return self._phase_dict
phase_dict = dict(
[
(k, p)
for k, p in list(self.data.phase_dict.items())
if p.use and self.in_space(p)
]
)
self._phase_dict = phase_dict
return self._phase_dict
@phase_dict.setter
def phase_dict(self, phase_dict):
self.clear_all()
self.data = phase.PhaseData()
self.data.phases = list(phase_dict.values())
def phase_energy(self, p):
dE = sum([self.mus.get(k, 0) * v for k, v in list(p.unit_comp.items())])
N = sum(v for k, v in list(p.unit_comp.items()) if k in self.bound_space)
if N == 0:
N = 1
return (p.energy - dE) / N
def phase_comp(self, p):
comp = dict((k, v) for k, v in list(p.comp.items()) if k in self.bound_elements)
return unit_comp(comp)
def clear_data(self):
"""
Clears all phase data.
"""
self._phases = None
self._phase_dict = None
def clear_analysis(self):
"""
Clears all calculated results.
"""
self._stable = None
self._tie_lines = None
self._hull = None
self._spaces = None
self._dual_spaces = None
self._cliques = None
self._graph = None
def clear_all(self):
"""
Clears input data and analyzed results.
Same as:
>>> PhaseData.clear_data()
>>> PhaseData.clear_analysis()
"""
self.clear_data()
self.clear_analysis()
def load_tie_lines(self):
raise NotImplementedError
@property
def comp_dimension(self):
"""
Compositional dimension of the region of phase space.
Examples::
>>> s = PhaseSpace('Fe-Li-O')
>>> s.comp_dimension
2
>>> s = PhaseSpace('FeO-Ni2O-CoO-Ti3O4')
>>> s.comp_dimension
3
"""
return len(self.bounds) - 1
@property
def chempot_dimension(self):
"""
Chemical potential dimension.
Examples::
>>> s = PhaseSpace('Fe-Li', 'O=-2.5')
>>> s.chempot_dimension
0
>>> s = PhaseSpace('Fe-Li', 'N=0:-5')
>>> s.chempot_dimension
1
>>> s = PhaseSpace('Fe-Li', 'N=0:-5 F=0:-5')
>>> s.chempot_dimension
2
"""
cpdims = [k for k, v in list(self.mus.items()) if isinstance(v, list)]
return len(cpdims)
@property
def shape(self):
"""
(# of compositional dimensions, # of chemical potential dimensions)
The shape attribute of the PhaseSpace determines what type of phase
diagram will be drawn.
Examples::
>>> s = PhaseSpace('Fe-Li', 'O=-1.2')
>>> s.shape
(1, 0)
>>> s = PhaseSpace('Fe-Li', 'O=0:-5')
>>> s.shape
(1, 1)
>>> s = PhaseSpace('Fe-Li-P', 'O=0:-5')
>>> s.shape
(2,1)
>>> s = PhaseSpace('Fe', 'O=0:-5')
>>> s.shape
(0, 1)
"""
return (self.comp_dimension, self.chempot_dimension)
@property
def bound_space(self):
"""
Set of elements _of fixed composition_ in the PhaseSpace.
Examples::
>>> s = PhaseSpace('Fe-Li', 'O=-1.4')
>>> s.bound_space
set(['Fe', 'Li'])
"""
if self.bounds is None:
return set()
return set.union(*[set(b.keys()) for b in self.bounds])
@property
def bound_elements(self):
"""
Alphabetically ordered list of elements with constrained composition.
"""
return sorted(self.bound_space)
@property
def space(self):
"""
Set of elements present in the PhaseSpace.
Examples::
>>> s = PhaseSpace('Pb-Te-Se')
>>> s.space
set(['Pb', 'Te', 'Se'])
>>> s = PhaseSpace('PbTe-Na-PbSe')
>>> s.space
set(['Pb', 'Te', 'Na', 'Se'])
"""
return self.bound_space | set(self.mus.keys())
@property
def elements(self):
"""
Alphabetically ordered list of elements present in the PhaseSpace.
"""
return sorted(self.space)
def coord(self, composition, tol=1e-4):
"""Returns the barycentric coordinate of a composition, relative to the
bounds of the PhaseSpace. If the object isn't within the bounds, raises
a PhaseSpaceError.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.coord({'Fe':1, 'Li':1, 'O':2})
array([ 0.25, 0.25, 0.5 ])
>>> space = PhaseSpace('Fe2O3-Li2O')
>>> space.coord('Li5FeO4')
array([ 0.25, 0.75])
"""
if isinstance(composition, phase.Phase):
composition = composition.comp
elif isinstance(composition, str):
composition = parse_comp(composition)
composition = defaultdict(float, composition)
if self.bounds is None:
return np.array([composition[k] for k in self.bound_elements])
bcomp = dict(
(k, v) for k, v in list(composition.items()) if k in self.bound_space
)
composition = unit_comp(bcomp)
cvec = np.array([composition.get(k, 0) for k in self.bound_elements])
coord = np.linalg.lstsq(self.basis.T, cvec, rcond=None)[0]
if abs(sum(coord) - 1) > 1e-3 or any(c < -1e-3 for c in coord):
raise PhaseSpaceError
return coord
def comp(self, coord):
"""
Returns the composition of a coordinate in phase space.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.comp([0.2, 0.2, 0.6])
{'Fe': 0.2, 'O': 0.6, 'Li': 0.2}
"""
if self.bounds is None:
return defaultdict(float, list(zip(self.elements, coord)))
        if len(coord) != len(self.bounds):
            raise ValueError("Dimensions of coordinate must match PhaseSpace")
tot = sum(coord)
coord = [c / float(tot) for c in coord]
comp = defaultdict(float)
for b, x in zip(self.bounds, coord):
for elt, val in list(b.items()):
comp[elt] += val * x
return dict((k, v) for k, v in list(comp.items()) if v > 1e-4)
_spaces = None
@property
def spaces(self):
"""
List of lists of elements, such that every phase in self.phases
is contained in at least one set, and no set is a subset of
any other. This corresponds to the smallest subset of spaces that must
be analyzed to determine the stability of every phase in your dataset.
Examples::
>>> pa, pb, pc = Phase('A', 0), Phase('B', 0), Phase('C', 0)
>>> p1 = Phase('AB2', -1)
>>> p2 = Phase('B3C', -4)
>>> s = PhaseSpace('A-B-C', load=None)
>>> s.phases = [ pa, pb, pc, p1, p2 ]
>>> s.spaces
[['C', 'B'], ['A', 'B']]
"""
if self._spaces:
return self._spaces
spaces = set([frozenset(p.space) for p in list(self.phase_dict.values())])
spaces = [
space for space in spaces if not any([space < space2 for space2 in spaces])
]
self._spaces = list(map(list, spaces))
return self._spaces
def find_stable(self):
stable = set()
for space in self.spaces:
subspace = self.get_subspace(space)
stable |= set(subspace.stable)
self._stable = stable
return stable
_dual_spaces = None
@property
def dual_spaces(self):
"""
List of sets of elements, such that any possible tie-line
between two phases in phases is contained in at least one
set, and no set is a subset of any other.
"""
if self._dual_spaces is None:
# self._dual_spaces = self.get_dual_spaces()
self._dual_spaces = self.heap_structure_spaces()
return self._dual_spaces
    def heap_structure_spaces(self):
        if len(self.spaces) == 1:
            return self.spaces
        heap = Heap()
        for c1, c2 in itertools.combinations(self.spaces, r=2):
            heap.add(set(c1 + c2))
        return heap.sequences
    def get_dual_spaces(self):
        if len(self.spaces) == 1:
            return self.spaces
        dual_spaces = []
        spaces = sorted(self.spaces, key=lambda x: -len(x))
        for c1, c2 in itertools.combinations(spaces, r=2):
            c3 = frozenset(c1 + c2)
            for j, c4 in enumerate(dual_spaces):
                if c3 <= c4:
                    # already covered by an existing (equal or larger) space
                    break
                elif c4 < c3:
                    # c3 supersedes a smaller space collected earlier
                    dual_spaces[j] = c3
                    break
            else:
                dual_spaces.append(c3)
        self._dual_spaces = dual_spaces
        return self._dual_spaces
def find_tie_lines(self):
phases = list(self.phase_dict.values())
indict = dict((k, v) for v, k in enumerate(phases))
adjacency = np.zeros((len(indict), len(indict)))
for space in self.dual_spaces:
subspace = self.get_subspace(space)
for p1, p2 in subspace.tie_lines:
i1, i2 = sorted([indict[p1], indict[p2]])
adjacency[i1, i2] = 1
tl = set((phases[i], phases[j]) for i, j in zip(*np.nonzero(adjacency)))
self._tie_lines = tl
return tl
@property
def stable(self):
"""
List of stable phases
"""
if self._stable is None:
self.hull
# self.compute_hull()
return self._stable
@property
def unstable(self):
"""
List of unstable phases.
"""
if self._stable is None:
self.hull
# self.compute_hull()
return [p for p in self.phases if (not p in self.stable) and self.in_space(p)]
_tie_lines = None
@property
def tie_lines(self):
"""
List of length 2 tuples of phases with tie lines between them
"""
if self._tie_lines is None:
self.hull
# self.compute_hull()
return [list(tl) for tl in self._tie_lines]
@property
def tie_lines_list(self):
return list(self.tie_lines)
@property
def hull(self):
"""
List of facets of the convex hull.
"""
if self._hull is None:
self.get_hull()
return list(self._hull)
def get_hull(self):
if any(len(b) > 1 for b in self.bounds):
points = self.get_hull_points()
self.get_qhull(phases=points)
else:
self.get_qhull()
@property
def hull_list(self):
return list(self.hull)
_graph = None
@property
def graph(self):
"""
:mod:`networkx.Graph` representation of the phase space.
"""
if self._graph:
return self._graph
graph = nx.Graph()
graph.add_edges_from(self.tie_lines)
self._graph = graph
return self._graph
_cliques = None
@property
def cliques(self):
"""
Iterator over maximal cliques in the phase space. To get a list of
cliques, use list(PhaseSpace.cliques).
"""
if self._cliques is None:
self.find_cliques()
return self._cliques
def find_cliques(self):
self._cliques = nx.find_cliques(self.graph)
return self._cliques
def cliques_to_hull(self, cliques):
raise NotImplementedError
def stability_range(self, p, element=None):
"""
Calculate the range of phase `p` with respect to `element`.
"""
if element is None and len(self.mus) == 1:
element = list(self.mus.keys())[0]
tcomp = dict(p.unit_comp)
e, c = self.gclp(tcomp, mus=None)
tcomp[element] = tcomp.get(element, 0) + 0.001
edo, xdo = self.gclp(tcomp, mus=None)
tcomp[element] -= 0.001
if element in list(p.comp.keys()):
tcomp[element] -= 0.001
eup, xup = self.gclp(tcomp, mus=None)
return (edo - e) / 0.001, (e - eup) / 0.001
        else:
            # the phase contains none of `element`, so return an effectively
            # unbounded lower limit (-20 eV is used as a sentinel)
            return (edo - e) / 0.001, -20
def chempot_bounds(self, composition, total=False):
energy, phases = self.gclp(composition)
chems = {}
for eq in self.hull_list:
if not phases in eq:
continue
pots = eq.chemical_potentials
if total:
for k in pots:
pots[k] += qmpy.chem_pots["standard"]["elements"][k]
chems[eq] = pots
return chems
def chempot_range(self, p, element=None):
pot_bounds = {}
tcomp = dict(p.unit_comp)
e, c = self.gclp(tcomp, mus=None)
for elt in list(p.comp.keys()):
tcomp = dict(p.unit_comp)
tcomp[elt] -= 0.001
eup, xup = self.gclp(tcomp)
tcomp[elt] += 0.002
edo, xdo = self.gclp(tcomp)
pot_bounds[elt] = [(edo - e) / 0.001, (e - eup) / 0.001]
return pot_bounds
def get_tie_lines_by_gclp(self, iterable=False):
"""
Runs over pairs of Phases and tests for equilibrium by GCLP. Not
recommended, it is very slow.
"""
tie_lines = []
self.get_gclp_stable()
for k1, k2 in itertools.combinations(self.stable, 2):
testpoint = (self.coord(k1.unit_comp) + self.coord(k2.unit_comp)) / 2
energy, phases = self.gclp(self.comp(testpoint))
if abs(energy - (k1.energy + k2.energy) / 2) < 1e-8:
tie_lines.append([k1, k2])
if iterable:
yield [k1, k2]
self._tie_lines = tie_lines
def in_space(self, composition):
"""
Returns True, if the composition is in the right elemental-space
for this PhaseSpace.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.in_space('LiNiO2')
False
>>> space.in_space('Fe2O3')
True
"""
if self.bounds is None:
return True
if isinstance(composition, phase.Phase):
composition = composition.comp
elif isinstance(composition, str):
composition = parse_comp(composition)
if set(composition.keys()) <= self.space:
return True
else:
return False
def in_bounds(self, composition):
"""
Returns True, if the composition is within the bounds of the phase space
Examples::
>>> space = PhaseSpace('Fe2O3-NiO2-Li2O')
>>> space.in_bounds('Fe3O4')
False
>>> space.in_bounds('Li5FeO8')
True
"""
if self.bounds is None:
return True
if isinstance(composition, phase.Phase):
composition = composition.unit_comp
elif isinstance(composition, str):
composition = parse_comp(composition)
if not self.in_space(composition):
return False
composition = dict(
(k, v) for k, v in list(composition.items()) if k in self.bound_elements
)
composition = unit_comp(composition)
try:
c = self.coord(composition)
if len(self.bounds) < len(self.space):
comp = self.comp(c)
if set(comp.keys()) != set(composition.keys()) - set(self.mus.keys()):
return False
if not all(
[
abs(comp.get(k, 0) - composition.get(k, 0)) < 1e-3
for k in self.bound_elements
]
):
return False
except PhaseSpaceError:
return False
return True
### analysis stuff
def get_qhull(self, phases=None, mus={}):
"""
Get the convex hull for a given space.
"""
if phases is None: ## ensure there are phases to get the hull of
phases = list(self.phase_dict.values())
## ensure that all phases have negative formation energies
_phases = []
for p in phases:
if not p.use:
continue
if self.phase_energy(p) > 0:
continue
if not self.in_bounds(p):
continue
_phases.append(p)
phases = _phases
phase_space = set()
for p in phases:
phase_space |= p.space
A = []
for p in phases:
A.append(list(self.coord(p))[1:] + [self.phase_energy(p)])
dim = len(A[0])
        for i in range(dim):
            tmparr = [0 if a != i - 1 else 1 for a in range(dim)]
            if tmparr not in A:
                A.append(tmparr)
A = np.array(A)
if len(A) == len(A[0]):
self._hull = set([frozenset([p for p in phases])])
self._tie_lines = set(
[frozenset([k1, k2]) for k1, k2 in itertools.combinations(phases, r=2)]
)
self._stable = set([p for p in phases])
return
conv_hull = ConvexHull(A)
hull = set()
tie_lines = set()
stable = set()
for facet in conv_hull.simplices:
### various exclusion rules
if any([ind >= len(phases) for ind in facet]):
continue
if all(phases[ind].energy == 0 for ind in facet if ind < len(phases)):
continue
dim = len(facet)
face_matrix = np.array([A[i] for i in facet])
face_matrix[:, -1] = 1
v = np.linalg.det(face_matrix)
if abs(v) < 1e-8:
continue
face = frozenset([phases[ind] for ind in facet if ind < len(phases)])
stable |= set(face)
tie_lines |= set(
[frozenset([k1, k2]) for k1, k2 in itertools.combinations(face, r=2)]
)
hull.add(Equilibrium(face))
self._hull = hull
self._tie_lines = tie_lines
self._stable = stable
return hull
def get_chempot_qhull(self):
faces = list(self.hull)
A = []
for face in faces:
A.append([face.chem_pots[e] for e in self.elements])
A = np.array(A)
conv_hull = ConvexHull(A)
uhull = set()
for facet in conv_hull.simplices:
face = frozenset([faces[i] for i in facet if i < len(faces)])
uhull.add(face)
return uhull
def get_hull_points(self):
"""
Gets out-of PhaseSpace points. i.e. for FeSi2-Li, there are no other
phases in the space, but there are combinations of Li-Si phases and
Fe-Si phases. This method returns a list of phases including composite
phases from out of the space.
Examples::
>>> space = PhaseSpace('FeSi2-Li')
>>> space.get_hull_points()
[<Phase FeSi2 (23408): -0.45110217625>,
<Phase Li (104737): 0>,
<Phase 0.680 Li13Si4 + 0.320 FeSi : -0.3370691816>,
<Phase 0.647 Li8Si3 + 0.353 FeSi : -0.355992801765>,
<Phase 0.133 Fe3Si + 0.867 Li21Si5 : -0.239436904167>,
<Phase 0.278 FeSi + 0.722 Li21Si5 : -0.306877209723>]
"""
self._hull = set() # set of lists
self._stable = set() # set
done_list = [] # list of sorted lists
hull_points = [] # list of phases
if len(self.phases) == len(self.space):
            self._hull = set([frozenset(self.phases)])
self._stable = set(self.phases)
return
for b in self.bounds:
e, x = self.gclp(b)
p = phase.Phase.from_phases(x)
hull_points.append(p)
facets = [list(hull_points)]
while facets:
facet = facets.pop(0)
done_list.append(sorted(facet))
try:
phases, E = self.get_minima(list(self.phase_dict.values()), facet)
            except Exception:
continue
p = phase.Phase.from_phases(phases)
if p in self.phases:
p = self.phase_dict[p.name]
            if p not in hull_points:
hull_points.append(p)
for new_facet in itertools.combinations(facet, r=len(facet) - 1):
new_facet = list(new_facet) + [p]
if new_facet not in done_list:
facets.append(new_facet)
return hull_points
def gclp(self, composition={}, mus={}, phases=[]):
"""
        Returns the energy and phase composition that are stable at the given composition
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> energy, phases = space.gclp('FeLiO2')
        >>> print(phases)
        >>> print(energy)
"""
if not composition:
return 0.0, {}
if isinstance(composition, str):
composition = parse_comp(composition)
if not phases:
phases = [p for p in list(self.phase_dict.values()) if p.use]
        _mus = dict(self.mus)
if mus is None:
_mus = {}
else:
_mus.update(mus)
in_phases = []
space = set(composition.keys()) | set(_mus)
for p in phases:
if p.energy is None:
continue
# if self.in_bounds(p):
if not set(p.comp.keys()) <= space:
continue
in_phases.append(p)
##[vh]
##print "in_phases: ", in_phases
return self._gclp(composition=composition, mus=_mus, phases=in_phases)
def _gclp(self, composition={}, mus={}, phases=[]):
if not qmpy.FOUND_PULP:
raise Exception(
"Cannot do GCLP without installing PuLP and an LP", "solver"
)
prob = pulp.LpProblem("GibbsEnergyMin", pulp.LpMinimize)
phase_vars = pulp.LpVariable.dicts("lib", phases, 0.0)
prob += (
pulp.lpSum(
[
(
p.energy
- sum(
[
p.unit_comp.get(elt, 0) * mu
for elt, mu in list(mus.items())
]
)
)
* phase_vars[p]
for p in phases
]
),
"Free Energy",
)
for elt, constraint in list(composition.items()):
prob += (
pulp.lpSum([p.unit_comp.get(elt, 0) * phase_vars[p] for p in phases])
== float(constraint),
"Conservation of " + elt,
)
##[vh]
##print prob
if pulp.GUROBI().available():
prob.solve(pulp.GUROBI(msg=False))
elif pulp.COIN_CMD().available():
prob.solve(pulp.COIN_CMD())
else:
prob.solve()
phase_comp = dict(
[
(p, phase_vars[p].varValue)
for p in phases
if phase_vars[p].varValue > 1e-5
]
)
energy = sum(p.energy * amt for p, amt in list(phase_comp.items()))
energy -= sum([a * composition.get(e, 0) for e, a in list(mus.items())])
return energy, phase_comp
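    # Illustrative only: a toy version of the LP that _gclp builds, written out
    # explicitly with PuLP. The phases, their energies, and the one-element
    # composition are hypothetical; this helper is never called by the pipeline.
    @staticmethod
    def _toy_gclp_example():
        import pulp
        prob = pulp.LpProblem("ToyGCLP", pulp.LpMinimize)
        x_a = pulp.LpVariable("x_A", 0.0)  # amount of phase A, E = -0.5 eV/atom
        x_b = pulp.LpVariable("x_B", 0.0)  # amount of phase B, E = -0.3 eV/atom
        prob += -0.5 * x_a - 0.3 * x_b, "Free Energy"
        prob += x_a + x_b == 1.0, "Conservation of Fe"
        prob.solve()
        # the minimum puts all weight on phase A, the lower-energy phase
        return pulp.value(prob.objective)  # -> -0.5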
def get_minima(self, phases, bounds):
"""
Given a set of Phases, get_minima will determine the minimum
free energy elemental composition as a weighted sum of these
compounds
"""
prob = pulp.LpProblem("GibbsEnergyMin", pulp.LpMinimize)
pvars = pulp.LpVariable.dicts("phase", phases, 0)
bvars = pulp.LpVariable.dicts("bound", bounds, 0.0, 1.0)
prob += (
pulp.lpSum(self.phase_energy(p) * pvars[p] for p in phases)
- pulp.lpSum(self.phase_energy(bound) * bvars[bound] for bound in bounds),
"Free Energy",
)
for elt in self.bound_space:
prob += (
sum([p.unit_comp.get(elt, 0) * pvars[p] for p in phases])
== sum([b.unit_comp.get(elt, 0) * bvars[b] for b in bounds]),
"Contraint to the proper range of" + elt,
)
prob += sum([bvars[b] for b in bounds]) == 1, "sum of bounds must be 1"
if pulp.GUROBI().available():
prob.solve(pulp.GUROBI(msg=False))
elif pulp.COIN_CMD().available():
prob.solve(pulp.COIN_CMD())
elif pulp.COINMP_DLL().available():
prob.solve(pulp.COINMP_DLL())
else:
prob.solve()
E = pulp.value(prob.objective)
xsoln = defaultdict(
float,
[(p, pvars[p].varValue) for p in phases if abs(pvars[p].varValue) > 1e-4],
)
return xsoln, E
def compute_hull(self):
phases = [
p
for p in list(self.phase_dict.values())
if (self.phase_energy(p) < 0 and len(p.space) > 1)
]
region = Region([self.phase_dict[elt] for elt in self.space])
region.contained = phases
def compute_stability(self, p):
"""
Compute the energy difference between the formation energy of a Phase,
and the energy of the convex hull in the absence of that phase.
"""
# if self.phase_dict[p.name] != p:
# stable = self.phase_dict[p.name]
# p.stability = p.energy - stable.energy
if len(p.comp) == 1:
stable = self.phase_dict[p.name]
p.stability = p.energy - stable.energy
else:
phases = list(self.phase_dict.values())
# < Mohan
# Add Error Handling for phase.remove(p)
# Old Code:
# phases.remove(p)
# New Code:
try:
phases.remove(p)
except ValueError:
import copy
_ps_dict = copy.deepcopy(self.phase_dict)
_ps_dict.pop(p.name, None)
phases = list(_ps_dict.values())
# Mohan >
energy, gclp_phases = self.gclp(p.unit_comp, phases=phases)
##print p, energy, gclp_phases
# vh
# print p, '------', gclp_phases
p.stability = p.energy - energy
# vh
return energy, gclp_phases
@transaction.atomic
def compute_stabilities(self, phases=None, save=False, reevaluate=True):
"""
Calculate the stability for every Phase.
Keyword Arguments:
phases:
List of Phases. If None, uses every Phase in PhaseSpace.phases
save:
If True, save the value for stability to the database.
new_only:
If True, only compute the stability for Phases which did not
import a stability from the OQMD. False by default.
"""
from qmpy.analysis.vasp.calculation import Calculation
if phases is None:
phases = self.phases
if reevaluate:
for p in self.phases:
p.stability = None
for p in phases:
if p.stability is None:
if p in list(self.phase_dict.values()):
self.compute_stability(p)
else:
p2 = self.phase_dict[p.name]
if p2.stability is None:
self.compute_stability(p2)
base = max(0, p2.stability)
diff = p.energy - p2.energy
p.stability = base + diff
if save:
qs = qmpy.FormationEnergy.objects.filter(id=p.id)
qs.update(stability=p.stability)
def save_tie_lines(self):
"""
Save all tie lines in this PhaseSpace to the OQMD. Stored in
Formation.equilibrium
"""
for p1, p2 in self.tie_lines:
p1.formation.equilibrium.add(p2.formation)
def compute_formation_energies(self):
"""
Evaluates the formation energy of every phase with respect to the
chemical potentials in the PhaseSpace.
"""
ref = []
for b in self.bounds:
if format_comp(b) in self.mus:
                ref.append(self.mus[format_comp(b)])
else:
ref.append(self.gclp(b)[0])
ref = np.array(ref)
for p in self.phases:
p.energy = p.energy - sum(self.coord(p) * ref)
renderer = None
@property
def phase_diagram(self, **kwargs):
"""Renderer of a phase diagram of the PhaseSpace"""
if self.renderer is None:
self.get_phase_diagram(**kwargs)
return self.renderer
@property
def neighboring_equilibria(self):
neighbors = []
for eq1, eq2 in itertools.combinations(self.hull, r=2):
if eq1.adjacency(eq2) == 1:
neighbors.append([eq1, eq2])
return neighbors
def find_reaction_mus(self, element=None):
"""
Find the chemical potentials of a specified element at which reactions
occur.
Examples::
>>> s = PhaseSpace('Fe-Li-O')
>>> s.find_reaction_mus('O')
"""
if element is None and len(self.mus) == 1:
element = list(self.mus.keys())[0]
ps = PhaseSpace("-".join(self.space), data=self.data)
chem_pots = set()
for p in ps.stable:
chem_pots |= set(self.stability_range(p, element))
return sorted(chem_pots)
def chempot_scan(self, element=None, umin=None, umax=None):
"""
Scan through chemical potentials of `element` from `umin` to `umax`
        identifying values at which phase transformations occur.
"""
if element is None and len(self.mus) == 1:
element = list(self.mus.keys())[0]
mus = self.find_reaction_mus(element=element)
if umin is None:
umin = min(mus)
if umax is None:
umax = max(mus)
windows = {}
hulls = []
mus = sorted(mus)
for i in range(len(mus)):
mu = mus[i]
if mu < umin or mu > umax:
continue
if i == 0:
nu = mu - 1
window = (None, mu)
elif i == len(mus) - 1:
nu = mu + 1
window = (mu, None)
else:
nu = np.average([mu, mus[i + 1]])
window = (mu, mus[i + 1])
self.mus[element] = nu
self.get_hull()
windows[window] = list(self.stable)
return windows
def get_phase_diagram(self, **kwargs):
"""
Creates a Renderer attribute with appropriate phase diagram components.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
        >>> space.get_phase_diagram()
>>> plt.show()
"""
self.renderer = Renderer()
if self.shape == (0, 0):
self.make_as_unary(**kwargs)
elif self.shape == (1, 0):
self.make_as_binary(**kwargs)
elif self.shape == (2, 0):
self.make_as_ternary(**kwargs)
elif self.shape == (3, 0):
self.make_as_quaternary(**kwargs)
elif self.shape == (0, 1):
self.make_1d_vs_chempot(**kwargs)
elif self.shape == (1, 1):
self.make_vs_chempot(**kwargs)
else:
ps = PhaseSpace("-".join(self.space), data=self.data, load=None)
ps.renderer = Renderer()
ps.make_as_graph(**kwargs)
self.renderer = ps.renderer
def make_as_unary(self, **kwargs):
"""
Plot of phase volume vs formation energy.
Examples::
>>> s = PhaseSpace('Fe2O3')
>>> r = s.make_as_unary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
bottom, gclp = self.gclp(self.bounds[0])
bottom /= sum(self.bounds[0].values())
gs = phase.Phase.from_phases(gclp)
points = []
for p in self.phases:
if not self.in_bounds(p):
continue
if not p.calculation:
continue
v = p.calculation.volume_pa
pt = Point([v, self.phase_energy(p) - bottom], label=p.label)
points.append(pt)
# self.renderer.text.append(Text(pt, p.calculation.entry_id))
pc = PointCollection(points, color="red")
self.renderer.add(pc)
pt = Point([gs.volume, 0], label=gs.label, color="green")
self.renderer.add(pt)
xaxis = Axis("x", label="Volume", units="Å<sup>3</sup>/atom")
yaxis = Axis("y", label="Relative Energy", units="eV/atom")
self.renderer.xaxis = xaxis
self.renderer.yaxis = yaxis
self.renderer.options["grid"]["hoverable"] = True
self.renderer.options["tooltip"] = True
def make_1d_vs_chempot(self, **kwargs):
"""
Plot of phase stability vs chemical potential for a single composition.
Examples::
>>> s = PhaseSpace('Fe', mus={'O':[0,-4]})
        >>> r = s.make_1d_vs_chempot()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
self.make_vs_chempot(**kwargs)
self.renderer.xaxis.min = 0.5
self.renderer.xaxis.max = 1.5
self.renderer.xaxis.options["show"] = False
def make_vs_chempot(self, **kwargs):
"""
Plot of phase stability vs chemical potential for a range of
compositions.
Examples::
>>> s = PhaseSpace('Fe-Li', mus={'O':[0,-4]})
>>> r = s.make_vs_chempot()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
xaxis = Axis("x")
xaxis.min, xaxis.max = (0, 1)
xaxis.label = "-".join([format_comp(b) for b in self.bounds])
elt = list(self.mus.keys())[0]
yaxis = Axis("y", label="Δμ<sub>" + elt + "</sub>", units="eV/atom")
murange = list(self.mus.values())[0]
yaxis.min = min(murange)
yaxis.max = max(murange)
self.renderer.xaxis = xaxis
self.renderer.yaxis = yaxis
        if False:  # disabled: legacy per-window rendering path, kept for reference
points = []
for window, hull in list(self.chempot_scan().items()):
hull = sorted(hull, key=lambda x: self.coord(x)[0])
for i in range(len(hull) - 1):
p1 = hull[i]
p2 = hull[i + 1]
x1 = self.coord(p1)[0]
x2 = self.coord(p2)[0]
line = Line(
[
Point([x1, window[0], window[1]]),
Point([x2, window[0], window[1]]),
],
fill=True,
)
self.renderer.add(line)
ps = PhaseSpace("-".join(self.space), data=self.data, load=None)
points = set()
lines = []
hlines = set()
for p in ps.stable:
if not self.in_bounds(p):
continue
bot, top = ps.stability_range(p, elt)
x = self.coord(p)[0]
line = Line([Point([x, bot]), Point([x, top])], color="blue")
lines.append(line)
hlines |= set([bot, top])
points.add(Point([x, bot]))
points.add(Point([x, top]))
y = np.average([bot, top])
if y < min(murange):
y = min(murange)
elif y > max(murange):
y = max(murange)
t = Text([x, y], "<b>%s</b>" % p.name)
self.renderer.add(t)
pc = PointCollection(list(points), color="green")
for h in hlines:
self.renderer.add(Line([[0, h], [1, h]], color="grey"))
for l in lines:
self.renderer.add(l)
self.renderer.add(pc)
self.renderer.options["grid"]["hoverable"] = True
def make_as_binary(self, **kwargs):
"""
Construct a binary phase diagram (convex hull) and write it to a
:mod:`~qmpy.Renderer`.
Examples::
>>> s = PhaseSpace('Fe-P')
>>> r = s.make_as_binary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
xlabel = "%s<sub>x</sub>%s<sub>1-x</sub>" % (
format_comp(self.bounds[0]),
format_comp(self.bounds[1]),
)
xaxis = Axis("x", label=xlabel)
xaxis.min, xaxis.max = (0, 1)
yaxis = Axis("y", label="Delta H", units="eV/atom")
self.renderer.xaxis = xaxis
self.renderer.yaxis = yaxis
for p1, p2 in self.tie_lines:
pt1 = Point([self.coord(p1)[0], self.phase_energy(p1)])
pt2 = Point([self.coord(p2)[0], self.phase_energy(p2)])
self.renderer.lines.append(Line([pt1, pt2], color="grey"))
points = []
for p in self.unstable:
if not p.use:
continue
if self.phase_energy(p) > 0:
continue
if not self.in_bounds(p):
continue
x = self.coord(p.unit_comp)[0]
pt = Point([x, self.phase_energy(p)], label=p.label)
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=1, color="red")
)
points = []
for p in self.stable:
if not self.in_bounds(p):
continue
x = self.coord(p.unit_comp)[0]
pt = Point([x, self.phase_energy(p)], label=p.label)
if p.show_label:
self.renderer.text.append(Text(pt, p.name))
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=True, color="green")
)
self.renderer.options["grid"]["hoverable"] = True
self.renderer.options["tooltip"] = True
self.renderer.options["tooltipOpts"] = {"content": "%label"}
def make_as_ternary(self, **kwargs):
"""
Construct a ternary phase diagram and write it to a
:mod:`~qmpy.Renderer`.
Examples::
        >>> s = PhaseSpace('Fe-Li-O')
        >>> r = s.make_as_ternary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
for p1, p2 in self.tie_lines:
pt1 = Point(coord_to_gtri(self.coord(p1)))
pt2 = Point(coord_to_gtri(self.coord(p2)))
line = Line([pt1, pt2], color="grey")
self.renderer.lines.append(line)
points = []
for p in self.unstable:
if not self.in_bounds(p):
continue
if self.phase_dict[p.name] in self.stable:
continue
##pt = Point(coord_to_gtri(self.coord(p)), label=p.label)
options = {"hull_distance": p.stability}
pt = Point(coord_to_gtri(self.coord(p)), label=p.label, **options)
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=True, color="red")
)
self.renderer.options["xaxis"]["show"] = False
points = []
for p in self.stable:
if not self.in_bounds(p):
continue
pt = Point(coord_to_gtri(self.coord(p)), label=p.label)
if p.show_label:
self.renderer.add(Text(pt, p.name))
points.append(pt)
self.renderer.point_collections.append(
PointCollection(points, fill=True, color="green")
)
self.renderer.options["grid"]["hoverable"] = (True,)
self.renderer.options["grid"]["borderWidth"] = 0
self.renderer.options["grid"]["margin"] = 4
self.renderer.options["grid"]["show"] = False
self.renderer.options["tooltip"] = True
def make_as_quaternary(self, **kwargs):
"""
Construct a quaternary phase diagram and write it to a
:mod:`~qmpy.Renderer`.
Examples::
>>> s = PhaseSpace('Fe-Li-O-P')
>>> r = s.make_as_quaternary()
>>> r.plot_in_matplotlib()
>>> plt.show()
"""
# plot lines
for p1, p2 in self.tie_lines:
pt1 = Point(coord_to_gtet(self.coord(p1)))
pt2 = Point(coord_to_gtet(self.coord(p2)))
line = Line([pt1, pt2], color="grey")
self.renderer.add(line)
# plot compounds
### < Mohan
# Use phase_dict to collect unstable phases, which will
# return one phase per composition
points = []
for c, p in list(self.phase_dict.items()):
if not self.in_bounds(p):
continue
if p in self.stable:
continue
            if p.stability is None:
continue
label = "{}<br> hull distance: {:.3f} eV/atom<br> formation energy: {:.3f} eV/atom".format(
p.name, p.stability, p.energy
)
pt = Point(coord_to_gtet(self.coord(p)), label=label)
points.append(pt)
self.renderer.add(PointCollection(points, color="red", label="Unstable"))
## Older codes:
# for p in self.unstable:
# if not self.in_bounds(p):
# continue
# pt = Point(coord_to_gtet(self.coord(p)), label=p.name)
# points.append(pt)
# self.renderer.add(PointCollection(points,
# color='red', label='Unstable'))
### Mohan >
points = []
for p in self.stable:
if not self.in_bounds(p):
continue
label = "%s:<br>- " % p.name
label += " <br>- ".join(o.name for o in list(self.graph[p].keys()))
pt = Point(coord_to_gtet(self.coord(p)), label=label)
points.append(pt)
if p.show_label:
self.renderer.add(Text(pt, format_html(p.comp)))
self.renderer.add(PointCollection(points, color="green", label="Stable"))
self.renderer.options["grid"]["hoverable"] = (True,)
self.renderer.options["grid"]["borderWidth"] = 0
self.renderer.options["grid"]["show"] = False
self.renderer.options["tooltip"] = True
def make_as_graph(self, **kwargs):
"""
Construct a graph-style visualization of the phase diagram.
"""
G = self.graph
positions = nx.drawing.nx_agraph.pygraphviz_layout(G)
for p1, p2 in self.tie_lines:
pt1 = Point(positions[p1])
pt2 = Point(positions[p2])
line = Line([pt1, pt2], color="grey")
self.renderer.add(line)
points = []
for p in self.stable:
label = "%s:<br>" % p.name
for other in list(G[p].keys()):
label += " -%s<br>" % other.name
pt = Point(positions[p], label=label)
points.append(pt)
if p.show_label:
self.renderer.add(Text(pt, p.name))
pc = PointCollection(points, color="green")
self.renderer.add(pc)
self.renderer.options["grid"]["hoverable"] = True
self.renderer.options["grid"]["borderWidth"] = 0
self.renderer.options["grid"]["show"] = False
self.renderer.options["tooltip"] = True
def stability_window(self, composition, **kwargs):
self.renderer = Renderer()
chem_pots = self.chempot_bounds(composition)
for eq, pots in list(chem_pots.items()):
pt = Point(coord_to_point([pots[k] for k in self.elements]))
self.renderer.add(pt)
def get_reaction(self, var, facet=None):
"""
For a given composition, what is the maximum delta_composition reaction
on the given facet. If None, returns the whole reaction for the given
PhaseSpace.
Examples::
>>> space = PhaseSpace('Fe2O3-Li2O')
>>> equilibria = space.hull[0]
>>> space.get_reaction('Li2O', facet=equilibria)
"""
if isinstance(var, str):
var = parse_comp(var)
if facet:
phases = facet
else:
phases = self.stable
prob = pulp.LpProblem("BalanceReaction", pulp.LpMaximize)
pvars = pulp.LpVariable.dicts("prod", phases, 0)
rvars = pulp.LpVariable.dicts("react", phases, 0)
prob += (
sum([p.fraction(var)["var"] * pvars[p] for p in phases])
- sum([p.fraction(var)["var"] * rvars[p] for p in phases]),
"Maximize delta comp",
)
for celt in self.space:
prob += (
sum([p.fraction(var)[celt] * pvars[p] for p in phases])
== sum([p.fraction(var)[celt] * rvars[p] for p in phases]),
"identical %s composition on both sides" % celt,
)
prob += sum([rvars[p] for p in phases]) == 1
if pulp.GUROBI().available():
prob.solve(pulp.GUROBI(msg=False))
elif pulp.COIN_CMD().available():
prob.solve(pulp.COIN_CMD())
elif pulp.COINMP_DLL().available():
prob.solve(pulp.COINMP_DLL())
else:
prob.solve()
prods = defaultdict(
float, [(c, pvars[c].varValue) for c in phases if pvars[c].varValue > 1e-4]
)
reacts = defaultdict(
float, [(c, rvars[c].varValue) for c in phases if rvars[c].varValue > 1e-4]
)
n_elt = pulp.value(prob.objective)
return reacts, prods, n_elt
def get_reactions(self, var, electrons=1.0):
"""
Returns a list of Reactions.
Examples::
>>> space = PhaseSpace('Fe-Li-O')
>>> space.get_reactions('Li', electrons=1)
"""
if isinstance(var, str):
var = parse_comp(var)
vname = format_comp(reduce_comp(var))
vphase = self.phase_dict[vname]
vpd = dict((self.phase_dict[k], v) for k, v in list(var.items()))
for facet in self.hull:
reacts, prods, delta_var = self.get_reaction(var, facet=facet)
if vphase in facet:
yield Reaction(
products={vphase: sum(vphase.comp.values())},
reactants={},
delta_var=1.0,
electrons=electrons,
variable=var,
)
continue
elif delta_var < 1e-6:
pass
yield Reaction(
products=prods,
reactants=reacts,
delta_var=delta_var,
variable=var,
electrons=electrons,
)
def plot_reactions(self, var, electrons=1.0, save=False):
"""
Plot the convex hull along the reaction path, as well as the voltage
profile.
"""
if isinstance(var, str):
var = parse_comp(var)
vname = format_comp(var)
fig = plt.figure()
ax1 = fig.add_subplot(211)
# plot tie lines
for p1, p2 in self.tie_lines:
c1 = p1.fraction(var)["var"]
c2 = p2.fraction(var)["var"]
if abs(c1) < 1e-4 or abs(c2) < 1e-4:
if abs(c1 - 1) < 1e-4 or abs(c2 - 1) < 1e-4:
if len(self.tie_lines) > 1:
continue
ax1.plot([c1, c2], [self.phase_energy(p1), self.phase_energy(p2)], "k")
# plot compounds
for p in self.stable:
x = p.fraction(var)["var"]
ax1.plot(x, self.phase_energy(p), "bo")
ax1.text(
x, self.phase_energy(p), "$\\rm{%s}$" % p.latex, ha="left", va="top"
)
plt.ylabel("$\\rm{\Delta H}$ $\\rm{[eV/atom]}$")
ymin, ymax = ax1.get_ylim()
ax1.set_ylim(ymin - 0.1, ymax)
ax2 = fig.add_subplot(212, sharex=ax1)
points = set()
for reaction in self.get_reactions(var, electrons=electrons):
if reaction.delta_var == 0:
continue
voltage = reaction.delta_h / reaction.delta_var / electrons
x1 = reaction.r_var_comp
x2 = reaction.p_var_comp
points |= set([(x1, voltage), (x2, voltage)])
points = sorted(points, key=lambda x: x[0])
points = sorted(points, key=lambda x: -x[1])
#!v
# print points
base = sorted(self.stable, key=lambda x: x.amt(var)["var"])[0]
max_x = max([k[0] for k in points])
if len(points) > 1:
for i in range(len(points) - 2):
ax2.plot(
[points[i][0], points[i + 1][0]],
[points[i][1], points[i + 1][1]],
"k",
)
ax2.plot(
[points[-2][0], points[-2][0]], [points[-2][1], points[-1][1]], "k"
)
ax2.plot([points[-2][0], max_x], [points[-1][1], points[-1][1]], "k")
else:
ax2.plot([0, max_x], [points[0][1], points[0][1]], "k")
plt.xlabel(
"$\\rm{x}$ $\\rm{in}$ $\\rm{(%s)_{x}(%s)_{1-x}}$"
% (format_latex(var), base.latex)
)
plt.ylabel("$\\rm{Voltage}$ $\\rm{[V]}$")
return ax1, ax2
# if not save:
# plt.show()
# else:
# plt.savefig('%s-%s.eps' % (save, vname),
# bbox_inches='tight',
# transparent=True,
# pad_inches=0)
| {
"content_hash": "d4e66a025277b5a824826eeeeb972144",
"timestamp": "",
"source": "github",
"line_count": 1783,
"max_line_length": 103,
"avg_line_length": 32.004486819966345,
"alnum_prop": 0.503539885041357,
"repo_name": "wolverton-research-group/qmpy",
"id": "5e22832dfee835728f2d8da40e258b636d951ed7",
"size": "57105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qmpy/analysis/thermodynamics/space.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32296"
},
{
"name": "Cython",
"bytes": "745"
},
{
"name": "GAP",
"bytes": "5012"
},
{
"name": "HTML",
"bytes": "144402"
},
{
"name": "JavaScript",
"bytes": "16825352"
},
{
"name": "PHP",
"bytes": "7301"
},
{
"name": "Python",
"bytes": "800075"
},
{
"name": "Shell",
"bytes": "2784"
}
],
"symlink_target": ""
} |
"""Scalyr Agent Plugin Module - Windows Process Metrics
This module extends the ScalyrMonitor base class to implement it's functionality, a process
metrics collector for the Windows (Server 2003 and newer) platforms, as a monitor plugin into
the Scalyr plugin framework.
The two most important objects in this monitor are:
1. The METRICS list; which defines the metrics that this module will collect
2. The ProcessMonitor class which drives the collection of each defined metric and emits
its associated value at a specified sampling rate.
>>> import re, operator, collections
>>> metric_template = "{metric.metric_name} - {metric.description} {metric.units}".format
>>> criteria = dict(
... category = 'cpu',
... metric_name = 'winproc.disk.*',
... match = any
... )
>>> predicates = [(operator.itemgetter(k), re.compile(v))
... for k,v in criteria.items()
...     if k != 'match']
>>> Telemetry = collections.namedtuple('Telemetry', 'match metric attribute fetcher matcher')
>>> for metric in METRICS:
... matches = []
... for fetcher, matcher in predicates:
... attribute = fetcher(metric)
... match = matcher.search(attribute)
... matches.append(Telemetry(match, metric, attribute, fetcher, matcher))
... else:
...         if any(filter(operator.attrgetter('match'), matches)):
...             print(metric_template(metric=metric))
>>> import scalyr_agent
>>> from os import path
>>> from scalyr_agent import run_monitor
>>> monitors_path = path.join(path.dirname(scalyr_agent.__file__), 'builtin_monitors')
>>> cmdline = ['-p', monitors_path, '-c', '{commandline:cmd}', 'windows_process_metrics']
>>> parser = run_monitor.create_parser()
>>> options, args = parser.parse_args(cmdline)
>>> run_monitor.run_standalone_monitor(args[0], options.monitor_module, options.monitors_path,
...     options.monitor_config, options.monitor_sample_interval)
0
>>>
Author: Scott Sullivan <guy.hoozdis+scalyr@gmail.com>
License: Apache 2.0
------------------------------------------------------------------------
Copyright 2014 Scalyr Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------
"""
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "Scott Sullivan <guy.hoozdis@gmail.com>"
__version__ = "0.0.1"
__monitor__ = __name__
import os
import re
import datetime
import time
from operator import methodcaller, attrgetter
from collections import namedtuple
try:
import psutil
except ImportError:
psutil = None
import six
from scalyr_agent import ScalyrMonitor, UnsupportedSystem, BadMonitorConfiguration
from scalyr_agent import define_config_option, define_metric, define_log_field
from scalyr_agent import scalyr_logging
global_log = scalyr_logging.getLogger(__name__)
#
# Monitor Configuration - defines the runtime environment and resources available
#
CONFIG_OPTIONS = [
dict(
option_name="module",
option_description="Always `scalyr_agent.builtin_monitors.windows_process_metrics`",
convert_to=six.text_type,
required_option=True,
),
dict(
option_name="id",
option_description="An id, included with each event. Shows in the UI as a value "
"for the `instance` field. This is especially useful if you are running multiple "
"instances of this plugin to import metrics from multiple processes. Each instance "
"has a separate `{...}` stanza in the configuration file "
r"(`C:\Program Files (x86)\Scalyr\config\agent.json`).",
required_option=True,
convert_to=six.text_type,
),
dict(
option_name="commandline",
option_description="A regular expression, matching on the command line output "
"of `tasklist`, or `wmic process list`. Selects the process of interest. "
"If multiple processes match, only metrics from the first match are imported.",
default=None,
convert_to=six.text_type,
),
dict(
option_name="pid",
option_description="Process identifier (PID). An alternative to `commandline` "
"to select a process. If `commandline` is set, this property is ignored.",
default=None,
convert_to=six.text_type,
),
]
_ = [
define_config_option(__monitor__, **option) for option in CONFIG_OPTIONS # type: ignore
]
# End Monitor Configuration
# #########################################################################################
# #########################################################################################
# #########################################################################################
# ## Process's Metrics / Dimensions -
# ##
# ## Metrics define the capabilities of this monitor. This section contains some
# ## utility functions along with the list(s) of metrics themselves.
# ##
def _gather_metric(method, attribute=None, transform=None):
"""Curry arbitrary process metric extraction
@param method: a callable member of the process object interface
@param attribute: an optional data member, of the data structure returned by ``method``
@param transform: an optional function that can be used to transform the value returned by ``method``.
The function should take a single argument and return the value to report as the metric value.
@type method callable
@type attribute str
@type transform: func()
"""
doc = "Extract the {} attribute from the given process object".format
if attribute:
doc = ( # NOQA
"Extract the {}().{} attribute from the given process object".format
)
def gather_metric(process):
"""Dynamically Generated"""
errmsg = (
"Only the 'psutil.Process' interface is supported currently; not {}".format
)
proc_type = type(process)
assert proc_type is psutil.Process, errmsg(proc_type)
metric = methodcaller(method) # pylint: disable=redefined-outer-name
if attribute is not None:
value = attrgetter(attribute)(metric(process))
else:
value = metric(process)
if transform is not None:
value = transform(value)
return value
# XXX: For some reason this was causing trouble for the documentation build process
# gather_metric.__doc__ = doc(method, attribute)
return gather_metric
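# Illustrative usage of _gather_metric (not part of the monitor itself). The
# metric chosen here is arbitrary, and this assumes the psutil import above
# succeeded on the host.
def _example_gather_metric():
    """Return the resident set size, in bytes, of the current process."""
    rss_of = _gather_metric("memory_info", "rss")
    return rss_of(psutil.Process(os.getpid()))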
# TODO: I believe this function can be deleted.
def uptime(start_time):
"""Calculate the difference between now() and the given create_time.
    @param start_time: seconds passed since 'event' (not since epoch)
@type float
"""
return datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(start_time)
def uptime_from_start_time(start_time):
"""Returns the uptime for the process given its ``start_time``.
@param start_time: The time the process started in seconds past epoch.
@type start_time: float
@return: The seconds since the process started.
@rtype: float
"""
return time.time() - start_time
METRIC = namedtuple("METRIC", "config dispatch")
METRIC_CONFIG = dict # pylint: disable=invalid-name
GATHER_METRIC = _gather_metric
# =================================================================================
# ============================ Process CPU ==================================
# =================================================================================
_PROCESS_CPU_METRICS = [
METRIC( # ------------------ User-mode CPU ----------------------------
METRIC_CONFIG(
metric_name="winproc.cpu",
description="Seconds of user space CPU execution. The value is cumulative since process "
"start; see `winproc.uptime`.",
category="CPU",
unit="secs",
cumulative=True,
extra_fields={"type": "user"},
),
GATHER_METRIC("cpu_times", "user"),
),
METRIC( # ------------------ Kernel-mode CPU ----------------------------
METRIC_CONFIG(
metric_name="winproc.cpu",
description="Seconds of kernel space CPU execution. The value is cumulative since "
"process start; see `winproc.uptime`.",
category="CPU",
unit="secs",
cumulative=True,
extra_fields={"type": "system"},
),
GATHER_METRIC("cpu_times", "system"),
),
# TODO: Additional attributes for this section
# * context switches
# * ...
]
# =================================================================================
# ======================== Process Attributes ===============================
# =================================================================================
_PROCESS_ATTRIBUTE_METRICS = [
METRIC( # ------------------ Process Uptime ----------------------------
METRIC_CONFIG(
metric_name="winproc.uptime",
description="Process uptime, in seconds.",
category="General",
unit="seconds",
cumulative=True,
extra_fields={},
),
GATHER_METRIC("create_time", transform=uptime_from_start_time),
),
METRIC( # ------------------ Process Threads ----------------------------
METRIC_CONFIG(
metric_name="winproc.threads",
description="Number of threads used by the process.",
category="General",
extra_fields={},
),
GATHER_METRIC("num_threads"),
),
# TODO: Additional attributes for this section
# * number of handles
# * number of child processes
# * process priority
# * process cmdline
    # * process working directory
# * process env vars
# * parent PID
# * cpu affinity
]
# =================================================================================
# ======================== Process Memory ===================================
# =================================================================================
_PROCESS_MEMORY_METRICS = [
METRIC( # ------------------ Working Set ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Bytes of physical memory used by the process's working set. "
"Memory that must be paged in for the process to execute.",
category="Memory",
unit="bytes",
extra_fields={"type": "working_set"},
),
GATHER_METRIC("memory_info_ex", "wset"),
),
METRIC( # ------------------ Peak Working Set ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Peak working set size, in bytes, for the process since creation.",
category="Memory",
unit="bytes",
extra_fields={"type": "peak_working_set"},
),
GATHER_METRIC("memory_info_ex", "peak_wset"),
),
METRIC( # ------------------ Paged Pool ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Paged-pool usage, in bytes. Swappable memory in use.",
category="Memory",
unit="bytes",
extra_fields={"type": "paged_pool"},
),
GATHER_METRIC("memory_info_ex", "paged_pool"),
),
METRIC( # ------------------ Peak Paged Pool ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Peak paged-pool usage, in bytes.",
category="Memory",
unit="bytes",
extra_fields={"type": "peak_paged_pool"},
),
GATHER_METRIC("memory_info_ex", "peak_paged_pool"),
),
METRIC( # ------------------ NonPaged Pool ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Nonpaged pool usage, in bytes. "
"Memory in use that cannot be swapped out to disk.",
category="Memory",
unit="bytes",
extra_fields={"type": "nonpaged_pool"},
),
GATHER_METRIC("memory_info_ex", "nonpaged_pool"),
),
METRIC( # ------------------ Peak NonPaged Pool ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Peak nonpaged pool usage, in bytes.",
category="Memory",
unit="bytes",
extra_fields={"type": "peak_nonpaged_pool"},
),
GATHER_METRIC("memory_info_ex", "peak_nonpaged_pool"),
),
METRIC( # ------------------ Pagefile ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Pagefile usage, in bytes. Bytes the system has "
"committed for this running process.",
category="Memory",
unit="bytes",
extra_fields={"type": "pagefile"},
),
GATHER_METRIC("memory_info_ex", "pagefile"),
),
METRIC( # ------------------ Peak Pagefile ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Peak pagefile usage, in bytes.",
category="Memory",
unit="bytes",
extra_fields={"type": "peak_pagefile"},
),
GATHER_METRIC("memory_info_ex", "peak_pagefile"),
),
METRIC( # ------------------ Resident size ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Current resident memory size, in bytes. This should be the same as the working set.",
category="Memory",
unit="bytes",
extra_fields={"type": "rss"},
),
GATHER_METRIC("memory_info", "rss"),
),
METRIC( # ------------------ Virtual memory size ----------------------------
METRIC_CONFIG(
metric_name="winproc.mem.bytes",
description="Virtual memory size, in bytes. Does not include shared pages.",
category="Memory",
unit="bytes",
extra_fields={"type": "vms"},
),
GATHER_METRIC("memory_info", "vms"),
),
# TODO: Additional attributes for this section
# * ...
]
# =================================================================================
# ============================= DISK IO =====================================
# =================================================================================
_PROCESS_DISK_IO_METRICS = [
METRIC( # ------------------ Disk Read Operations ----------------------------
METRIC_CONFIG(
metric_name="winproc.disk.ops",
description="Number of disk read requests. The value is cumulative since "
"process start; see `winproc.uptime`.",
category="Disk",
unit="requests",
cumulative=True,
extra_fields={"type": "read"},
),
GATHER_METRIC("io_counters", "read_count"),
),
METRIC( # ------------------ Disk Write Operations ----------------------------
METRIC_CONFIG(
metric_name="winproc.disk.ops",
description="Number of disk write requests. The value is cumulative since "
"process start; see `winproc.uptime`.",
category="Disk",
unit="requests",
cumulative=True,
extra_fields={"type": "write"},
),
GATHER_METRIC("io_counters", "write_count"),
),
METRIC( # ------------------ Disk Read Bytes ----------------------------
METRIC_CONFIG(
metric_name="winproc.disk.bytes",
description="Bytes read from disk. The value is cumulative since process "
"start; see `winproc.uptime`.",
category="Disk",
unit="bytes",
cumulative=True,
extra_fields={"type": "read"},
),
GATHER_METRIC("io_counters", "read_bytes"),
),
    METRIC( # ------------------ Disk Write Bytes ----------------------------
METRIC_CONFIG(
metric_name="winproc.disk.bytes",
description="Bytes written to disk. The value is cumulative since process "
"start; see `winproc.uptime`.",
category="Disk",
unit="bytes",
cumulative=True,
extra_fields={"type": "write"},
),
GATHER_METRIC("io_counters", "write_bytes"),
)
# TODO: Additional attributes for this section
# * ...
]
METRICS = (
_PROCESS_CPU_METRICS
+ _PROCESS_ATTRIBUTE_METRICS
+ _PROCESS_MEMORY_METRICS
+ _PROCESS_DISK_IO_METRICS
)
_ = [define_metric(__monitor__, **metric.config) for metric in METRICS]
#
# Logging / Reporting - defines the method and content in which the metrics are reported.
#
define_log_field(__monitor__, "monitor", "Always `windows_process_metrics`.")
define_log_field(
__monitor__,
"instance",
"The `id` value, for example `tomcat`.",
)
define_log_field(
__monitor__,
"app",
"Same as `instance`; created for compatibility with the original Scalyr Agent.",
)
define_log_field(
__monitor__,
"metric",
'Name of the metric, for example "winproc.cpu". Some metrics have additional '
"fields; see the [Metrics Reference](#metrics).",
)
define_log_field(__monitor__, "value", "Value of the metric.")
#
#
#
def commandline_matcher(regex, flags=re.IGNORECASE):
"""
@param regex: a regular expression to compile and use to search process commandlines for matches
@param flags: modify the regular expression with standard flags (see ``re`` module)
@type regex str
@type flags int
"""
pattern = re.compile(regex, flags)
def _cmdline(process):
"""Compose the process's commandline parameters as a string"""
return " ".join(process.cmdline())
def _match_generator(processes):
"""
@param processes: an iterable list of process object interfaces
@type interface
"""
for process in processes:
try:
if pattern.search(process.name()) or pattern.search(_cmdline(process)):
return process
except psutil.AccessDenied:
# Just skip this process if we don't have access to it.
continue
return None
return _match_generator
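# Illustrative usage of commandline_matcher (not part of the monitor itself).
# The regular expression below is hypothetical; any process whose name or
# command line contains "python" would match.
def _example_commandline_matcher():
    find_python = commandline_matcher(r"python")
    return find_python(psutil.process_iter())  # first matching process, or None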
class ProcessMonitor(ScalyrMonitor):
# fmt: off
r"""
# Windows Process Metrics
Import CPU consumption, memory usage, and other metrics for a process, or group of processes, on a Windows server.
An [Agent Plugin](https://app.scalyr.com/help/scalyr-agent#plugins) is a component of the Scalyr Agent, enabling the collection of more data. The source code for each plugin is available on [Github](https://github.com/scalyr/scalyr-agent-2/tree/master/scalyr_agent/builtin_monitors).
You can use this plugin to monitor resource usage for a web server, database, or other application. 32-bit Windows systems are not supported.
This plugin requires installation of the python module `psutil`, typically with the command `pip install psutil`.
You can disable collection of these metrics by setting `implicit_agent_process_metrics_monitor: false` at the top level of the Agent [configuration file](/help/scalyr-agent#plugins).
## Installation
1\. Install the Scalyr Agent
If you haven't already, install the [Scalyr Agent](https://app.scalyr.com/help/welcome) on the Windows server.
2\. Configure the Scalyr Agent to import process metrics
Open the Scalyr Agent configuration file, located at `C:\Program Files (x86)\Scalyr\config\agent.json`.
Find the `monitors: [ ... ]` section and add a `{...}` stanza with the `module` property set for windows process metrics:
monitors: [
{
module: "scalyr_agent.builtin_monitors.windows_process_metrics",
id: "tomcat",
commandline: "java.*tomcat6",
}
]
The `id` property lets you identify the command whose output you are importing. It shows in the UI as a value for the `instance` field. This is especially useful if you are running multiple instances of this plugin, to import metrics from multiple processes. Add a separate `{...}` stanza for each instance, and set unique `id`s.
The `commandline` property is a [regular expression](https://app.scalyr.com/help/regex), matching on the command line output of `tasklist`, or `wmic process list`. If multiple processes match, only the first is used. The above example imports metrics for the first process whose command line output matches the regular expression `java.*tomcat6`.
You can also select a process by process identifier (PID). See [Configuration Options](#options) below.
3\. Save and confirm
Save the `agent.json` file. The Agent will detect changes within 30 seconds. Wait a few minutes for data to send.
You can check the [Agent Status](https://app.scalyr.com/help/scalyr-agent#agentStatus), which includes information about all running monitors.
4\. Configure the process metrics dashboard for each `id`
Log into Scalyr and click Dashboards > Windows Process Metrics. At the top of the dashboard you can select the `serverHost` and `process` of interest. (We map the `instance` field, explained above, to `process`).
Click `...` in upper right of the page and select "Edit JSON". Find these lines near the top of the JSON file:
// On the next line, list each "id" that you've used in a windows_process_metrics
// clause in the Scalyr Agent configuration file (agent.json).
values: [ "agent" ]
The "agent" id is used to report metrics for the Scalyr Agent. Add each `id` you created to the `values` list. For example, to add "tomcat":
values: [ "agent", "tomcat" ]
Save the file. To view all data collected by this plugin, across all servers, go to Search view and query [monitor = 'windows_process_metrics'](https://app.scalyr.com/events?filter=monitor+%3D+%27windows_process_metrics%27).
For help, contact Support.
"""
# fmt: on
def __init__(
self, monitor_config, logger, sample_interval_secs=None, global_config=None
):
"""TODO: Function documentation"""
if psutil is None:
raise UnsupportedSystem(
"windows_process_metrics",
'You must install the python module "psutil" to use this module. Typically, this'
"can be done with the following command:"
" pip install psutil",
)
super(ProcessMonitor, self).__init__(
monitor_config=monitor_config,
logger=logger,
sample_interval_secs=sample_interval_secs,
global_config=global_config,
)
self.__process = None
def _initialize(self):
self.__id = self._config.get(
"id", required_field=True, convert_to=six.text_type
)
if not self._config.get("commandline") and not self._config.get("pid"):
raise BadMonitorConfiguration(
'Either "pid" or "commandline" monitor config option needs to be specified (but not both)',
"commandline",
)
if self._config.get("commandline") and self._config.get("pid"):
raise BadMonitorConfiguration(
'Either "pid" or "commandline" monitor config option needs to be specified (but not both)',
"commandline",
)
def _select_target_process(self):
"""TODO: Function documentation"""
process = None
if "commandline" in self._config:
matcher = commandline_matcher(self._config["commandline"])
process = matcher(psutil.process_iter())
elif "pid" in self._config:
if "$$" == self._config.get("pid"):
pid = os.getpid()
else:
pid = self._config.get("pid")
process = psutil.Process(int(pid))
self.__process = process
def gather_sample(self):
try:
self._select_target_process()
for idx, metric in enumerate(METRICS):
if not self.__process:
break
metric_name = metric.config["metric_name"]
metric_value = metric.dispatch(self.__process)
extra_fields = metric.config["extra_fields"]
if extra_fields is None:
extra_fields = {}
extra_fields["app"] = self.__id
self._logger.emit_value(
metric_name, metric_value, extra_fields=extra_fields
)
except psutil.NoSuchProcess:
self.__process = None
commandline = self._config.get("commandline", None)
pid = self._config.get("pid", None)
# commandline has precedence over pid
if commandline:
global_log.warn(
'Unable to find process with commandline "%s"' % (commandline)
)
elif pid:
global_log.warn('Unable to find process with pid "%s"' % (pid))
| {
"content_hash": "02756f26f21312308fc7f55b4f29c36b",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 350,
"avg_line_length": 38.572916666666664,
"alnum_prop": 0.5639057135141391,
"repo_name": "scalyr/scalyr-agent-2",
"id": "ca9639fb4355361ff1f42be172d06c31ab7b542b",
"size": "25943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scalyr_agent/builtin_monitors/windows_process_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1926"
},
{
"name": "Dockerfile",
"bytes": "16280"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "JavaScript",
"bytes": "2191"
},
{
"name": "Jinja",
"bytes": "31315"
},
{
"name": "PowerShell",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "5244579"
},
{
"name": "Shell",
"bytes": "173598"
},
{
"name": "XSLT",
"bytes": "1082"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
def center_data(X, cols_to_center):
"""
Parameters:
@X {2D numpy array} dataset with variables to be centered
@cols_to_center {list} list of columns (i.e., variables to center)
Return:
@centered_data {2D numpy array} the dataset, but with the specified
columns centered
This function centers (i.e., de-means) the data
"""
# get the count of rows and cols of X
n_row = X.shape[0]
n_col = X.shape[1]
# create a matrix to hold the new, centered data
centered_data = np.zeros([n_row, n_col])
# loop over the cols, compute the mean, and subtract it
for i in range(0, n_col):
if i in cols_to_center:
# compute the mean of the original data
mean = np.mean(X[:, i], axis=0)
# remove the mean
centered_data[:, i] = X[:, i] - mean
else:
            # if the col is not to be centered (e.g., in the case
            # of a categorical variable), leave it as is
centered_data[:, i] = X[:, i]
# return the centered data
return centered_data
def scale_data(X, cols_to_scale):
"""
Parameters:
@X {2D numpy array} dataset with variables to be centered
@cols_to_scale {list} list of columns (i.e., variables) to scale
such that they have unit variance
Return:
@scaled_data {2D numpy array} the dataset, but with the specified
columns scaled
This function scales the data to have unit variance
"""
# get the count of rows and cols
n_row = X.shape[0]
n_col = X.shape[1]
# create a matrix to hold the new, scaled data
scaled_data = np.zeros([n_row, n_col])
# loop over the cols, compute the standard deviation, and divide by it
for i in range(0, n_col):
if i in cols_to_scale:
# compute the stdev of the original data
st_dev = np.std(X[:, i], axis=0)
scaled_data[:, i] = X[:, i]/st_dev
else:
scaled_data[:, i] = X[:, i]
# return the scaled data
return scaled_data
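# A minimal usage sketch (illustrative only): standardize column 0 of a toy
# dataset by centering then scaling; column 1 is left untouched.
def _example_standardize():
    X = np.array([[1.0, 5.0], [2.0, 5.0], [3.0, 5.0]])
    X_std = scale_data(center_data(X, [0]), [0])
    # column 0 now has zero mean and unit variance
    return X_std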
def lasso_loss(X, y, beta, lam, shooting=False):
"""
Parameters:
@X {2D numpy array} matrix of predictor variables
@y {numpy array} continuous response variable
@beta {numpy array} vector of regression coefficients
@lam {float} regularization parameter
@shooting {optional, boolean} Flag to indicate if we are
evaluating the loss for the shooting algorithm which,
per Murphy (2012) (see paper for full reference), uses
a lasso loss function without a 0.5 factor in front of the
        RSS(beta).
Return:
@lasso_loss {float} the value of the lasso loss function
(i.e., a least squares loss plus an L1 penalty) evaluated
at the given set of inputs
This function computes the value of the lasso loss function
at a given set of input data, regression coefficients, and
regularization parameter value
"""
if not shooting:
# compute the least squares (LS) loss
ls_loss = 0.5 * np.linalg.norm(y - np.dot(X, beta))**2
else:
# compute the least squares (LS) loss as the RSS
ls_loss = np.linalg.norm(y - np.dot(X, beta))**2
# compute the penalty term
penalty_term = lam * np.sum(np.abs(beta))
# return the loss function
return ls_loss + penalty_term
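# A quick numeric check (illustrative only). For X = I, y = (1, 2), beta = (1, 1)
# and lam = 0.5: loss = 0.5*||y - X beta||^2 + lam*||beta||_1
#                     = 0.5*(0 + 1) + 0.5*(1 + 1) = 1.5
def _example_lasso_loss():
    X = np.eye(2)
    y = np.array([1.0, 2.0])
    beta = np.array([1.0, 1.0])
    return lasso_loss(X, y, beta, lam=0.5)  # -> 1.5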
def calc_b_0(orig_y, orig_X, beta):
"""
Parameters:
@orig_X {2D numpy array} dataset of predictor variables that has not
been centered or scaled
@orig_y {1D numpy array} original response variable that has not been
centered or scaled
@beta {1D numpy array} vector of regression coefficients
Return:
@b_0 {float} the value of the intercept for the regression problem
    This function recovers the value of b_0, i.e., the intercept that was
lost as a result of centering the data
"""
# calculate the value of the intercept
b_0 = np.mean(orig_y) - np.dot(np.mean(orig_X, axis=0), beta)
# return the intercept
return b_0
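# Worked example (illustrative only): data generated by y = x + 2 is fit
# exactly by beta = (1,), so the recovered intercept is mean(y) - mean(x) = 2.
def _example_calc_b_0():
    orig_X = np.array([[0.0], [1.0], [2.0]])
    orig_y = np.array([2.0, 3.0, 4.0])
    beta = np.array([1.0])
    return calc_b_0(orig_y, orig_X, beta)  # -> 2.0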
def plot_lasso_coef_profiles(lam_grid, coeffs, col_headers=[]):
"""
Parameters:
@lam_grid {1D numpy array} grid regularization parameters used
@coeffs {2D numpy array} array of regression coefficients where each row
corresponds to a different lambda value and each column is a different
predictor
@col_headers {optional, list} list of column headers to put into a legend
    This function plots the lasso coefficient profiles (paths), i.e., the
    value of each regression coefficient as a function of log_10(lambda)
    across the grid of regularization parameters.
Return (technically just displayed):
@plt {matplotlib plot} Plot of coeff vals vs. log_10(lambda)
"""
# get lambda in log-scale so the graph is more readable
log_lam = np.log10(lam_grid)
# loop over the cols of the matrix, where each col contains a different
# predictor and each row corresponds to the value of that predictor under
    # the given lambda value, and plot coefficient value vs. log lambda
    for j in range(0, coeffs.shape[1]):
        plt.plot(log_lam, coeffs[:, j], marker="o")
# plot formatting
plt.title("Lasso Coefficient Paths")
plt.xlabel("Log_10 lambda")
plt.ylabel("Coefficient Value")
# check if the col_header list arg is empty, if not, use what
# the user provided as elements in the legend
if col_headers:
        plt.legend(col_headers, loc='center left', bbox_to_anchor=(1, 0.5), fontsize=14)
# display the plot
plt.show()
| {
"content_hash": "0cd36c343b140460d148d7ee9f80ac3b",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 85,
"avg_line_length": 35.388888888888886,
"alnum_prop": 0.6335252049537764,
"repo_name": "johnash1990/stat527_final_project",
"id": "af47031f712e14cc80047ca62de32788b808b58f",
"size": "5733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lasso_utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1240736"
},
{
"name": "Python",
"bytes": "37953"
}
],
"symlink_target": ""
} |
from openerp.osv import fields, osv
class stock_warehouse_orderpoint(osv.osv):
_inherit = "stock.warehouse.orderpoint"
_columns = {
'calendar_id': fields.many2one('resource.calendar', 'Calendar',
help="In the calendar you can define the days that the goods will be delivered. That way the scheduler will only take into account the goods needed until the second delivery and put the procurement date as the first delivery. "),
'purchase_calendar_id': fields.many2one('resource.calendar', 'Purchase Calendar'),
'last_execution_date': fields.datetime('Last Execution Date', readonly=True),
}
| {
"content_hash": "a3449970a5a7050333afa3b2108043e9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 270,
"avg_line_length": 60.72727272727273,
"alnum_prop": 0.6841317365269461,
"repo_name": "vileopratama/vitech",
"id": "7388990026d5b5081db4e8e966274709c9d52152",
"size": "668",
"binary": false,
"copies": "55",
"ref": "refs/heads/master",
"path": "src/addons/stock_calendar/stock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
"""This script is used to download the clang update script. It runs as a
gclient hook.
It's equivalent to using curl to download the latest update script:
$ curl --silent --create-dirs -o tools/clang/scripts/update.py \
https://raw.githubusercontent.com/chromium/chromium/master/tools/clang/scripts/update.py
The purpose of "reinventing the wheel" with this script is just so developers
aren't required to have curl installed.
"""
import argparse
import os
import sys
try:
from urllib2 import HTTPError, URLError, urlopen
except ImportError: # For Py3 compatibility
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
SCRIPT_DOWNLOAD_URL = ('https://raw.githubusercontent.com/' +
'chromium/chromium/master/tools/clang/scripts/update.py')
def main():
parser = argparse.ArgumentParser(
description='Download clang update script from chromium master.')
parser.add_argument('--output',
help='Path to script file to create/overwrite.')
args = parser.parse_args()
if not args.output:
print('usage: download-clang-update-script.py ' +
          '--output=tools/clang/scripts/update.py')
return 1
script_contents = ''
try:
response = urlopen(SCRIPT_DOWNLOAD_URL)
script_contents = response.read()
except HTTPError as e:
    print(e.code)
    print(e.read())
    return 1
  except URLError as e:
    print('Download failed. Reason: ', e.reason)
return 1
directory = os.path.dirname(args.output)
if not os.path.exists(directory):
os.makedirs(directory)
script_file = open(args.output, 'w')
script_file.write(script_contents)
script_file.close()
return 0
if __name__ == '__main__':
sys.exit(main())
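# Example invocation (illustrative; the output path mirrors the docstring above):
#   python download-clang-update-script.py --output=tools/clang/scripts/update.py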
| {
"content_hash": "52d22e85b51768aadc0b5e77118d7018",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 94,
"avg_line_length": 28.59016393442623,
"alnum_prop": 0.6972477064220184,
"repo_name": "endlessm/chromium-browser",
"id": "0d707060920f869f74cf48316d679bde73eae869",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/openscreen/src/tools/download-clang-update-script.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('olympic', '0008_add_wa_age_group_categories'),
]
operations = [
migrations.AddField(
model_name='olympicround',
name='team_type',
field=models.CharField(blank=True, choices=[('', 'Individual'), ('T', 'Team'), ('X', 'Mixed team')], default='', max_length=1),
),
]
| {
"content_hash": "18640d96a14b779c91958c5709911846",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 139,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.5859213250517599,
"repo_name": "mjtamlyn/archery-scoring",
"id": "74e1ef7fa5e0aea692c100b266550e29e7a8cad3",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "olympic/migrations/0009_team_h2h.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243118"
},
{
"name": "HTML",
"bytes": "98205"
},
{
"name": "JavaScript",
"bytes": "255325"
},
{
"name": "Makefile",
"bytes": "173"
},
{
"name": "Python",
"bytes": "361105"
},
{
"name": "Ruby",
"bytes": "894"
},
{
"name": "Shell",
"bytes": "2460"
}
],
"symlink_target": ""
} |
from django import template
register = template.Library()
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context.get('show_delete', False))),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
}
submit_row = register.inclusion_tag('nexus/admin/submit_line.html', takes_context=True)(submit_row)
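# Illustrative template usage (assumes this tag library is available to the
# template loader):
#   {% load nexus_admin %}
#   {% submit_row %}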
| {
"content_hash": "d75213deb5f74de8410947855884f099",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 99,
"avg_line_length": 39.91304347826087,
"alnum_prop": 0.6078431372549019,
"repo_name": "brilliant-org/nexus",
"id": "d4ca8302851f22a0722ba13601290ca61ce332c9",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nexus/templatetags/nexus_admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24685"
},
{
"name": "HTML",
"bytes": "28269"
},
{
"name": "JavaScript",
"bytes": "11029"
},
{
"name": "Python",
"bytes": "36647"
}
],
"symlink_target": ""
} |
"""
Runs GATK best practices pipeline for germline SNP and INDEL discovery.
BWA Alignment
0: Download FASTQ(s) or BAM
1: Align to reference
2: Sort BAM
3: Index Bam
GATK Preprocessing
4: Mark duplicates
5: Indel realignment
6: Base quality score recalibration
7: Apply recalibration
GATK Variant Discovery
8: Call variants
9: Merge variant calls
10: Genotype variants
GATK Hard Filtering or VQSR
11: Isolate SNPs
12: Isolate Indels
13: Apply SNP filter
14: Apply Indel filter
15: Merge SNP and Indel VCFs
11: Recalibrate SNPs
12: Recalibrate Indels
13: Apply SNP recalibration
14: Apply Indel recalibration
===================================================================
Dependencies
Python 2.7
curl - apt-get install curl
virtualenv - apt-get install python-virtualenv
pip - apt-get install python-pip
toil - pip install toil
docker - http://docs.docker.com/engine/installation/
"""
from __future__ import print_function
import argparse
from collections import namedtuple
from copy import deepcopy
import logging
import os
import re
from urlparse import urlparse
from bd2k.util.humanize import human2bytes
from bd2k.util.processes import which
from toil.job import Job, PromisedRequirement
from toil_lib import require
from toil_lib.files import generate_file
from toil_lib.programs import docker_call
from toil_lib.tools.aligners import run_bwakit
from toil_lib.tools.indexing import run_samtools_faidx
from toil_lib.tools.preprocessing import run_gatk_preprocessing, \
run_picard_create_sequence_dictionary, run_samtools_index, run_samtools_sort
from toil_lib.tools.variant_annotation import gatk_genotype_gvcfs, run_oncotator
from toil_lib.urls import download_url_job
import yaml
from toil_scripts.gatk_germline.common import output_file_job
from toil_scripts.gatk_germline.germline_config_manifest import generate_config, generate_manifest
from toil_scripts.gatk_germline.hard_filter import hard_filter_pipeline
from toil_scripts.gatk_germline.vqsr import vqsr_pipeline
logging.basicConfig(level=logging.INFO)
class GermlineSample(namedtuple('GermlineSample', 'uuid url paired_url rg_line')):
"""
Namedtuple subclass for Toil Germline samples.
Attributes
uuid: unique sample identifier
url: URL/PATH to FASTQ or BAM file
paired_url: URL/PATH to paired FASTQ file, or None if BAM file
rg_line: Read group information (i.e. @RG\tID:foo\tSM:bar), or None if BAM file
"""
def run_gatk_germline_pipeline(job, samples, config):
"""
Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list[GermlineSample] samples: List of GermlineSample namedtuples
:param Namespace config: Configuration options for pipeline
Requires the following config attributes:
config.preprocess_only If True, then stops pipeline after preprocessing steps
config.joint_genotype If True, then joint genotypes cohort
config.run_oncotator If True, then adds Oncotator to pipeline
Additional parameters are needed for downstream steps. Refer to pipeline README for more information.
"""
# Determine the available disk space on a worker node before any jobs have been run.
work_dir = job.fileStore.getLocalTempDir()
st = os.statvfs(work_dir)
config.available_disk = st.f_bavail * st.f_frsize
# Check that there is a reasonable number of samples for joint genotyping
num_samples = len(samples)
if config.joint_genotype and not 30 < num_samples < 200:
job.fileStore.logToMaster('WARNING: GATK recommends batches of '
'30 to 200 samples for joint genotyping. '
'The current cohort has %d samples.' % num_samples)
shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate()
job.addChild(shared_files)
if config.preprocess_only:
for sample in samples:
shared_files.addChildJobFn(prepare_bam,
sample.uuid,
sample.url,
shared_files.rv(),
paired_url=sample.paired_url,
rg_line=sample.rg_line)
else:
run_pipeline = Job.wrapJobFn(gatk_germline_pipeline,
samples,
shared_files.rv()).encapsulate()
shared_files.addChild(run_pipeline)
if config.run_oncotator:
annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv())
run_pipeline.addChild(annotate)
def gatk_germline_pipeline(job, samples, config):
"""
Runs the GATK best practices pipeline for germline SNP and INDEL discovery.
Steps in Pipeline
0: Generate and preprocess BAM
- Uploads processed BAM to output directory
1: Call Variants using HaplotypeCaller
- Uploads GVCF
2: Genotype VCF
- Uploads VCF
3: Filter Variants using either "hard filters" or VQSR
- Uploads filtered VCF
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list[GermlineSample] samples: List of GermlineSample namedtuples
:param Namespace config: Input parameters and reference FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.cores Number of cores for each job
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.joint_genotype If True, then joint genotype and filter cohort
config.hc_output URL or local path to HaplotypeCaller output for testing
:return: Dictionary of filtered VCF FileStoreIDs
:rtype: dict
"""
require(len(samples) > 0, 'No samples were provided!')
# Get total size of genome reference files. This is used for configuring disk size.
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# 0: Generate processed BAM and BAI files for each sample
# group preprocessing and variant calling steps in empty Job instance
group_bam_jobs = Job()
gvcfs = {}
for sample in samples:
# 0: Generate processed BAM and BAI files for each sample
get_bam = group_bam_jobs.addChildJobFn(prepare_bam,
sample.uuid,
sample.url,
config,
paired_url=sample.paired_url,
rg_line=sample.rg_line)
# 1: Generate per sample gvcfs {uuid: gvcf_id}
# The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference
# files, and the output GVCF file. The output GVCF is smaller than the input BAM file.
hc_disk = PromisedRequirement(lambda bam, bai, ref_size:
2 * bam.size + bai.size + ref_size,
get_bam.rv(0),
get_bam.rv(1),
genome_ref_size)
get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller,
get_bam.rv(0),
get_bam.rv(1),
config.genome_fasta, config.genome_fai, config.genome_dict,
annotations=config.annotations,
cores=config.cores,
disk=hc_disk,
memory=config.xmx,
hc_output=config.hc_output)
# Store cohort GVCFs in dictionary
gvcfs[sample.uuid] = get_gvcf.rv()
# Upload individual sample GVCF before genotyping to a sample specific output directory
vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix)
get_gvcf.addChildJobFn(output_file_job,
vqsr_name,
get_gvcf.rv(),
os.path.join(config.output_dir, sample.uuid),
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, get_gvcf.rv()))
# VQSR requires many variants in order to train a decent model. GATK recommends a minimum of
# 30 exomes or one large WGS sample:
# https://software.broadinstitute.org/gatk/documentation/article?id=3225
filtered_vcfs = {}
if config.joint_genotype:
# Need to configure joint genotype in a separate function to resolve promises
filtered_vcfs = group_bam_jobs.addFollowOnJobFn(joint_genotype_and_filter,
gvcfs,
config).rv()
# If not joint genotyping, then iterate over cohort and genotype and filter individually.
else:
for uuid, gvcf_id in gvcfs.iteritems():
filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn(genotype_and_filter,
{uuid: gvcf_id},
config).rv()
job.addChild(group_bam_jobs)
return filtered_vcfs
def joint_genotype_and_filter(job, gvcfs, config):
"""
Checks for enough disk space for joint genotyping, then calls the genotype and filter pipeline function.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID}
:param Namespace config: Input parameters and reference FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.available_disk Total available disk space
:returns: FileStoreID for the joint genotyped and filtered VCF file
:rtype: str
"""
# Get the total size of genome reference files
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# Require at least 2.5x the sum of the individual GVCF files
cohort_size = sum(gvcf.size for gvcf in gvcfs.values())
require(int(2.5 * cohort_size + genome_ref_size) < config.available_disk,
'There is not enough disk space to joint '
'genotype samples:\n{}'.format('\n'.join(gvcfs.keys())))
job.fileStore.logToMaster('Merging cohort into a single GVCF file')
return job.addChildJobFn(genotype_and_filter, gvcfs, config).rv()
def genotype_and_filter(job, gvcfs, config):
"""
Genotypes one or more GVCF files and runs either the VQSR or hard filtering pipeline. Uploads the genotyped VCF file
to the config output directory.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID}
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
config.unsafe_mode If True, then run GATK tools in UNSAFE mode
:return: FileStoreID for genotyped and filtered VCF file
:rtype: str
"""
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# GenotypeGVCF disk requirement depends on the input GVCF, the genome reference files, and
# the output VCF file. The output VCF is smaller than the input GVCF.
genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size:
2 * sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size,
gvcfs.values(),
genome_ref_size)
genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs,
gvcfs,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
annotations=config.annotations,
unsafe_mode=config.unsafe_mode,
cores=config.cores,
disk=genotype_gvcf_disk,
memory=config.xmx)
# Determine if output GVCF has multiple samples
if len(gvcfs) == 1:
uuid = gvcfs.keys()[0]
else:
uuid = 'joint_genotyped'
genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix)
genotype_gvcf.addChildJobFn(output_file_job,
genotyped_filename,
genotype_gvcf.rv(),
os.path.join(config.output_dir, uuid),
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, genotype_gvcf.rv()))
if config.run_vqsr:
if not config.joint_genotype:
job.fileStore.logToMaster('WARNING: Running VQSR without joint genotyping.')
joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline,
uuid,
genotype_gvcf.rv(),
config)
else:
joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(hard_filter_pipeline,
uuid,
genotype_gvcf.rv(),
config)
return joint_genotype_vcf.rv()
def annotate_vcfs(job, vcfs, config):
"""
Runs Oncotator for a group of VCF files. Each sample is annotated individually.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict vcfs: Dictionary of VCF FileStoreIDs {Sample identifier: FileStoreID}
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.oncotator_db FileStoreID to Oncotator database
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
"""
job.fileStore.logToMaster('Running Oncotator on the following samples:\n%s' % '\n'.join(vcfs.keys()))
for uuid, vcf_id in vcfs.iteritems():
# The Oncotator disk requirement depends on the input VCF, the Oncotator database
# and the output VCF. The annotated VCF will be significantly larger than the input VCF.
onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db.size,
vcf_id,
config.oncotator_db)
annotated_vcf = job.addChildJobFn(run_oncotator,
vcf_id,
config.oncotator_db,
disk=onco_disk,
cores=config.cores,
memory=config.xmx)
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix)
annotated_vcf.addChildJobFn(output_file_job,
filename,
annotated_vcf.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, annotated_vcf.rv()))
# Pipeline convenience functions
def parse_manifest(path_to_manifest):
"""
Parses manifest file for Toil Germline Pipeline
:param str path_to_manifest: Path to sample manifest file
:return: List of GermlineSample namedtuples
:rtype: list[GermlineSample]
"""
bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
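# Illustrative manifest entries the patterns above accept (values are placeholders;
# fields are separated by a single whitespace character, per the regexes):
#   sample-1 /data/sample1.bam
#   sample-2 /data/s2_r1.fastq /data/s2_r2.fastq @RG\tID:sample-2\tSM:sample-2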
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
line = line.strip()
if line.startswith('#'):
continue
bam_match = re.match(bam_re, line)
fastq_match = re.match(fq_re, line)
if bam_match:
uuid = bam_match.group('uuid')
url = bam_match.group('url')
paired_url = None
rg_line = None
require('.bam' in url.lower(),
'Expected .bam extension:\n{}:\t{}'.format(uuid, url))
elif fastq_match:
uuid = fastq_match.group('uuid')
url = fastq_match.group('url')
paired_url = fastq_match.group('paired_url')
rg_line = fastq_match.group('rg_line')
require('.fq' in url.lower() or '.fastq' in url.lower(),
'Expected .fq extension:\n{}:\t{}'.format(uuid, url))
else:
raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line))
# Checks that URL has a scheme
require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
samples.append(GermlineSample(uuid, url, paired_url, rg_line))
return samples
def download_shared_files(job, config):
"""
Downloads shared reference files for Toil Germline pipeline
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options
:return: Updated config with shared fileStoreIDS
:rtype: Namespace
"""
job.fileStore.logToMaster('Downloading shared reference files')
shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'}
nonessential_files = {'genome_fai', 'genome_dict'}
# Download necessary files for pipeline configuration
if config.run_bwa:
shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'}
nonessential_files.add('alt')
if config.preprocess:
shared_files |= {'g1k_indel', 'mills', 'dbsnp'}
if config.run_vqsr:
shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}
if config.run_oncotator:
shared_files.add('oncotator_db')
for name in shared_files:
try:
url = getattr(config, name, None)
if url is None:
continue
setattr(config, name, job.addChildJobFn(download_url_job,
url,
name=name,
s3_key_path=config.ssec,
disk='15G' # Estimated reference file size
).rv())
finally:
if getattr(config, name, None) is None and name not in nonessential_files:
raise ValueError("Necessary configuration parameter is missing:\n{}".format(name))
return job.addFollowOnJobFn(reference_preprocessing, config).rv()
def reference_preprocessing(job, config):
"""
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace
"""
job.fileStore.logToMaster('Preparing Reference Files')
genome_id = config.genome_fasta
if getattr(config, 'genome_fai', None) is None:
config.genome_fai = job.addChildJobFn(run_samtools_faidx,
genome_id,
cores=config.cores).rv()
if getattr(config, 'genome_dict', None) is None:
config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
genome_id,
cores=config.cores,
memory=config.xmx).rv()
return config
def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None):
"""
Prepares BAM file for Toil germline pipeline.
Steps in pipeline
0: Download and align BAM or FASTQ sample
1: Sort BAM
2: Index BAM
3: Run GATK preprocessing pipeline (Optional)
- Uploads preprocessed BAM to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique identifier for the sample
:param str url: URL or local path to BAM file or FASTQs
:param Namespace config: Configuration options for pipeline
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.g1k_indel FileStoreID for 1000G INDEL resource file
config.mills FileStoreID for Mills resource file
config.dbsnp FileStoreID for dbSNP resource file
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.cores Number of cores for each job
config.xmx Java heap size in bytes
:param str|None paired_url: URL or local path to paired FASTQ file, default is None
:param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None
:return: BAM and BAI FileStoreIDs
:rtype: tuple
"""
# 0: Align FASTQ or realign BAM
if config.run_bwa:
get_bam = job.wrapJobFn(setup_and_run_bwakit,
uuid,
url,
rg_line,
config,
paired_url=paired_url).encapsulate()
# 0: Download BAM
elif '.bam' in url.lower():
job.fileStore.logToMaster("Downloading BAM: %s" % uuid)
get_bam = job.wrapJobFn(download_url_job,
url,
name='toil.bam',
s3_key_path=config.ssec,
disk=config.file_size).encapsulate()
else:
raise ValueError('Could not generate BAM file for %s\n'
'Provide a FASTQ URL and set run-bwa or '
'provide a BAM URL that includes .bam extension.' % uuid)
# 1: Sort BAM file if necessary
# Realigning BAM file shuffles read order
if config.sorted and not config.run_bwa:
sorted_bam = get_bam
else:
# The samtools sort disk requirement depends on the input bam, the tmp files, and the
# sorted output bam.
sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv())
sorted_bam = get_bam.addChildJobFn(run_samtools_sort,
get_bam.rv(),
cores=config.cores,
disk=sorted_bam_disk)
# 2: Index BAM
# The samtools index disk requirement depends on the input bam and the output bam index
index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv())
index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk)
job.addChild(get_bam)
sorted_bam.addChild(index_bam)
if config.preprocess:
preprocess = job.wrapJobFn(run_gatk_preprocessing,
sorted_bam.rv(),
index_bam.rv(),
config.genome_fasta,
config.genome_dict,
config.genome_fai,
config.g1k_indel,
config.mills,
config.dbsnp,
memory=config.xmx,
cores=config.cores).encapsulate()
sorted_bam.addChild(preprocess)
index_bam.addChild(preprocess)
# Update output BAM promises
output_bam_promise = preprocess.rv(0)
output_bai_promise = preprocess.rv(1)
# Save processed BAM
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix)
output_bam = job.wrapJobFn(output_file_job,
filename,
preprocess.rv(0),
output_dir,
s3_key_path=config.ssec)
preprocess.addChild(output_bam)
else:
output_bam_promise = sorted_bam.rv()
output_bai_promise = index_bam.rv()
return output_bam_promise, output_bai_promise
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None):
"""
Downloads and runs bwakit for BAM or FASTQ files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension.
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.cores Number of cores for each job
config.trim If True, trim adapters using bwakit
config.amb FileStoreID for BWA index file prefix.amb
config.ann FileStoreID for BWA index file prefix.ann
config.bwt FileStoreID for BWA index file prefix.bwt
config.pac FileStoreID for BWA index file prefix.pac
config.sa FileStoreID for BWA index file prefix.sa
config.alt FileStoreID for alternate contigs file or None
:param str|None paired_url: URL to paired FASTQ
:param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar)
:return: BAM FileStoreID
:rtype: str
"""
bwa_config = deepcopy(config)
bwa_config.uuid = uuid
bwa_config.rg_line = rg_line
# bwa_alignment uses a different naming convention
bwa_config.ref = config.genome_fasta
bwa_config.fai = config.genome_fai
# Determine if sample is a FASTQ or BAM file using the file extension
basename, ext = os.path.splitext(url)
ext = ext.lower()
if ext == '.gz':
_, ext = os.path.splitext(basename)
ext = ext.lower()
# The pipeline currently supports FASTQ and BAM files
require(ext in ['.fq', '.fastq', '.bam'],
'Please use .fq or .bam file extensions:\n%s' % url)
# Download fastq files
samples = []
input1 = job.addChildJobFn(download_url_job,
url,
name='file1',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input1.rv())
# If the extension is for a BAM file, then configure bwakit to realign the BAM file.
if ext == '.bam':
bwa_config.bam = input1.rv()
else:
bwa_config.r1 = input1.rv()
# Download the paired FASTQ URL
if paired_url:
input2 = job.addChildJobFn(download_url_job,
paired_url,
name='file2',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input2.rv())
bwa_config.r2 = input2.rv()
# The bwakit disk requirement depends on the size of the input files and the index
# Take the sum of the input files and scale it by a factor of 4
bwa_index_size = sum([getattr(config, index_file).size
for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt']
if getattr(config, index_file, None) is not None])
bwakit_disk = PromisedRequirement(lambda lst, index_size:
int(4 * sum(x.size for x in lst) + index_size),
samples,
bwa_index_size)
return job.addFollowOnJobFn(run_bwakit,
bwa_config,
sort=False, # BAM files are sorted later in the pipeline
trim=config.trim,
cores=config.cores,
disk=bwakit_disk).rv()
def gatk_haplotype_caller(job,
bam, bai,
ref, fai, ref_dict,
annotations=None,
emit_threshold=10.0, call_threshold=30.0,
unsafe_mode=False,
hc_output=None):
"""
Uses GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference sequence dictionary file
:param str fai: FileStoreID for reference fasta index file
:param list[str] annotations: List of GATK variant annotations, default is None
:param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0
:param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:param str hc_output: URL or local path to pre-cooked VCF file, default is None
:return: FileStoreID for GVCF file
:rtype: str
"""
job.fileStore.logToMaster('Running GATK HaplotypeCaller')
inputs = {'genome.fa': ref,
'genome.fa.fai': fai,
'genome.dict': ref_dict,
'input.bam': bam,
'input.bam.bai': bai}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file:
# https://software.broadinstitute.org/gatk/documentation/article?id=2803
command = ['-T', 'HaplotypeCaller',
'-nct', str(job.cores),
'-R', 'genome.fa',
'-I', 'input.bam',
'-o', 'output.g.vcf',
'-stand_call_conf', str(call_threshold),
'-stand_emit_conf', str(emit_threshold),
'-variant_index_type', 'LINEAR',
'-variant_index_parameter', '128000',
'--genotyping_mode', 'Discovery',
'--emitRefConfidence', 'GVCF']
if unsafe_mode:
command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command
if annotations:
for annotation in annotations:
command.extend(['-A', annotation])
# Uses docker_call mock mode to replace output with hc_output file
outputs = {'output.g.vcf': hc_output}
docker_call(job=job, work_dir=work_dir,
env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)},
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
inputs=inputs.keys(),
outputs=outputs,
mock=True if outputs['output.g.vcf'] else False)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf'))
def main():
"""
GATK germline pipeline with variant filtering and annotation.
"""
# Define Parser object and add to jobTree
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
# Generate subparsers
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser('generate-config',
help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest',
help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate',
help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the GATK germline pipeline')
parser_run.add_argument('--config',
required=True,
type=str,
help='Path to the (filled in) config file, generated with '
'"generate-config".')
parser_run.add_argument('--manifest',
type=str,
help='Path to the (filled in) manifest file, generated with '
'"generate-manifest".\nDefault value: "%(default)s".')
parser_run.add_argument('--sample',
default=None,
nargs=2,
type=str,
help='Input sample identifier and BAM file URL or local path')
parser_run.add_argument('--output-dir',
default=None,
help='Path/URL to output directory')
parser_run.add_argument('-s', '--suffix',
default=None,
help='Additional suffix to add to the names of the output files')
parser_run.add_argument('--preprocess-only',
action='store_true',
help='Only runs preprocessing steps')
Job.Runner.addToilOptions(parser_run)
options = parser.parse_args()
cwd = os.getcwd()
if options.command == 'generate-config' or options.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-germline.yaml'), generate_config)
if options.command == 'generate-manifest' or options.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-germline.tsv'), generate_manifest)
elif options.command == 'run':
# Program checks
for program in ['curl', 'docker']:
require(next(which(program), None),
'{} must be installed on every node.'.format(program))
require(os.path.exists(options.config), '{} not found. Please run "generate-config"'.format(options.config))
# Read sample manifest
samples = []
if options.manifest:
samples.extend(parse_manifest(options.manifest))
# Add BAM sample from command line
if options.sample:
uuid, url = options.sample
# samples tuple: (uuid, url, paired_url, rg_line)
# BAM samples should not have a paired URL or read group line
samples.append(GermlineSample(uuid, url, None, None))
require(len(samples) > 0,
'No samples were detected in the manifest or on the command line')
# Parse inputs
inputs = {x.replace('-', '_'): y for x, y in
yaml.load(open(options.config).read()).iteritems()}
required_fields = {'genome_fasta',
'output_dir',
'run_bwa',
'sorted',
'snp_filter_annotations',
'indel_filter_annotations',
'preprocess',
'preprocess_only',
'run_vqsr',
'joint_genotype',
'run_oncotator',
'cores',
'file_size',
'xmx',
'suffix'}
input_fields = set(inputs.keys())
require(input_fields >= required_fields,
'Missing config parameters:\n{}'.format(', '.join(required_fields - input_fields)))
if inputs['output_dir'] is None:
inputs['output_dir'] = options.output_dir
require(inputs['output_dir'] is not None,
'Missing output directory PATH/URL')
if inputs['suffix'] is None:
inputs['suffix'] = options.suffix if options.suffix else ''
if inputs['preprocess_only'] is None:
inputs['preprocess_only'] = options.preprocess_only
if inputs['run_vqsr']:
# Check that essential VQSR parameters are present
vqsr_fields = {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}
require(input_fields >= vqsr_fields,
'Missing parameters for VQSR:\n{}'.format(', '.join(vqsr_fields - input_fields)))
# Check that hard filtering parameters are present. If only running preprocessing steps, then we do
# not need filtering information.
elif not inputs['preprocess_only']:
hard_filter_fields = {'snp_filter_name', 'snp_filter_expression',
'indel_filter_name', 'indel_filter_expression'}
require(input_fields >= hard_filter_fields,
'Missing parameters for hard filtering:\n{}'.format(', '.join(hard_filter_fields - input_fields)))
# Check for falsey hard filtering parameters
for hard_filter_field in hard_filter_fields:
require(inputs[hard_filter_field], 'Missing %s value for hard filtering, '
'got %s.' % (hard_filter_field, inputs[hard_filter_field]))
# Set resource parameters
inputs['xmx'] = human2bytes(inputs['xmx'])
inputs['file_size'] = human2bytes(inputs['file_size'])
inputs['cores'] = int(inputs['cores'])
inputs['annotations'] = set(inputs['snp_filter_annotations'] + inputs['indel_filter_annotations'])
# HaplotypeCaller test data for testing
inputs['hc_output'] = inputs.get('hc_output', None)
# It is a toil-scripts convention to store input parameters in a Namespace object
config = argparse.Namespace(**inputs)
root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config)
Job.Runner.startToil(root, options)
if __name__ == '__main__':
main()
| {
"content_hash": "d473dab664b0b9cb43bd1318dcc82935",
"timestamp": "",
"source": "github",
"line_count": 897,
"max_line_length": 120,
"avg_line_length": 45.99888517279822,
"alnum_prop": 0.5701752260003393,
"repo_name": "BD2KGenomics/toil-scripts",
"id": "de7c0fae9dfea46a0cd09b19b29e3a92ebcaf356",
"size": "41286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/toil_scripts/gatk_germline/germline.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3068"
},
{
"name": "Python",
"bytes": "284050"
},
{
"name": "Shell",
"bytes": "8996"
}
],
"symlink_target": ""
} |
__author__ = "Alok Kumar"
from setuptools import setup, find_packages
setup(
name="wc14",
version="0.2",
author="Alok Kumar",
author_email="rajalokan@gmail.com",
url="https://github.com/rajalokan/wc14",
download_url="https://github.com/rajalokan/wc14/archive/0.1.tar.gz",
description=("Command line tool to keep you updated about Football world cup 2014."),
packages=find_packages(),
include_package_data=True,
install_requires=['click', 'requests', 'colorama', 'humanize'],
entry_points='''
[console_scripts]
wc14=wc14.cli:cli
''',
)
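# Illustrative install/usage from the project root (assumes a click-based CLI,
# per the entry point declared above):
#   pip install .      # registers the 'wc14' console script
#   wc14 --help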
| {
"content_hash": "0c9682429581631a3948ea12116f5f5f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 89,
"avg_line_length": 30.05,
"alnum_prop": 0.6505823627287853,
"repo_name": "rajalokan/wc14",
"id": "71e8a1c9dc99ed93dd1f6459b289a901781c4b48",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7046"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import mimetypes
from os import path
import unittest
from django.conf.urls.static import static
from django.http import HttpResponseNotModified
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.http import http_date
from django.views.static import was_modified_since
from .. import urls
from ..urls import media_dir
@override_settings(DEBUG=True)
class StaticTests(SimpleTestCase):
"""Tests django views in django/views/static.py"""
prefix = 'site_media'
def test_serve(self):
"The static view can serve static media"
media_files = ['file.txt', 'file.txt.gz']
for filename in media_files:
response = self.client.get('/views/%s/%s' % (self.prefix, filename))
response_content = b''.join(response)
file_path = path.join(media_dir, filename)
with open(file_path, 'rb') as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response['Content-Length']))
self.assertEqual(mimetypes.guess_type(file_path)[1], response.get('Content-Encoding', None))
def test_unknown_mime_type(self):
response = self.client.get('/views/%s/file.unknown' % self.prefix)
self.assertEqual('application/octet-stream', response['Content-Type'])
def test_copes_with_empty_path_component(self):
file_name = 'file.txt'
response = self.client.get('/views/%s//%s' % (self.prefix, file_name))
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
def test_is_modified_since(self):
file_name = 'file.txt'
response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE='Thu, 1 Jan 1970 00:00:00 GMT')
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
def test_not_modified_since(self):
file_name = 'file.txt'
response = self.client.get(
'/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE='Mon, 18 Jan 2038 05:14:07 GMT'
# This is 24h before max Unix time. Remember to fix Django and
# update this test well before 2038 :)
)
self.assertIsInstance(response, HttpResponseNotModified)
def test_invalid_if_modified_since(self):
"""Handle bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = 'file.txt'
invalid_date = 'Mon, 28 May 999999999999 28:25:26 GMT'
response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE=invalid_date)
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response['Content-Length']))
def test_invalid_if_modified_since2(self):
"""Handle even more bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = 'file.txt'
invalid_date = ': 1291108438, Wed, 20 Oct 2010 14:05:00 GMT'
response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE=invalid_date)
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response['Content-Length']))
def test_404(self):
response = self.client.get('/views/%s/non_existing_resource' % self.prefix)
self.assertEqual(404, response.status_code)
class StaticHelperTest(StaticTests):
"""
Test case to make sure the static URL pattern helper works as expected
"""
def setUp(self):
super(StaticHelperTest, self).setUp()
self._old_views_urlpatterns = urls.urlpatterns[:]
urls.urlpatterns += static('/media/', document_root=media_dir)
def tearDown(self):
super(StaticHelperTest, self).tearDown()
urls.urlpatterns = self._old_views_urlpatterns
class StaticUtilsTests(unittest.TestCase):
def test_was_modified_since_fp(self):
"""
Test that a floating point mtime does not disturb was_modified_since.
(#18675)
"""
mtime = 1343416141.107817
header = http_date(mtime)
self.assertFalse(was_modified_since(header, mtime))
| {
"content_hash": "85f199899d8b5afea1a5b9631e6a666c",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 104,
"avg_line_length": 40.59836065573771,
"alnum_prop": 0.6367857863920856,
"repo_name": "AlexHill/django",
"id": "34e4021080dd0d7b50325857d3ab50f4cc224854",
"size": "4953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/view_tests/tests/test_static.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51177"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Python",
"bytes": "9207507"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
""" All the sqlite3 functions needed for querying the db (encapsulates the sql code)
DB object used by the data_retriever"""
import os
import sqlite3
import threading
import time
class DB:
def __init__(self):
# establish a connection w/ the database (check_same_thread=False is possibly sketchy, needs more research)
self.conn = sqlite3.connect('database.db', check_same_thread=False)
# create a lock for synchronization
self.lock = threading.Lock()
self.conn.execute('PRAGMA FOREIGN_KEYS = ON')
# create the tables if not already in DB
self.conn.execute('''CREATE TABLE IF NOT EXISTS USER
(username TEXT PRIMARY KEY NOT NULL,
password TEXT NOT NULL,
question TEXT ,
answer TEXT ,
repo_id INTEGER NOT NULL,
FOREIGN KEY (repo_id) REFERENCES GROUPS(id));''')
self.conn.execute('''CREATE TABLE IF NOT EXISTS FILE
(filename TEXT NOT NULL,
owner TEXT NOT NULL,
timestamp TEXT,
notes TEXT,
group_id INTEGER,
mod_time INTEGER,
PRIMARY KEY (filename, group_id),
FOREIGN KEY (owner) REFERENCES USER(username),
FOREIGN KEY (group_id) REFERENCES GROUPS(id));''')
self.conn.execute('''CREATE TABLE IF NOT EXISTS TAG
(tagname TEXT PRIMARY KEY NOT NULL);''')
self.conn.execute('''CREATE TABLE IF NOT EXISTS GROUPS
(id INTEGER PRIMARY KEY,
groupname TEXT,
user_created BOOLEAN);''')
self.conn.execute('''CREATE TABLE IF NOT EXISTS USER_GROUP
(group_id INTEGER,
username TEXT,
FOREIGN KEY (group_id) REFERENCES GROUPS(id) ON DELETE CASCADE,
FOREIGN KEY (username) REFERENCES USER(username) ON DELETE CASCADE);''')
self.conn.execute('''CREATE TABLE IF NOT EXISTS FILE_TAG
(filename TEXT,
group_id INTEGER,
tagname TEXT,
FOREIGN KEY (filename, group_id) REFERENCES FILE(filename, group_id) ON DELETE CASCADE,
FOREIGN KEY (tagname) REFERENCES TAG(tagname) ON DELETE CASCADE ,
PRIMARY KEY (filename, tagname));''')
self.conn.execute(""" INSERT OR IGNORE INTO GROUPS(id, groupname, user_created)
VALUES (?,?,?)""", (0, 'SHARED_KM_REPO', False))
self.conn.execute(""" INSERT OR IGNORE INTO USER(username, password, repo_id)
VALUES (?,?,?)""", ('DUMMY_SHARED_USER', 'LOL NO PASS', 0))
path_exists = os.path.exists(
os.path.normpath(
os.path.join(
os.getcwd(),
'FILE_REPO',
'SHARED_KM_REPO')))
if not path_exists:
os.makedirs(
os.path.normpath(
os.path.join(
os.getcwd(),
'FILE_REPO',
'SHARED_KM_REPO')))
self.conn.commit()
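# Schema overview (as created above):
#   USER.repo_id -> GROUPS.id          FILE.owner -> USER.username
#   FILE.group_id -> GROUPS.id         USER_GROUP and FILE_TAG are join tables
#   (both join tables cascade on delete)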
def login(self, username, pword):
"""
Attempts to find an entry in the USERS table with the given parameters
:param username: username entered by user
:param pword: password entered by user
:return: None if the username doesn't exist or the password is incorrect,
otherwise the user's personal repo_id
"""
cursor = self.conn.cursor()
cursor.execute("SELECT * FROM USER WHERE username == ? AND password == ?", (username, pword))
# user will either be the one result, or 'None'
user = cursor.fetchone()
if user is None:
return None
elif user[0] == username:
return user[4]
def register(self, username, pword, sec_question, sec_answer):
"""
Attempts to enter a new username and pword into the USERS table
:param username: new username, MUST BE UNIQUE
:param pword: new password
:return: 0 if registration fails (e.g. username is not unique),
otherwise the id of the new personal repo
"""
self.lock.acquire()
result = 0
c = self.conn.cursor()
try:
c.execute("INSERT INTO GROUPS(groupname, user_created) VALUES(?,?)", (username + "_personal_repo", False))
gid = c.lastrowid
c.execute("INSERT INTO USER(username, password, question, answer, repo_id) VALUES(?,?,?,?,?)", (username, pword, sec_question,
sec_answer,gid))
c.execute("INSERT INTO USER_GROUP(group_id, username) VALUES(?,?)", (gid, username))
self.conn.commit()
result = gid
# username is not unique
except sqlite3.IntegrityError:
print('Registration failed: username already exists')
except Exception as e:
print('Exception in register:', e)
finally:
self.lock.release()
return result
def create_group(self, gname, members):
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO GROUPS(groupname, user_created) VALUES(?,?)", (gname, True))
gid = cursor.lastrowid
cursor.executemany("INSERT OR IGNORE INTO USER_GROUP(group_id, username) VALUES(?,?)",
[(gid, member) for member in members])
self.conn.commit()
except sqlite3.Error as e:
print('Error in create_group', e)
return 0, None
return cursor.rowcount, gid
def delete_group(self, gname, members):
try:
cursor = self.conn.cursor()
cursor.execute("DELETE FROM GROUPS WHERE groupname=?", (gname))
except sqlite3.error as e:
print ('Error in delete_group', e)
return cursor.rowcount == 1
def add_user_to_group(self, gid, uname):
cursor = self.conn.cursor()
self.lock.acquire()
try:
cursor.execute("INSERT OR IGNORE INTO USER_GROUP(group_id, username) VALUES(?,?)",
(gid, uname))
self.conn.commit()
except sqlite3.Error as e:
print('Error in add_user_to_group', e)
finally:
self.lock.release()
return cursor.rowcount == 1
def does_user_exists(self, uname):
cursor = self.conn.cursor()
try:
cursor.execute("SELECT * FROM USER WHERE username=?", (uname,))
except sqlite3.Error as e:
print('Error in does_user_exists', e)
return False
# fetchone() returns None when no matching row exists
return cursor.fetchone() is not None
def delete_user_from_group(self, gid, uname):
cursor = self.conn.cursor()
self.lock.acquire()
try:
cursor.execute("DELETE FROM USER_GROUP WHERE username=? AND group_id=?",
(uname, gid))
self.conn.commit()
except sqlite3.Error as e:
print('Error in delete_user_from_group', e)
finally:
self.lock.release()
return cursor.rowcount == 1
def retrieve_repo(self, gid):
cursor = self.conn.cursor()
try:
cursor.execute("SELECT * FROM FILE WHERE group_id=?",
(gid,))
files = cursor.fetchall()
result = []
print(files)
for file in files:
cursor.execute('SELECT tagname FROM FILE_TAG WHERE filename=? AND group_id=?',
(file[0], file[4]))
result.append(file + tuple(tag[0] for tag in cursor.fetchall()))
return result
except sqlite3.Error as e:
print('Error in retrieve_repo', e)
def get_username(self, gid):
cursor = self.conn.cursor()
try:
cursor.execute("SELECT username FROM USER_GROUP WHERE group_id=?",
(gid,))
return cursor.fetchone()[0]
except sqlite3.Error as e:
print('Error in get_username', e)
def repo_name(self, gid):
cursor = self.conn.cursor()
try:
cursor.execute("SELECT groupname FROM GROUPS WHERE id=?",
(gid,))
return cursor.fetchone()[0]
except sqlite3.Error as e:
print('Error in repo_name', e)
def upload(self, file_name, tags, owner, group_id, notes, mod_time): # Written by Ayad
"""
This method inserts data into the database
:param mod_time:
:param notes:
:param group_id:
:param file_name:
:param tags:
:param owner:
:return:
"""
try:
self.conn.execute("""INSERT INTO
FILE(filename, owner, timestamp, group_id, notes, mod_time)
VALUES (?,?,?,?,?,?)""",
(file_name, owner, time.time(), group_id, notes, mod_time))
for tag in tags:
self.conn.execute("INSERT OR IGNORE INTO TAG VALUES(?)",
(tag,))
self.conn.execute("INSERT INTO FILE_TAG(filename, group_id, tagname) VALUES(?,?,?)",
(file_name, group_id, tag))
self.conn.commit()
except sqlite3.Error as e:
print("An Error occurred in upload: " + str(e.args) + "\n\t\t all vars = " + str(locals()))
def get_personal_repo_id(self, uname):
return self.conn.execute('SELECT repo_id FROM USER WHERE username=?', (uname,)).fetchone()[0]
def get_groups(self, uname):
cursor = self.conn.cursor()
try:
cursor.execute("""SELECT GROUPS.id, GROUPS.groupname FROM
GROUPS INNER JOIN USER_GROUP
ON GROUPS.id = USER_GROUP.group_id
WHERE username = ? AND
GROUPS.groupname NOT LIKE '%personal_repo'
""",
(uname,))
except Exception as e:
print(e)
return cursor.fetchall()
def delete(self, fname, gid):
"""
Attempts to delete fname from the FILE table
:param fname: name of file
:param gid: group id the file belongs to
:return: False if file is not found
True if the file is found in the FILES table and is deleted
"""
self.lock.acquire()
cursor = self.conn.cursor()
try:
cursor.execute('DELETE FROM FILE WHERE filename=? AND group_id=?;', (fname, gid))
self.conn.commit()
except sqlite3.Error:
return False
finally:
self.lock.release()
return cursor.rowcount > 0
def __contains__(self, filename, owner):
cursor = self.conn.cursor()
cursor.execute("""SELECT FILE.filename
FROM FILE
WHERE filename = ? AND owner = ?
""", (filename, owner))
# sqlite3 reports rowcount as -1 for SELECT statements, so test fetchone() instead
return cursor.fetchone() is not None
def search(self, query, owner):
"""
Attempts to find all files matching the user's Query.
:param query: a query supplied by the user
:type query: dict
:return results: the set of rows in the format of (filename, timestamp, tagname) from the search
"""
results = set()
try:
cursor = self.conn.cursor()
if query['fname']:
cursor.execute(
"""SELECT FILE.filename, FILE.timestamp, TAG.tagname
FROM FILE INNER JOIN
(TAG INNER JOIN FILE_TAG
ON TAG.tagname = FILE_TAG.tagname)
ON FILE.filename = FILE_TAG.filename
WHERE FILE.filename LIKE ? AND FILE.owner = ?
""", ('%' + query['fname'] + '%', owner))
results.update(row for row in cursor)
if 'tags' in query:
tags = set(query['tags'])
if results: # filter results (Intersection of sets)
results = {result for result in results if result[2] in tags}
# result[2] == tagname
else: # (union of sets)
for tag in tags:
cursor.execute(
""" SELECT FILE.filename, FILE.timestamp, TAG.tagname
FROM FILE INNER JOIN
(TAG INNER JOIN FILE_TAG
ON TAG.tagname = FILE_TAG.tagname)
ON FILE.filename = FILE_TAG.filename
WHERE TAG.tagname LIKE ? AND FILE.owner = ?
""", ('%' + tag + '%', owner))
results.update(row for row in cursor if row not in results)
if 'ext' in query:
if results:
results = {result for result in results if result[0].endswith(query['ext'])}
else:
cursor.execute("SELECT * FROM FILE WHERE filename LIKE ? AND owner", ('%' + query['ext'], owner))
except Exception:
raise Exception
return results
if __name__ == '__main__':
db = DB()
results = db.search({'fname': 'py', 'tags': ['tag1']}, "owner")
print(results)
| {
"content_hash": "d5ef43639d7a41429a169da8ff30bf0a",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 138,
"avg_line_length": 39.08196721311475,
"alnum_prop": 0.49559563758389263,
"repo_name": "operant/knowledge-management",
"id": "985488eb74d49a5d51d3f2dfbb489bc4b9093ffe",
"size": "14593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/models/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150654"
}
],
"symlink_target": ""
} |
import os
import sys
import time
from sys import platform as _platform
class colors:
red = '\033[31m' # red
blue = '\033[34m' # blue
magenta = '\033[35m' # purple
cyan = '\033[36m' # cyan
reset = "\033[0m"
def main_master():
if _platform == "linux" or _platform == "linux2":
print '[!] You are on Linux.'
elif _platform == "darwin":
print '[!] You are on OSX.'
else:
return
print colors.cyan + '\n[1] Who is my master?'
print colors.magenta + '[2] Who do I listen to?'
print colors.reset
master_command = raw_input('\nGive me a command: ')
if master_command == '1':
os.system('say You are my master.')
os.system('whoami')
main_master()
elif master_command == '2':
os.system('say I listen to you my master.')
main_master()
else:
os.system('say Shutting down.')
time.sleep(3)
sys.exit()
main_master()
| {
"content_hash": "ce6c0a4581661791ec44baa7c3a047ec",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 57,
"avg_line_length": 28.867924528301888,
"alnum_prop": 0.5294117647058824,
"repo_name": "triggerNZ/illacceptanything",
"id": "8b6dbf17f581fe3415d8fa3f612c887399b1bebc",
"size": "1530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/OS_MASTER.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "AppleScript",
"bytes": "61"
},
{
"name": "Arduino",
"bytes": "709"
},
{
"name": "Assembly",
"bytes": "2005"
},
{
"name": "Brainfuck",
"bytes": "66542"
},
{
"name": "C",
"bytes": "38598"
},
{
"name": "C#",
"bytes": "55496"
},
{
"name": "C++",
"bytes": "16638"
},
{
"name": "CMake",
"bytes": "235"
},
{
"name": "CSS",
"bytes": "97227"
},
{
"name": "Clojure",
"bytes": "94838"
},
{
"name": "CoffeeScript",
"bytes": "153782"
},
{
"name": "Common Lisp",
"bytes": "1120"
},
{
"name": "Crystal",
"bytes": "7261"
},
{
"name": "Dart",
"bytes": "800"
},
{
"name": "Eagle",
"bytes": "1297646"
},
{
"name": "Emacs Lisp",
"bytes": "60"
},
{
"name": "Go",
"bytes": "19658"
},
{
"name": "HTML",
"bytes": "6432616"
},
{
"name": "Haskell",
"bytes": "100"
},
{
"name": "JSONiq",
"bytes": "536"
},
{
"name": "Java",
"bytes": "14922"
},
{
"name": "JavaScript",
"bytes": "5422014"
},
{
"name": "Julia",
"bytes": "25"
},
{
"name": "KiCad",
"bytes": "321244"
},
{
"name": "Lua",
"bytes": "336811"
},
{
"name": "Makefile",
"bytes": "1019"
},
{
"name": "OCaml",
"bytes": "78"
},
{
"name": "Objective-C",
"bytes": "3260"
},
{
"name": "PHP",
"bytes": "2035"
},
{
"name": "Python",
"bytes": "107865"
},
{
"name": "Racket",
"bytes": "4918"
},
{
"name": "Ruby",
"bytes": "18502"
},
{
"name": "Rust",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "42068"
},
{
"name": "Swift",
"bytes": "12055"
},
{
"name": "VimL",
"bytes": "60880"
},
{
"name": "Visual Basic",
"bytes": "1007"
}
],
"symlink_target": ""
} |
"""
PostgreSQLConf - file /var/lib/pgsql/data/postgresql.conf
=========================================================
The PostgreSQL configuration file is in a fairly standard 'key = value'
format, with the equals sign being optional. A hash mark (#) marks the
rest of the line as a comment.
The configuration then appears as a dictionary in the `data` property.
This parser does not attempt to know the default value of any property; it
only shows what's defined in the configuration file as given.
This parser also provides several utility functions to make sense of values
specific to PostgreSQL. These are:
* `as_duration(property)`
Convert the value (given in milliseconds, seconds, minutes, hours or
days) to seconds (as a floating point value).
* `as_boolean(property)`
If the value is 'on', 'true', 'yes', or '1', return True. If the value
is 'off', 'false', 'no' or '0', return False. Unique prefixes of these
are acceptable and case is ignored.
* `as_memory_bytes(property)`
Convert a number given in KB, MB or GB into bytes, where 1 kilobyte is
1024 bytes.
All three type conversion functions will raise a ValueError if the value
doesn't match the spec or cannot be converted to the correct type.
Example:
>>> pgsql = shared[PostgreSQLConf]
>>> 'port' in pgsql
True
>>> pgsql['port']
'5432'
>>>
"""
from .. import Parser, parser, get_active_lines, LegacyItemAccess
import re
@parser("postgresql.conf")
class PostgreSQLConf(LegacyItemAccess, Parser):
"""
Parses postgresql.conf and converts it into a dictionary of properties.
"""
_value_error_str = "Do not recognise '{val}' for setting '{item}' " +\
"as a {_type}"
def parse_content(self, content):
"""
Parsing rules from :
https://www.postgresql.org/docs/9.3/static/config-setting.html
One parameter is specified per line. The equal sign between name
and value is optional. Whitespace is insignificant and blank lines
are ignored. Hash marks (#) designate the remainder of the line as
a comment. Parameter values that are not simple identifiers or
numbers must be single-quoted. To embed a single quote in a
parameter value, write either two quotes (preferred) or
backslash-quote.
"""
pg_dict = {}
for line in get_active_lines(content):
# Comments and blank lines removed by get_active_lines
# Split on equals or on first word
if '=' in line:
key, value = [s.strip() for s in line.split("=", 1)]
else:
key, value = [s.strip() for s in line.split(' ', 1)]
# If value is quoted, quotes appear first and last - remove them.
if value[0] == "'" and value[-1] == "'":
value = value[1:-1]
# If value contains '' or \', change to single quote
if "''" in value:
value = value.replace("''", "'")
if "\\'" in value:
value = value.replace("\\'", "'")
# Now save value in key
pg_dict[key] = value
self.data = pg_dict
def as_duration(self, item, default=None):
"""
Postgres's time durations for checkpoint_timeout can have 'ms', 's',
'min', 'h', or 'd' suffixes. We convert all of them here to seconds.
See https://www.postgresql.org/docs/9.3/static/config-setting.html :-
"Valid time units are ms (milliseconds), s (seconds), min (minutes),
h (hours), and d (days)"
We return a floating point number because of the possibility of
conversion from milliseconds, and because maybe someone will say
8.4h.
"""
if not item:
return None
if item in self.data:
value = self.data[item]
else:
value = default
if value is None:
return None
if isinstance(value, (int, float)):
return float(value)
dur_re = re.compile(r'^(?P<number>\d+)(?P<suffix>ms|s|min|h|d)?$')
length_of = {'ms': 0.001, 's': 1, 'min': 60, 'h': 3600, 'd': 86400}
match = dur_re.search(value)
if match:
# Do we have a suffix at all? If not, assume seconds, return float
number, suffix = match.group('number', 'suffix')
if suffix is None:
return float(number)
# Do we have a matching suffix?
# assert: suffix in length_of, due to regex
return float(number) * length_of[suffix]
else:
raise ValueError(self._value_error_str.format(
val=value, item=item, _type='duration'
))
def as_boolean(self, item, default=None):
"""
See https://www.postgresql.org/docs/9.3/static/config-setting.html :-
"Boolean values can be written as on, off, true, false, yes, no, 1,
0 (all case-insensitive) or any unambiguous prefix of these."
"""
if not item:
return None
if item in self.data:
value = self.data[item]
else:
value = default
if value is None or isinstance(value, bool):
return value
lval = value.lower()
if lval in ('on', 't', 'tr', 'tru', 'true', 'y', 'ye', 'yes', '1'):
return True
if lval in ('of', 'off', 'f', 'fa', 'fal', 'fals', 'false', 'n', 'no', '0'):
return False
raise ValueError(self._value_error_str.format(
val=value, item=item, _type='boolean'
))
def as_memory_bytes(self, item, default=None):
"""
See https://www.postgresql.org/docs/9.3/static/config-setting.html :-
"Valid memory units are kB (kilobytes), MB (megabytes), and GB
(gigabytes). Note that the multiplier for memory units is 1024, not
1000."
"""
if not item:
return None
size_of = {'kB': 1024, 'MB': 1048576, 'GB': 1048576 * 1024}
if item in self.data:
value = self.data[item]
else:
value = default
if value is None:
return None
# Don't bother to do conversions if we're already integer-esque
if isinstance(value, int):
return value
elif value.isdigit():
return int(value)
suffix = value[-2:]
if suffix in size_of:
return int(value[:-2]) * size_of[suffix]
else:
raise ValueError(self._value_error_str.format(
val=value, item=item, _type='memory unit'
))
| {
"content_hash": "0373f97717e63f2f3006f2284219b85c",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 84,
"avg_line_length": 37.88202247191011,
"alnum_prop": 0.5642888921844876,
"repo_name": "PaulWay/insights-core",
"id": "629c153e1ddf22f97e2b83c398c28457ed887ed4",
"size": "6743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/postgresql_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "19339"
},
{
"name": "Python",
"bytes": "2479830"
},
{
"name": "Shell",
"bytes": "892"
}
],
"symlink_target": ""
} |
"""Testing for datatset loading."""
# Author: Johann Faouzi <johann.faouzi@gmail.com>
# License: BSD-3-Clause
import numpy as np
import pytest
from ..load import (load_basic_motions, load_coffee, load_gunpoint,
load_pig_central_venous_pressure)
@pytest.mark.parametrize(
'function, data_train_shape, data_test_shape, target_train_shape, '
'target_test_shape, n_classes',
[(load_basic_motions, (40, 6, 100), (40, 6, 100), (40,), (40,), 4),
(load_coffee, (28, 286), (28, 286), (28,), (28,), 2),
(load_gunpoint, (50, 150), (150, 150), (50,), (150,), 2),
(load_pig_central_venous_pressure, (104, 2000), (208, 2000), (104,),
(208,), 52)]
)
@pytest.mark.parametrize('return_X_y', [False, True])
def test_load_functions(function, data_train_shape, data_test_shape,
target_train_shape, target_test_shape, n_classes,
return_X_y):
"""Test the loading functions."""
res = function(return_X_y=return_X_y)
if return_X_y:
data_train, data_test, target_train, target_test = res
else:
data_train = res.data_train
data_test = res.data_test
target_train = res.target_train
target_test = res.target_test
assert isinstance(res.url, str)
assert isinstance(res.DESCR, str)
for data in (data_train, data_test, target_train, target_test):
assert isinstance(data, np.ndarray)
assert data_train.shape == data_train_shape
assert data_test.shape == data_test_shape
assert target_train.shape == target_train_shape
assert target_test.shape == target_test_shape
assert np.unique(target_train).size == n_classes
assert np.unique(target_test).size == n_classes
| {
"content_hash": "46a5390752785dd779abd54867d794dc",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 39.59090909090909,
"alnum_prop": 0.6274397244546498,
"repo_name": "johannfaouzi/pyts",
"id": "63f6d9e1c3dfb8b775ea055ce263faac504c5d8f",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyts/datasets/tests/test_load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "675880"
}
],
"symlink_target": ""
} |
default_app_config = 'queue.apps.QueueConfig'
| {
"content_hash": "fe24dfb650450aab029981dba6834c68",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 45,
"avg_line_length": 46,
"alnum_prop": 0.782608695652174,
"repo_name": "ngageoint/scale",
"id": "b87911a66d0c0137763c5c4d66525862e1df9c15",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scale/queue/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7219"
},
{
"name": "CSS",
"bytes": "12193"
},
{
"name": "Dockerfile",
"bytes": "14853"
},
{
"name": "HCL",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "48818"
},
{
"name": "JavaScript",
"bytes": "503"
},
{
"name": "Makefile",
"bytes": "5852"
},
{
"name": "Python",
"bytes": "5295677"
},
{
"name": "Shell",
"bytes": "26650"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from pandas.compat import pa_version_under2p0
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_datetime64tz_dtype
import pandas as pd
import pandas._testing as tm
from pandas.core.api import NumericIndex
from pandas.tests.base.common import allow_na_ops
def test_unique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under2p0 and str(index_or_series_obj.dtype) == "string[pyarrow]",
):
result = obj.unique()
# dict.fromkeys preserves the order
unique_values = list(dict.fromkeys(obj.values))
if isinstance(obj, pd.MultiIndex):
expected = pd.MultiIndex.from_tuples(unique_values)
expected.names = obj.names
tm.assert_index_equal(result, expected, exact=True)
elif isinstance(obj, pd.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(unique_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(unique_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_unique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj._values
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under2p0 and str(index_or_series_obj.dtype) == "string[pyarrow]",
):
result = obj.unique()
unique_values_raw = dict.fromkeys(obj.values)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)]
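    # (illustrative aside, not part of the original test: two float("nan")
    #  objects hash alike but compare unequal, so dict.fromkeys keeps both,
    #  while a repeated None collapses to a single key:
    #    len(dict.fromkeys([float("nan"), float("nan")])) == 2
    #    len(dict.fromkeys([None, None])) == 1)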
unique_values = [null_obj] + unique_values_not_null
if isinstance(obj, pd.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(unique_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(unique_values, dtype=obj.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_nunique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under2p0 and str(index_or_series_obj.dtype) == "string[pyarrow]",
):
expected = len(obj.unique())
assert obj.nunique(dropna=False) == expected
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_nunique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj._values
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
if isinstance(obj, pd.CategoricalIndex):
assert obj.nunique() == len(obj.categories)
assert obj.nunique(dropna=False) == len(obj.categories) + 1
else:
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under2p0 and str(index_or_series_obj.dtype) == "string[pyarrow]",
):
num_unique_values = len(obj.unique())
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under2p0 and str(index_or_series_obj.dtype) == "string[pyarrow]",
):
assert obj.nunique() == max(0, num_unique_values - 1)
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under2p0 and str(index_or_series_obj.dtype) == "string[pyarrow]",
):
assert obj.nunique(dropna=False) == max(0, num_unique_values)
@pytest.mark.single_cpu
@pytest.mark.xfail(
reason="Flaky in the CI. Remove once CI has a single build: GH 44584", strict=False
)
def test_unique_bad_unicode(index_or_series):
# regression test for #34550
    uval = "\ud83d"  # lone high surrogate (half of a smiley emoji)
obj = index_or_series([uval] * 2)
result = obj.unique()
if isinstance(obj, pd.Index):
expected = pd.Index(["\ud83d"], dtype=object)
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(["\ud83d"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_nunique_dropna(dropna):
# GH37566
ser = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT])
res = ser.nunique(dropna)
    assert res == (1 if dropna else 5)
| {
"content_hash": "786b91e1016b95a5287c4855c93ba93e",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 88,
"avg_line_length": 36.61392405063291,
"alnum_prop": 0.6551426101987899,
"repo_name": "datapythonista/pandas",
"id": "eac1e35699585a6a005b653f6668843080588b50",
"size": "5785",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/base/test_unique.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "355524"
},
{
"name": "CSS",
"bytes": "1662"
},
{
"name": "Cython",
"bytes": "1178139"
},
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "HTML",
"bytes": "456449"
},
{
"name": "Makefile",
"bytes": "505"
},
{
"name": "Python",
"bytes": "19048364"
},
{
"name": "Shell",
"bytes": "10511"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
"""
Customadmin is an application that allow admin interface's customization.
""" | {
"content_hash": "31da16960d90504e9aee38a3d782c5bd",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 74,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7682926829268293,
"repo_name": "20tab/upy",
"id": "6530665367dd252c629e3d9ab688d11c1870d54f",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upy/contrib/customadmin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "65284"
},
{
"name": "JavaScript",
"bytes": "614193"
},
{
"name": "PHP",
"bytes": "172931"
},
{
"name": "Python",
"bytes": "357681"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
} |
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v12.errors",
marshal="google.ads.googleads.v12",
manifest={"KeywordPlanCampaignErrorEnum",},
)
class KeywordPlanCampaignErrorEnum(proto.Message):
r"""Container for enum describing possible errors from applying a
keyword plan campaign.
"""
class KeywordPlanCampaignError(proto.Enum):
r"""Enum describing possible errors from applying a keyword plan
campaign.
"""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_NAME = 2
INVALID_LANGUAGES = 3
INVALID_GEOS = 4
DUPLICATE_NAME = 5
MAX_GEOS_EXCEEDED = 6
MAX_LANGUAGES_EXCEEDED = 7
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "b31eddc143c11e71fbd6ea674145c6ea",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 24.93548387096774,
"alnum_prop": 0.6429495472186287,
"repo_name": "googleads/google-ads-python",
"id": "c343069718721376c4502ee0602c5faadf98fd50",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/errors/types/keyword_plan_campaign_error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_dns_record
short_description: Manages DNS records on Vultr.
description:
- Create, update and remove DNS records.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- The record name (subrecord).
default: ""
aliases: [ subrecord ]
domain:
description:
- The domain the record is related to.
required: true
record_type:
description:
- Type of the record.
default: A
choices:
- A
- AAAA
- CNAME
- MX
- SRV
- CAA
- TXT
- NS
- SSHFP
aliases: [ type ]
data:
description:
- Data of the record.
- Required if C(state=present) or C(multiple=yes).
ttl:
description:
- TTL of the record.
default: 300
multiple:
description:
- Whether to use more than one record with similar C(name) including no name and C(record_type).
- Only allowed for a few record types, e.g. C(record_type=A), C(record_type=NS) or C(record_type=MX).
- C(data) will not be updated, instead it is used as a key to find existing records.
default: no
type: bool
priority:
description:
- Priority of the record.
default: 0
state:
description:
- State of the DNS record.
default: present
choices: [ present, absent ]
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: Ensure an A record exists
vultr_dns_record:
name: www
domain: example.com
data: 10.10.10.10
ttl: 3600
- name: Ensure a second A record exists for round robin LB
vultr_dns_record:
name: www
domain: example.com
data: 10.10.10.11
ttl: 60
multiple: yes
- name: Ensure a CNAME record exists
vultr_dns_record:
name: web
record_type: CNAME
domain: example.com
data: www.example.com
- name: Ensure MX record exists
vultr_dns_record:
record_type: MX
domain: example.com
data: "{{ item.data }}"
priority: "{{ item.priority }}"
multiple: yes
with_items:
- { data: mx1.example.com, priority: 10 }
- { data: mx2.example.com, priority: 10 }
- { data: mx3.example.com, priority: 20 }
- name: Ensure a record is absent
local_action:
module: vultr_dns_record
name: www
domain: example.com
state: absent
- name: Ensure MX record is absent in case multiple exists
vultr_dns_record:
record_type: MX
domain: example.com
data: mx1.example.com
multiple: yes
state: absent
'''
RETURN = '''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_dns_record:
description: Response from Vultr API
returned: success
type: complex
contains:
id:
description: The ID of the DNS record.
returned: success
type: int
sample: 1265277
name:
description: The name of the DNS record.
returned: success
type: str
sample: web
record_type:
description: The name of the DNS record.
returned: success
type: str
sample: web
data:
description: Data of the DNS record.
returned: success
type: str
sample: 10.10.10.10
domain:
description: Domain the DNS record is related to.
returned: success
type: str
sample: example.com
priority:
description: Priority of the DNS record.
returned: success
type: int
sample: 10
ttl:
description: Time to live of the DNS record.
returned: success
type: int
sample: 300
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
RECORD_TYPES = [
'A',
'AAAA',
'CNAME',
'MX',
'TXT',
'NS',
'SRV',
'CAA',
'SSHFP'
]
class AnsibleVultrDnsRecord(Vultr):
def __init__(self, module):
super(AnsibleVultrDnsRecord, self).__init__(module, "vultr_dns_record")
self.returns = {
'RECORDID': dict(key='id'),
'name': dict(),
'record': dict(),
'priority': dict(),
'data': dict(),
'type': dict(key='record_type'),
'ttl': dict(),
}
def get_record(self):
records = self.api_query(path="/v1/dns/records?domain=%s" % self.module.params.get('domain'))
multiple = self.module.params.get('multiple')
data = self.module.params.get('data')
name = self.module.params.get('name')
record_type = self.module.params.get('record_type')
result = {}
for record in records or []:
if record.get('type') != record_type:
continue
if record.get('name') == name:
if not multiple:
if result:
self.module.fail_json(msg="More than one record with record_type=%s and name=%s params. "
"Use multiple=yes for more than one record." % (record_type, name))
else:
result = record
elif record.get('data') == data:
return record
return result
def present_record(self):
record = self.get_record()
if not record:
record = self._create_record(record)
else:
record = self._update_record(record)
return record
def _create_record(self, record):
self.result['changed'] = True
data = {
'name': self.module.params.get('name'),
'domain': self.module.params.get('domain'),
'data': self.module.params.get('data'),
'type': self.module.params.get('record_type'),
'priority': self.module.params.get('priority'),
'ttl': self.module.params.get('ttl'),
}
self.result['diff']['before'] = {}
self.result['diff']['after'] = data
if not self.module.check_mode:
self.api_query(
path="/v1/dns/create_record",
method="POST",
data=data
)
record = self.get_record()
return record
def _update_record(self, record):
data = {
'RECORDID': record['RECORDID'],
'name': self.module.params.get('name'),
'domain': self.module.params.get('domain'),
'data': self.module.params.get('data'),
'type': self.module.params.get('record_type'),
'priority': self.module.params.get('priority'),
'ttl': self.module.params.get('ttl'),
}
has_changed = [k for k in data if k in record and data[k] != record[k]]
if has_changed:
self.result['changed'] = True
self.result['diff']['before'] = record
self.result['diff']['after'] = record.copy()
self.result['diff']['after'].update(data)
if not self.module.check_mode:
self.api_query(
path="/v1/dns/update_record",
method="POST",
data=data
)
record = self.get_record()
return record
def absent_record(self):
record = self.get_record()
if record:
self.result['changed'] = True
data = {
'RECORDID': record['RECORDID'],
'domain': self.module.params.get('domain'),
}
self.result['diff']['before'] = record
self.result['diff']['after'] = {}
if not self.module.check_mode:
self.api_query(
path="/v1/dns/delete_record",
method="POST",
data=data
)
return record
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
domain=dict(required=True),
name=dict(default="", aliases=['subrecord']),
state=dict(choices=['present', 'absent'], default='present'),
ttl=dict(type='int', default=300),
record_type=dict(choices=RECORD_TYPES, default='A', aliases=['type']),
multiple=dict(type='bool', default=False),
priority=dict(type='int', default=0),
data=dict()
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['data']),
('multiple', True, ['data']),
],
supports_check_mode=True,
)
vultr_record = AnsibleVultrDnsRecord(module)
if module.params.get('state') == "absent":
record = vultr_record.absent_record()
else:
record = vultr_record.present_record()
result = vultr_record.get_result(record)
module.exit_json(**result)
if __name__ == '__main__':
main()
| {
"content_hash": "c99adeefd18b17a9d6486425e08a90ba",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 117,
"avg_line_length": 27.002732240437158,
"alnum_prop": 0.5617727410705251,
"repo_name": "thaim/ansible",
"id": "1aec81f60b359a8554dbd61925de6445b8ce10da",
"size": "10067",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vultr/vultr_dns_record.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import re
class Preprocess:
def __init__(self):
self.html_regex = re.compile(
r'(http|https)://[a-zA-Z0-9-./"#$%&\':?=_]+')
self.newline_regex = re.compile(r'\n')
self.cont_spaces_regex = re.compile(r'\s+')
def _subs(self, regex: "re obj", repl: str, text: str):
return regex.sub(repl, text)
def remove_link(self, text: str) -> str:
return self._subs(self.html_regex, "", text)
def remove_newline(self, text: str) -> str:
return self._subs(self.newline_regex, "", text)
def remove_spaces(self, text: str) -> str:
return self._subs(self.cont_spaces_regex, "", text)
def convert_cont_spaces(self, text: str) -> str:
return self._subs(self.cont_spaces_regex, " ", text)
def strip(self, text: str) -> str:
return text.strip()
def execute(self, text: str) -> str:
funcs = [
self.remove_newline,
self.remove_link,
self.convert_cont_spaces,
self.strip]
_text = text
for func in funcs:
_text = func(_text)
return _text
class Twitter(Preprocess):
def __init__(self):
Preprocess.__init__(self)
username = r'@[a-zA-Z0-9_]+'
tag = r'#[a-zA-Z0-9_]+'
self.mention_regex = re.compile(r'{}'.format(username))
self.retweet_regex = re.compile(r'RT {}:'.format(username))
self.tag_regex = re.compile(r'{}'.format(tag))
def remove_mention(self, text: str) -> str:
return self._subs(self.mention_regex, "", text)
def remove_retweet(self, text: str) -> str:
return self._subs(self.retweet_regex, "", text)
def remove_tag(self, text: str) -> str:
return self._subs(self.tag_regex, "", text)
def execute(self, text: str) -> str:
funcs = [
self.remove_newline,
self.remove_link,
self.remove_retweet,
self.remove_mention,
self.remove_tag,
self.convert_cont_spaces,
self.strip]
_text = text
for func in funcs:
_text = func(_text)
return _text
if __name__ == '__main__':
import sys
pre = Preprocess()
for filename in sys.argv[1:]:
print(filename)
with open(filename, "r") as f:
for line in f:
_line = line.strip()
print(pre.execute(_line))
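    # Hedged extra demo on a made-up tweet (not part of the original script):
    tw = Twitter()
    print(tw.execute('RT @user: check https://t.co/x #nlp'))  # -> 'check'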
| {
"content_hash": "03d1b53fdc0847b2311874d4c5e75894",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 67,
"avg_line_length": 27.670454545454547,
"alnum_prop": 0.5330595482546201,
"repo_name": "kenkov/nlp",
"id": "8960747f3cf742354351d6fdb273f69031d89329",
"size": "2474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocessing/preprocessing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1911"
},
{
"name": "Haskell",
"bytes": "5191"
},
{
"name": "JavaScript",
"bytes": "96"
},
{
"name": "Makefile",
"bytes": "566"
},
{
"name": "Python",
"bytes": "17089"
}
],
"symlink_target": ""
} |
import datreant.core as dtr
import datreant.data.attach
import pandas as pd
import numpy as np
import pytest
import os
import py
from datreant.data.tests import test_data
class TestTreant:
treantname = 'testtreant'
treanttype = 'Treant'
treantclass = dtr.treants.Treant
@pytest.fixture
def treant(self, tmpdir):
with tmpdir.as_cwd():
c = dtr.treants.Treant(TestTreant.treantname)
return c
class TestData:
"""Test data storage and retrieval"""
class DataMixin:
"""Mixin class for data storage tests.
Contains general tests to be used for all storable data formats.
"""
handle = 'testdata'
def test_add_data(self, treant, datastruct):
treant.data.add(self.handle, datastruct)
assert os.path.exists(os.path.join(treant.abspath,
self.handle,
self.datafile))
def test_remove_data(self, treant, datastruct):
treant.data.add(self.handle, datastruct)
assert os.path.exists(os.path.join(treant.abspath,
self.handle,
self.datafile))
treant.data.remove('testdata')
assert not os.path.exists(os.path.join(treant.abspath,
self.handle,
self.datafile))
# check that directory got deleted, too
assert not os.path.exists(os.path.join(treant.abspath,
self.handle))
def test_retrieve_data(self, treant, datastruct):
treant.data.add(self.handle, datastruct)
np.testing.assert_equal(treant.data.retrieve(self.handle),
datastruct)
np.testing.assert_equal(treant.data[self.handle],
datastruct)
class PandasMixin(DataMixin):
"""Mixin class for pandas tests"""
datafile = datreant.data.pddata.pddatafile
def test_retrieve_data(self, treant, datastruct):
treant.data.add(self.handle, datastruct)
np.testing.assert_equal(
treant.data.retrieve(self.handle).values,
datastruct.values)
np.testing.assert_equal(
treant.data[self.handle].values,
datastruct.values)
# TODO
class AppendablesMixin:
"""Mixin class for pandas objects that we expect should append"""
def test_append_data(self, treant, datastruct):
index = datastruct.index
for i in range(5):
treant.data.append(self.handle, datastruct)
stored = treant.data.retrieve(self.handle)
equiv = pd.concat([datastruct]*5)
np.testing.assert_equal(stored, equiv)
class Test_Series(test_data.Series, PandasMixin):
pass
class Test_DataFrame(test_data.DataFrame, PandasMixin):
pass
class Test_Blank_DataFrame(test_data.Blank_DataFrame, PandasMixin):
pass
class Test_Wide_Blank_DataFrame(test_data.Wide_Blank_DataFrame,
PandasMixin):
pass
class Test_Thin_Blank_DataFrame(test_data.Thin_Blank_DataFrame,
PandasMixin):
pass
class Test_Panel(test_data.Panel, PandasMixin):
pass
class Test_Panel4D(test_data.Panel4D, PandasMixin):
pass
class NumpyMixin(DataMixin):
"""Test numpy datastructure storage and retrieval"""
datafile = datreant.data.npdata.npdatafile
class Test_NumpyScalar(test_data.NumpyScalar, NumpyMixin):
pass
class Test_Numpy1D(test_data.Numpy1D, NumpyMixin):
pass
class Test_Numpy2D(test_data.Numpy2D, NumpyMixin):
pass
class Test_Wide_Numpy2D(test_data.Wide_Numpy2D, NumpyMixin):
pass
class Test_Thin_Numpy2D(test_data.Thin_Numpy2D, NumpyMixin):
pass
class Test_Numpy3D(test_data.Numpy3D, NumpyMixin):
pass
class Test_Numpy4D(test_data.Numpy4D, NumpyMixin):
pass
class PythonMixin(DataMixin):
"""Test pandas datastructure storage and retrieval"""
datafile = datreant.data.pydata.pydatafile
def test_overwrite_data(self, treant, datastruct):
treant.data[self.handle] = datastruct
# overwrite the data with a scalar
treant.data[self.handle] = 23
assert treant.data[self.handle] == 23
class Test_List(test_data.List, PythonMixin):
pass
class Test_Dict(test_data.Dict, PythonMixin):
pass
class Test_Tuple(test_data.Tuple, PythonMixin):
pass
class Test_Set(test_data.Set, PythonMixin):
pass
class Test_Dict_Mix(test_data.Dict_Mix, PythonMixin):
pass
| {
"content_hash": "67376ca66137f9272649a2d7f062d70d",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 77,
"avg_line_length": 33.67080745341615,
"alnum_prop": 0.5368013281682347,
"repo_name": "datreant/datreant.data",
"id": "52a8e30e6f9b6d7e527592cfbf6ed365002d6823",
"size": "5422",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/datreant/data/tests/test_treants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "50877"
}
],
"symlink_target": ""
} |
"""
Cmd2 unit/functional testing
"""
import sys
from contextlib import (
redirect_stderr,
redirect_stdout,
)
from typing import (
List,
Optional,
Union,
)
from unittest import (
mock,
)
from pytest import (
fixture,
)
import cmd2
from cmd2.rl_utils import (
readline,
)
from cmd2.utils import (
StdSim,
)
def verify_help_text(
cmd2_app: cmd2.Cmd, help_output: Union[str, List[str]], verbose_strings: Optional[List[str]] = None
) -> None:
"""This function verifies that all expected commands are present in the help text.
:param cmd2_app: instance of cmd2.Cmd
:param help_output: output of help, either as a string or list of strings
:param verbose_strings: optional list of verbose strings to search for
"""
if isinstance(help_output, str):
help_text = help_output
else:
help_text = ''.join(help_output)
commands = cmd2_app.get_visible_commands()
for command in commands:
assert command in help_text
if verbose_strings:
for verbose_string in verbose_strings:
assert verbose_string in help_text
# Help text for the history command
HELP_HISTORY = """Usage: history [-h] [-r | -e | -o FILE | -t TRANSCRIPT_FILE | -c] [-s] [-x]
[-v] [-a]
[arg]
View, run, edit, save, or clear previously entered commands
positional arguments:
arg empty all history items
a one history item by number
a..b, a:b, a:, ..b items by indices (inclusive)
string items containing string
/regex/ items matching regular expression
optional arguments:
-h, --help show this help message and exit
-r, --run run selected history items
-e, --edit edit and then run selected history items
-o, --output_file FILE
output commands to a script file, implies -s
-t, --transcript TRANSCRIPT_FILE
output commands and results to a transcript file,
implies -s
-c, --clear clear all history
formatting:
-s, --script output commands in script format, i.e. without command
numbers
-x, --expanded output fully parsed commands with any aliases and
macros expanded, instead of typed commands
-v, --verbose display history and include expanded commands if they
differ from the typed command
-a, --all display all commands, including ones persisted from
previous sessions
"""
# Output from the shortcuts command with default built-in shortcuts
SHORTCUTS_TXT = """Shortcuts for other commands:
!: shell
?: help
@: run_script
@@: _relative_run_script
"""
# Output from the set command
SET_TXT = (
"Name Value Description \n"
"==================================================================================================================\n"
"allow_style Terminal Allow ANSI text style sequences in output (valid values: \n"
" Always, Never, Terminal) \n"
"always_show_hint False Display tab completion hint even when completion suggestions\n"
" print \n"
"debug False Show full traceback on exception \n"
"echo False Echo command issued into output \n"
"editor vim Program used by 'edit' \n"
"feedback_to_output False Include nonessentials in '|', '>' results \n"
"max_completion_items 50 Maximum number of CompletionItems to display during tab \n"
" completion \n"
"quiet False Don't print nonessential feedback \n"
"timing False Report execution times \n"
)
def normalize(block):
"""Normalize a block of text to perform comparison.
    Strip newlines from the very beginning and very end. Then split into separate lines and strip trailing whitespace
from each line.
"""
assert isinstance(block, str)
block = block.strip('\n')
return [line.rstrip() for line in block.splitlines()]
def run_cmd(app, cmd):
"""Clear out and err StdSim buffers, run the command, and return out and err"""
saved_sysout = sys.stdout
sys.stdout = app.stdout
# This will be used to capture app.stdout and sys.stdout
copy_cmd_stdout = StdSim(app.stdout)
# This will be used to capture sys.stderr
copy_stderr = StdSim(sys.stderr)
try:
app.stdout = copy_cmd_stdout
with redirect_stdout(copy_cmd_stdout):
with redirect_stderr(copy_stderr):
app.onecmd_plus_hooks(cmd)
finally:
app.stdout = copy_cmd_stdout.inner_stream
sys.stdout = saved_sysout
out = copy_cmd_stdout.getvalue()
err = copy_stderr.getvalue()
return normalize(out), normalize(err)
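# Hedged usage sketch for run_cmd (fixture name and expected output are
# assumptions based on the helpers above):
#   out, err = run_cmd(app, 'set')
#   assert out == normalize(SET_TXT)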
@fixture
def base_app():
return cmd2.Cmd(include_py=True, include_ipy=True)
# These are odd file names for testing quoting of them
odd_file_names = ['nothingweird', 'has spaces', '"is_double_quoted"', "'is_single_quoted'"]
def complete_tester(text: str, line: str, begidx: int, endidx: int, app) -> Optional[str]:
"""
    This is a convenience function to test cmd2.complete() since
    in a unit test environment there is no actual console for
    readline to monitor. Therefore we use mock to provide readline data
    to complete().
:param text: the string prefix we are attempting to match
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param app: the cmd2 app that will run completions
:return: The first matched string or None if there are no matches
Matches are stored in app.completion_matches
These matches also have been sorted by complete()
"""
def get_line():
return line
def get_begidx():
return begidx
def get_endidx():
return endidx
# Run the readline tab completion function with readline mocks in place
with mock.patch.object(readline, 'get_line_buffer', get_line):
with mock.patch.object(readline, 'get_begidx', get_begidx):
with mock.patch.object(readline, 'get_endidx', get_endidx):
return app.complete(text, 0)
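# Hedged usage sketch for complete_tester (the matched value is an assumption):
#   first_match = complete_tester('he', 'he', 0, 2, app)
#   # app.completion_matches then holds every sorted candidate, e.g. ['help ']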
| {
"content_hash": "4e613dd9b80566223dc54a1a6b09f8e6",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 122,
"avg_line_length": 38.1413612565445,
"alnum_prop": 0.5478380233356211,
"repo_name": "python-cmd2/cmd2",
"id": "07039504d58036ecf557a811c6d33c2c9584d1f9",
"size": "7300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1090127"
},
{
"name": "Shell",
"bytes": "3446"
}
],
"symlink_target": ""
} |
import ast
from pyxc.importing import SourcePath, orderedModules
from pyxc.transforming import Transformer, SourceMap, exportSourceMap
from pyxc.util import topLevelNamesInBody
import pj.js_ast
import pj.transformations
#### Code to Code
def codeToCode(py):
t = Transformer(pj.transformations, pj.js_ast.JSStatements)
jsAst = t.transformCode(py)
js = '%s\n%s' % ('\n'.join(t.snippets), str(jsAst))
names = set(topLevelNamesInBody(ast.parse(py).body))
if len(names) > 0:
js = 'var %s;\n\n%s' % (
', '.join(names),
js)
return js
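# Hedged usage sketch (assumes the bundled transforms cover these constructs):
#   js = codeToCode('x = 1\n'
#                   'def double(y):\n'
#                   '    return 2 * y\n')
#   # -> JS prefixed with a "var" declaration of the top-level names
#   #    (x, double) gathered by the scan above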
#### Build Bundle
def buildBundle(mainModule, path=None, createSourceMap=False, includeSource=False, prependJs=None):
assert path
t = Transformer(pj.transformations, pj.js_ast.JSStatements)
sourcePath = SourcePath(path)
modules = orderedModules(sourcePath, mainModule)
jsArr = []
topLevelNames = set()
linemaps = []
mappings = []
sourceDict = {}
i = 0
for module in modules:
fileKey = str(i)
i += 1
codePath = sourcePath.pathForModule(module)
with open(codePath, 'rb') as f:
py = str(f.read(), 'utf-8')
sourceDict[fileKey] = {
'path': codePath,
'code': py,
'module': module,
}
if codePath.endswith('.js'):
js = py
else:
# Load the top-level names and confirm they're distinct
for name in topLevelNamesInBody(ast.parse(py).body):
assert name not in topLevelNames
topLevelNames.add(name)
# py → js
jsAst = t.transformCode(py)
if createSourceMap:
sm = SourceMap(fileKey, nextMappingId=len(mappings))
sm.handleNode(jsAst)
js = sm.getCode() + '\n'
assert len(sm.linemaps) == len(js.split('\n')) - 1
linemaps += sm.linemaps
mappings += sm.mappings
else:
js = str(jsAst)
jsArr.append(js)
if len(topLevelNames) > 0:
varJs = 'var %s;' % ', '.join(list(topLevelNames))
else:
varJs = ''
jsPrefix = ''.join([
(prependJs + '\n\n') if prependJs is not None else '',
'(function(){\n\n',
'\n'.join(t.snippets), '\n\n',
varJs, '\n\n'])
jsSuffix = '\n\n})();'
    # pad the linemaps to cover the wrapper prefix and suffix lines
    linemaps = (
        [([-1] * (len(s) + 2)) for s in jsPrefix.split('\n')[:-1]] +
        linemaps[:-1] +
        [([-1] * (len(s) + 2)) for s in jsSuffix.split('\n')])
js = ''.join([
jsPrefix,
''.join(jsArr),
jsSuffix])
info = {
'js': js,
}
if createSourceMap:
info['sourceMap'] = exportSourceMap(linemaps, mappings, sourceDict)
if includeSource:
info['sourceDict'] = dict(
(
sourceDict[k]['module'],
sourceDict[k])
for k in sourceDict)
return info
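# Hedged usage sketch (hypothetical module layout and search path):
#   info = buildBundle('main', path=['src'], createSourceMap=True)
#   with open('bundle.js', 'w') as f:
#       f.write(info['js'])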
| {
"content_hash": "f06129165afbf4e2914259d72886c190",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 99,
"avg_line_length": 28.22047244094488,
"alnum_prop": 0.44503348214285715,
"repo_name": "andrewschaaf/pyxc-pj",
"id": "5fd6ed85f5895d1917e88341470b3a440a037916",
"size": "3585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pj/api_internal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "8100"
},
{
"name": "Python",
"bytes": "74901"
}
],
"symlink_target": ""
} |
"""
WSGI config for servers_openomf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "servers_openomf.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "939307f825d98adbf192c96e9e45b1f3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.928571428571427,
"alnum_prop": 0.7777777777777778,
"repo_name": "omf2097/servers.openomf.org",
"id": "808dbb363ed684d0d98bf4f24bbd87dd67945374",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servers_openomf/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5316"
},
{
"name": "Python",
"bytes": "3104"
}
],
"symlink_target": ""
} |
""" OpenAI GPT configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a :class:`~transformers.OpenAIGPTModel` or a
:class:`~transformers.TFOpenAIGPTModel`. It is used to instantiate a GPT model according to the specified
arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar
configuration to that of the `GPT <https://huggingface.co/openai-gpt>`__ architecture from OpenAI.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 40478):
            Vocabulary size of the GPT model. Defines the number of different tokens that can be represented by the
            :obj:`input_ids` passed when calling :class:`~transformers.OpenAIGPTModel` or
:class:`~transformers.TFOpenAIGPTModel`.
n_positions (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_ctx (:obj:`int`, `optional`, defaults to 512):
Dimensionality of the causal mask (usually same as n_positions).
n_embd (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
afn (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
resid_pdrop (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (:obj:`int`, `optional`, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
The epsilon to use in the layer normalization layers
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
predict_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not special tokens should be predicted when the model has a language modeling head.
summary_type (:obj:`str`, `optional`, defaults to :obj:`"cls_index"`):
Argument used when doing sequence summary, used in the models
:class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Has to be one of the following options:
- :obj:`"last"`: Take the last token hidden state (like XLNet).
- :obj:`"first"`: Take the first token hidden state (like BERT).
- :obj:`"mean"`: Take the mean of all tokens hidden states.
- :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- :obj:`"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
Argument used when doing sequence summary, used in the models
:class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Whether or not to add a projection after the vector extraction.
summary_activation (:obj:`str`, `optional`):
Argument used when doing sequence summary, used in the models
:class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`):
Argument used when doing sequence summary, used in the models
:class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes.
summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1):
Argument used when doing sequence summary, used in the models
:class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`.
The dropout ratio to be used after the projection and activation.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Examples::
>>> from transformers import OpenAIGPTConfig, OpenAIGPTModel
>>> # Initializing a GPT configuration
>>> configuration = OpenAIGPTConfig()
>>> # Initializing a model from the configuration
>>> model = OpenAIGPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "openai-gpt"
def __init__(
self,
vocab_size=40478,
n_positions=512,
n_ctx=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
predict_special_tokens=True,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
**kwargs
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.predict_special_tokens = predict_special_tokens
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.n_embd
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
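# Hedged sketch of the aliased size properties (values are the documented
# defaults; not part of the original module):
#   config = OpenAIGPTConfig()
#   assert config.hidden_size == config.n_embd == 768
#   assert config.num_hidden_layers == config.n_layer == 12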
| {
"content_hash": "d198a118fb42085347aabf301cea05b8",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 119,
"avg_line_length": 47.838509316770185,
"alnum_prop": 0.6504803947026746,
"repo_name": "huggingface/pytorch-transformers",
"id": "1e7bf8ec8caeaa688dd44fe9961717e9305b9e4b",
"size": "8395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/transformers/models/openai/configuration_openai.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
from .dataset import Dataset
class AzureSearchIndexDataset(Dataset):
"""The Azure Search Index.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param linked_service_name: Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param type: Constant filled by server.
:type type: str
:param index_name: The name of the Azure Search Index. Type: string (or
Expression with resultType string).
:type index_name: object
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
'index_name': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'type': {'key': 'type', 'type': 'str'},
'index_name': {'key': 'typeProperties.indexName', 'type': 'object'},
}
def __init__(self, linked_service_name, index_name, additional_properties=None, description=None, structure=None, parameters=None):
super(AzureSearchIndexDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, linked_service_name=linked_service_name, parameters=parameters)
self.index_name = index_name
self.type = 'AzureSearchIndex'
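# Hedged usage sketch (illustrative names; LinkedServiceReference is assumed
# to come from the same models package):
#   ref = LinkedServiceReference(reference_name='AzureSearchLinkedService')
#   dataset = AzureSearchIndexDataset(linked_service_name=ref,
#                                     index_name='products-index')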
| {
"content_hash": "b5366aa96d2772b084b4d746c69d65db",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 208,
"avg_line_length": 45.212765957446805,
"alnum_prop": 0.6748235294117647,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "2058678b113acba852c15e81204afaf10bbba7b9",
"size": "2599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_search_index_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from django.apps import apps
from django.conf import settings
from taiga.base.utils.db import to_tsquery
from taiga.projects.userstories.utils import attach_total_points
MAX_RESULTS = getattr(settings, "SEARCHES_MAX_RESULTS", 150)
def search_epics(project, text):
model = apps.get_model("epics", "Epic")
queryset = model.objects.filter(project_id=project.pk)
table = "epics_epic"
return _search_items(queryset, table, text)
def search_user_stories(project, text):
model = apps.get_model("userstories", "UserStory")
queryset = model.objects.filter(project_id=project.pk)
table = "userstories_userstory"
return _search_items(queryset, table, text)
def search_tasks(project, text):
model = apps.get_model("tasks", "Task")
queryset = model.objects.filter(project_id=project.pk)
table = "tasks_task"
return _search_items(queryset, table, text)
def search_issues(project, text):
model = apps.get_model("issues", "Issue")
queryset = model.objects.filter(project_id=project.pk)
table = "issues_issue"
return _search_items(queryset, table, text)
def search_wiki_pages(project, text):
model = apps.get_model("wiki", "WikiPage")
queryset = model.objects.filter(project_id=project.pk)
tsquery = "to_tsquery('english_nostop', %s)"
tsvector = """
setweight(to_tsvector('english_nostop', coalesce(wiki_wikipage.slug)), 'A') ||
setweight(to_tsvector('english_nostop', coalesce(wiki_wikipage.content)), 'B')
"""
return _search_by_query(queryset, tsquery, tsvector, text)
def _search_items(queryset, table, text):
tsquery = "to_tsquery('english_nostop', %s)"
tsvector = """
setweight(to_tsvector('english_nostop',
coalesce({table}.subject) || ' ' ||
coalesce({table}.ref)), 'A') ||
setweight(to_tsvector('english_nostop', coalesce(inmutable_array_to_string({table}.tags))), 'B') ||
setweight(to_tsvector('english_nostop', coalesce({table}.description)), 'C')
""".format(table=table)
return _search_by_query(queryset, tsquery, tsvector, text)
def _search_by_query(queryset, tsquery, tsvector, text):
select = {
"rank": "ts_rank({tsvector},{tsquery})".format(tsquery=tsquery,
tsvector=tsvector),
}
order_by = ["-rank", ]
where = ["{tsvector} @@ {tsquery}".format(tsquery=tsquery,
tsvector=tsvector), ]
if text:
queryset = queryset.extra(select=select,
select_params=[to_tsquery(text)],
where=where,
params=[to_tsquery(text)],
order_by=order_by)
queryset = attach_total_points(queryset)
return queryset[:MAX_RESULTS]
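# Hedged sketch of the SQL shape _search_by_query builds for user stories
# (abridged and illustrative, not the exact generated query):
#   SELECT ..., ts_rank(<tsvector>, to_tsquery('english_nostop', %s)) AS rank
#   FROM userstories_userstory
#   WHERE <tsvector> @@ to_tsquery('english_nostop', %s)
#   ORDER BY rank DESC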
| {
"content_hash": "b608cfddcb3e4050ababf9e82fb862dd",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 107,
"avg_line_length": 37.166666666666664,
"alnum_prop": 0.6095205243187306,
"repo_name": "mattcongy/itshop",
"id": "adda60bbdeb8d3cca6e11a7a714dc0c05837ebfe",
"size": "3835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker-images/taigav2/taiga-back/taiga/searches/services.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103474"
},
{
"name": "CoffeeScript",
"bytes": "3380"
},
{
"name": "HTML",
"bytes": "274547"
},
{
"name": "JavaScript",
"bytes": "203660"
},
{
"name": "Nginx",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "3591150"
},
{
"name": "Ruby",
"bytes": "164978"
},
{
"name": "Shell",
"bytes": "5238"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import;
import re;
import os;
import sys;
from pymfony.component.system import Object;
from pymfony.component.system import Tool;
from pymfony.component.system.oop import interface;
from pymfony.component.system.oop import abstract;
from pymfony.component.system.exception import RuntimeException;
from pymfony.component.system.exception import InvalidArgumentException;
from pymfony.component.console.formatter import OutputFormatterInterface;
from pymfony.component.console.formatter import OutputFormatter;
"""
"""
@interface
class OutputInterface(Object):
"""OutputInterface is the interface implemented by all Output classes.
@author Fabien Potencier <fabien@symfony.com>
@api
"""
VERBOSITY_QUIET = 0;
VERBOSITY_NORMAL = 1;
VERBOSITY_VERBOSE = 2;
OUTPUT_NORMAL = 0;
OUTPUT_RAW = 1;
OUTPUT_PLAIN = 2;
def write(self, messages, newline = False, outputType = 0):
"""Writes a message to the output.
@param string|array messages The message as an array of lines or a single string
@param Boolean newline Whether to add a newline or not
@param integer type The type of output (0: normal, 1: raw, 2: plain)
@raise InvalidArgumentException When unknown output type is given
@api
"""
def writeln(self, messages, outputType = 0):
"""Writes a message to the output and adds a newline at the end.
@param string|array messages The message as an array of lines or a single string
@param integer type The type of output (0: normal, 1: raw, 2: plain)
@api
"""
def setVerbosity(self, level):
"""Sets the verbosity of the output.
@param integer level The level of verbosity
@api
"""
def getVerbosity(self):
"""Gets the current verbosity of the output.
@return integer The current level of verbosity
@api
"""
def setDecorated(self, decorated):
"""Sets the decorated flag.
@param Boolean decorated Whether to decorate the messages or not
@api
"""
def isDecorated(self):
"""Gets the decorated flag.
        @return Boolean True if the output will decorate messages, False otherwise
@api
"""
def setFormatter(self, formatter):
"""Sets output formatter.
@param OutputFormatterInterface formatter
@api
"""
assert isinstance(formatter, OutputFormatterInterface);
def getFormatter(self):
"""Returns current output formatter instance.
@return OutputFormatterInterface
@api
"""
@interface
class ConsoleOutputInterface(OutputInterface):
"""ConsoleOutputInterface is the interface implemented by ConsoleOutput class.
This adds information about stderr output stream.
@author Dariusz Górecki <darek.krk@gmail.com>
"""
def getErrorOutput(self):
"""
@return OutputInterface
"""
def setErrorOutput(self, error):
assert isinstance(error, OutputInterface);
@abstract
class Output(OutputInterface):
"""Base class for output classes.
There are three levels of verbosity:
normal: no option passed (normal output - information)
verbose: -v (more output - debug)
quiet: -q (no output)
@author Fabien Potencier <fabien@symfony.com>
@api
"""
def __init__(self, verbosity = OutputInterface.VERBOSITY_NORMAL, decorated = None, formatter = None):
"""Constructor.
@param integer verbosity The verbosity level (self.VERBOSITY_QUIET, self.VERBOSITY_NORMAL, self.VERBOSITY_VERBOSE)
@param Boolean decorated Whether to decorate messages or not (None for auto-guessing)
@param OutputFormatterInterface formatter Output formatter instance
@api
"""
if formatter:
assert isinstance(formatter, OutputFormatterInterface);
self.__verbosity = None;
self.__formatter = None;
if None is verbosity:
self.__verbosity = self.VERBOSITY_NORMAL;
else:
self.__verbosity = verbosity;
if None is formatter:
self.__formatter = OutputFormatter();
else:
self.__formatter = formatter;
self.__formatter.setDecorated(bool(decorated));
def setFormatter(self, formatter):
"""Sets output formatter.
@param OutputFormatterInterface formatter
@api
"""
assert isinstance(formatter, OutputFormatterInterface);
self.__formatter = formatter;
def getFormatter(self):
"""Returns current output formatter instance.
@return OutputFormatterInterface
@api
"""
return self.__formatter;
def setDecorated(self, decorated):
"""Sets the decorated flag.
@param Boolean decorated Whether to decorate the messages or not
@api
"""
self.__formatter.setDecorated(bool(decorated));
def isDecorated(self):
"""Gets the decorated flag.
        @return Boolean True if the output will decorate messages, False otherwise
@api
"""
return self.__formatter.isDecorated();
def setVerbosity(self, level):
"""Sets the verbosity of the output.
@param integer level The level of verbosity
@api
"""
self.__verbosity = int(level);
def getVerbosity(self):
"""Gets the current verbosity of the output.
@return integer The current level of verbosity
@api
"""
return self.__verbosity;
def writeln(self, messages, outputType = OutputInterface.OUTPUT_NORMAL):
"""Writes a message to the output and adds a newline at the end.
@param string|list messages The message as an array of lines or a
single string
@param integer outputType The type of output
@api
"""
self.write(messages, True, outputType);
def write(self, messages, newline = False, outputType = OutputInterface.OUTPUT_NORMAL):
"""Writes a message to the output.
@param string|list messages The message as a list of lines or a single string
@param Boolean newline Whether to add a newline or not
@param integer type The type of output
@raise InvalidArgumentException When unknown output type is given
@api
"""
if (self.VERBOSITY_QUIET == self.__verbosity) :
return;
if not isinstance(messages, list):
messages = [str(messages)];
for message in messages:
if OutputInterface.OUTPUT_NORMAL == outputType:
message = self.__formatter.format(message);
elif OutputInterface.OUTPUT_RAW == outputType:
pass;
elif OutputInterface.OUTPUT_PLAIN == outputType:
message = self.__formatter.format(message);
message = re.sub(r'<[^>]*?>', '', message);
else:
raise InvalidArgumentException(
'Unknown output type given ({0})'.format(outputType)
);
self._doWrite(message, newline);
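    # Hedged illustration of the three output types, assuming an undecorated
    # formatter (ANSI escapes appear instead when decoration is on):
    #   write('<info>hi</info>', outputType=OUTPUT_NORMAL) -> "hi"
    #   write('<info>hi</info>', outputType=OUTPUT_RAW)    -> "<info>hi</info>"
    #   write('<info>hi</info>', outputType=OUTPUT_PLAIN)  -> "hi"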
@abstract
def _doWrite(self, message, newline):
"""Writes a message to the output.
@param string message A message to write to the output
@param Boolean newline Whether to add a newline or not
"""
class NullOutput(Output):
"""NullOutput suppresses all output.
output = NullOutput();
@author Fabien Potencier <fabien@symfony.com>
@api
"""
def _doWrite(self, message, newline):
"""Writes a message to the output.
@param string message A message to write to the output
@param Boolean newline Whether to add a newline or not
"""
class StreamOutput(Output):
"""StreamOutput writes the output to a given stream.
Usage:
output = StreamOutput(fopen('php://stdout', 'w'));
As `StreamOutput` can use any stream, you can also use a file:
output = StreamOutput(fopen('/path/to/output.log', 'a', False));
@author Fabien Potencier <fabien@symfony.com>
@api
"""
def __init__(self, stream, verbosity = Output.VERBOSITY_NORMAL, decorated = None, formatter = None):
"""Constructor.
@param mixed stream A stream resource
@param integer verbosity The verbosity level (self.VERBOSITY_QUIET, self.VERBOSITY_NORMAL,
self.VERBOSITY_VERBOSE)
@param Boolean decorated Whether to decorate messages or not (None for auto-guessing)
@param OutputFormatterInterface formatter Output formatter instance
@raise InvalidArgumentException When first argument is not a real stream
@api
"""
if formatter:
assert isinstance(formatter, OutputFormatterInterface);
self.__stream = None;
for method in ['flush', 'write', 'isatty']:
if not Tool.isCallable(getattr(stream, method, None)):
raise InvalidArgumentException(
'The StreamOutput class needs a stream as its first '
'argument.'
);
self.__stream = stream;
if (None is decorated) :
decorated = self._hasColorSupport();
Output.__init__(self, verbosity, decorated, formatter);
def getStream(self):
"""Gets the stream attached to this StreamOutput instance.
@return resource A stream resource
"""
return self.__stream;
def _doWrite(self, message, newline):
"""Writes a message to the output.
@param string message A message to write to the output
@param Boolean newline Whether to add a newline or not
@raise RuntimeException When unable to write output (should never happen)
"""
if newline:
text = message + os.linesep;
else:
text = message;
try:
self.__stream.write(text);
except TypeError:
self.__stream.write(text.encode());
except IOError:
# @codeCoverageIgnoreStart
# should never happen
raise RuntimeException('Unable to write output.');
# @codeCoverageIgnoreEnd
self.__stream.flush();
def _hasColorSupport(self):
"""Returns True if the stream supports colorization.:
Colorization is disabled if not supported by the stream::
- windows without ansicon and ConEmu
- non tty consoles
@return Boolean True if the stream supports colorization, False otherwise:
"""
# @codeCoverageIgnoreStart
if (os.path.sep == '\\') :
return 'ANSICON' in os.environ or (
'ConEmuANSI' in os.environ and os.environ['ConEmuANSI'] == 'ON'
);
return self.__stream.isatty();
# @codeCoverageIgnoreEnd
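# A minimal usage sketch (comment-only; assumes the default formatter wired
# up by Output.__init__ and a tty-like stream such as sys.stdout):
#
#     output = StreamOutput(sys.stdout);
#     output.writeln('<info>done</info>');  # formatted via the formatter
#     output.writeln('<info>done</info>', OutputInterface.OUTPUT_RAW);  # tags kept verbatim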
class ConsoleOutput(StreamOutput, ConsoleOutputInterface):
"""ConsoleOutput is the default class for(, all CLI output. It uses STDOUT.):
This class is(, a convenient wrapper around `StreamOutput`.):
output = ConsoleOutput();
This is equivalent to:
    output = StreamOutput(sys.stdout);
@author Fabien Potencier <fabien@symfony.com>
@api
"""
def __init__(self, verbosity = StreamOutput.VERBOSITY_NORMAL, decorated = None, formatter = None):
"""Constructor.
@param integer verbosity The verbosity level (self.VERBOSITY_QUIET, self.VERBOSITY_NORMAL,
self.VERBOSITY_VERBOSE)
@param Boolean decorated Whether to decorate messages or not (None for auto-guessing)
@param OutputFormatterInterface formatter Output formatter instance
@api
"""
if formatter:
assert isinstance(formatter, OutputFormatterInterface);
self.__stderr = None;
StreamOutput.__init__(self, sys.stdout, verbosity, decorated, formatter);
self.__stderr = StreamOutput(sys.stderr, verbosity, decorated, formatter);
def setDecorated(self, decorated):
StreamOutput.setDecorated(self, decorated);
self.__stderr.setDecorated(decorated);
def setFormatter(self, formatter):
assert isinstance(formatter, OutputFormatterInterface);
StreamOutput.setFormatter(self, formatter);
self.__stderr.setFormatter(formatter);
def setVerbosity(self, level):
StreamOutput.setVerbosity(self, level);
self.__stderr.setVerbosity(level);
def getErrorOutput(self):
"""@return OutputInterface
"""
return self.__stderr;
def setErrorOutput(self, error):
assert isinstance(error, OutputInterface);
self.__stderr = error;
def _hasStdoutSupport(self):
"""Returns True if current environment supports writing console output to:
STDOUT.
IBM iSeries (OS400) exhibits character-encoding issues when writing to
STDOUT and doesn't properly convert ASCII to EBCDIC, resulting in garbage
output.
@return boolean
"""
return hasattr(sys, 'stdout');
| {
"content_hash": "067733662c9cb8b89a6b77f578ab0f6c",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 139,
"avg_line_length": 25.609756097560975,
"alnum_prop": 0.6108424908424909,
"repo_name": "pymfony/pymfony",
"id": "972b596037f799c0ae0a096a15ca79ddc0a77e0d",
"size": "13893",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.2",
"path": "src/pymfony/component/console/output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "855624"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.functional import lazy
@login_required
def logout_user(request):
logout(request)
messages.success(request, 'You have successfully logged out.')
return HttpResponseRedirect(reverse('core:home'))
def private(request):
return TemplateResponse(request, 'users/private.html', None) | {
"content_hash": "35bf5dcccd8e6662a7486a55877db67a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 33.142857142857146,
"alnum_prop": 0.8146551724137931,
"repo_name": "plankter/augeo-cloud",
"id": "54b3289c7a859349eefcdbdfb41fe2f12427a987",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20320"
},
{
"name": "JavaScript",
"bytes": "17544"
},
{
"name": "Python",
"bytes": "46624"
}
],
"symlink_target": ""
} |
"""todolist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "970b01f151f573375c43246a7ea8db91",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 36.095238095238095,
"alnum_prop": 0.6992084432717678,
"repo_name": "udumge/todolist",
"id": "e416e80f0b525981223196a302b29fbb45292f27",
"size": "758",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "todolist/todolist/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "4665"
},
{
"name": "Python",
"bytes": "4295"
}
],
"symlink_target": ""
} |
import os
import re
import shutil
import olympia.core.logger
from olympia import amo
from olympia.addons.models import AddonUser
from olympia.amo.celery import task
from olympia.files.utils import update_version_number
from olympia.lib.crypto.signing import sign_file
from olympia.addons.models import Addon
from olympia.versions.compare import version_int
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.task')
MAIL_COSE_SUBJECT = (
u'Your Firefox extension has been re-signed with a stronger signature')
MAIL_COSE_MESSAGE = u'''
Hello,
Mozilla has recently upgraded the signing [1] for Firefox extensions, themes,
dictionaries, and langpacks to provide a stronger signature. All add-on
versions uploaded to addons.mozilla.org after April 5, 2019 have this
signature. We plan to stop accepting the old signature with Firefox 70 [2].
The current version of your add-on, {addon}, listed on addons.mozilla.org has
been automatically re-signed with the stronger signature. Your add-on will
remain backwards compatible with previous versions of Firefox, including ESR 68
[3], and will continue working when your users upgrade to Firefox 70.
You do not need to take any action at this time.
Regards,
The Add-ons Team
---
[1] https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/Distribution
[2] https://wiki.mozilla.org/Release_Management/Calendar
[3] https://www.mozilla.org/firefox/enterprise/
--
You have received this email because you are a registered developer of a
Firefox add-on. If you do not want to receive these updates regarding your
add-on, please sign in to addons.mozilla.org and delete your add-on(s).
'''
version_regex = re.compile(
r'^(?P<prefix>.*)(?P<version>\.1\-signed)(|\-(?P<number>\d+))$')
def get_new_version_number(version):
match = version_regex.search(version)
if not match:
return u'{}.1-signed'.format(version)
else:
num = int(match.groupdict()['number'] or 1)
return u'{}{}-{}'.format(
match.groupdict()['prefix'],
match.groupdict()['version'],
num + 1)
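# A quick sanity check of the bump sequence implied by version_regex above
# (comment-only; the values follow directly from the regex, nothing else
# is assumed):
#
#     get_new_version_number(u'1.0')             # -> u'1.0.1-signed'
#     get_new_version_number(u'1.0.1-signed')    # -> u'1.0.1-signed-2'
#     get_new_version_number(u'1.0.1-signed-2')  # -> u'1.0.1-signed-3'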
@task
def sign_addons(addon_ids, force=False, **kw):
"""Used to sign all the versions of an addon.
This is used in the 'process_addons --task resign_addons_for_cose'
management command.
It also bumps the version number of the file and the Version, so the
Firefox extension update mechanism picks this new signed version and
installs it.
"""
log.info(u'[{0}] Signing addons.'.format(len(addon_ids)))
mail_subject, mail_message = MAIL_COSE_SUBJECT, MAIL_COSE_MESSAGE
# query everything except for search-plugins as they're generally
# not signed
current_versions = (
Addon.objects
.filter(id__in=addon_ids)
.values_list('_current_version', flat=True))
qset = Version.objects.filter(id__in=current_versions)
addons_emailed = set()
for version in qset:
# We only sign files that have been reviewed
to_sign = version.files.filter(status__in=amo.REVIEWED_STATUSES)
to_sign = to_sign.all()
        if not to_sign:
            log.info(
                u'Not signing addon {0}, version {1} (no files)'
                .format(version.addon, version))
            continue
log.info(
u'Signing addon {0}, version {1}'
.format(version.addon, version))
bumped_version_number = get_new_version_number(version.version)
signed_at_least_a_file = False # Did we sign at least one file?
# We haven't cleared the database yet to ensure that there's only
# one file per WebExtension, so we're going through all files just
# to be sure.
for file_obj in to_sign:
if not os.path.isfile(file_obj.file_path):
log.info(u'File {0} does not exist, skip'.format(file_obj.pk))
continue
# Save the original file, before bumping the version.
backup_path = u'{0}.backup_signature'.format(file_obj.file_path)
shutil.copy(file_obj.file_path, backup_path)
try:
# Need to bump the version (modify manifest file)
# before the file is signed.
update_version_number(file_obj, bumped_version_number)
signed = bool(sign_file(file_obj))
if signed: # Bump the version number if at least one signed.
signed_at_least_a_file = True
else: # We didn't sign, so revert the version bump.
shutil.move(backup_path, file_obj.file_path)
except Exception:
log.error(u'Failed signing file {0}'.format(file_obj.pk),
exc_info=True)
# Revert the version bump, restore the backup.
shutil.move(backup_path, file_obj.file_path)
# Now update the Version model, if we signed at least one file.
if signed_at_least_a_file:
version.update(version=bumped_version_number,
version_int=version_int(bumped_version_number))
addon = version.addon
if addon.pk not in addons_emailed:
# Send a mail to the owners/devs warning them we've
# automatically signed their addon.
qs = (AddonUser.objects
.filter(role=amo.AUTHOR_ROLE_OWNER, addon=addon)
.exclude(user__email__isnull=True))
emails = qs.values_list('user__email', flat=True)
subject = mail_subject
message = mail_message.format(addon=addon.name)
amo.utils.send_mail(
subject, message, recipient_list=emails,
headers={'Reply-To': 'amo-admins@mozilla.com'})
addons_emailed.add(addon.pk)
| {
"content_hash": "8f025082a2022ad2fcfd3453c40cf7a0",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 38.12337662337662,
"alnum_prop": 0.6368591381366037,
"repo_name": "psiinon/addons-server",
"id": "513477cb138449914ac2e6956c3dba0543304675",
"size": "5871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/lib/crypto/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "752741"
},
{
"name": "Dockerfile",
"bytes": "4089"
},
{
"name": "HTML",
"bytes": "314894"
},
{
"name": "JavaScript",
"bytes": "947557"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "5192809"
},
{
"name": "Shell",
"bytes": "6712"
},
{
"name": "Smarty",
"bytes": "1418"
},
{
"name": "TSQL",
"bytes": "6926"
}
],
"symlink_target": ""
} |
'''
Usage::
./decoder.py [module] [data_file]
Sample::
./decoder.py pyroute2.netlink.rtnl.tcmsg.tcmsg ./sample_packet_01.data
./decoder.py pyroute2.netlink.nl80211.nl80211cmd ./nl80211.data
Module is a dotted module path within the pyroute2 hierarchy. The file
should contain binary data in the escaped-string format (see samples).
'''
import sys
from pprint import pprint
from importlib import import_module
from pyroute2.common import load_dump
mod = sys.argv[1]
f = open(sys.argv[2], 'r')
s = mod.split('.')
package = '.'.join(s[:-1])
module = s[-1]
m = import_module(package)
met = getattr(m, module)
data = load_dump(f)
offset = 0
inbox = []
while offset < len(data):
msg = met(data[offset:])
msg.decode()
pprint(msg)
offset += msg['header']['length']
| {
"content_hash": "0197847a433ec790c8c749bc9942ed26",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.6740837696335078,
"repo_name": "mtiny/pyroute2",
"id": "c39c7527f7e4f98a343e51e4d491b92b1a75680e",
"size": "782",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/data/decoder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4203"
},
{
"name": "Python",
"bytes": "647380"
},
{
"name": "Shell",
"bytes": "2490"
}
],
"symlink_target": ""
} |
"""This module specifies file drivers.
They work relative to system/user conf directories and an optional conf
directory given by the environment variable ``B3J0F_CONF_DIR``.
"""
__all__ = ['FileConfDriver']
from os import environ, getenv
from os.path import exists, join, expanduser, sep, abspath
from ..base import ConfDriver
from sys import prefix
CONF_DIRS = [] #: all config directories
def _addconfig(config, *paths):
"""Add path to CONF_DIRS if exists."""
for path in paths:
if path is not None and exists(path):
config.append(path)
# add installation directory
_addconfig(CONF_DIRS, join(sep, *(__file__.split(sep)[:-3] + ['data'])))
# add unix system conf
_addconfig(CONF_DIRS, join(sep, 'etc'), join(sep, 'usr', 'local', 'etc'))
_addconfig(
CONF_DIRS,
join(prefix, '.config'), join(prefix, 'config'), join(prefix, 'etc')
)
# add XDG conf dirs
_addconfig(CONF_DIRS, getenv('XDG_CONFIG_HOME'), getenv('XDG_CONFIG_DIRS'))
# add windows conf dirs
_addconfig(
CONF_DIRS,
getenv('APPDATA'), getenv('PROGRAMDATA'), getenv('LOCALAPPDATA')
)
HOME = environ.get(
'HOME', # unix-like
environ.get(
'USERPROFILE', # windows-like
expanduser('~')
) # default
) #: set user home directory
# add usr config
_addconfig(
CONF_DIRS, join(HOME, '.config'), join(HOME, 'config'), join(HOME, 'etc')
)
_addconfig(CONF_DIRS, '.') # add current directory
B3J0F_CONF_DIR = 'B3J0F_CONF_DIR' #: conf dir environment variable name.
if B3J0F_CONF_DIR in environ: # add b3j0F_CONF_DIR if necessary
CONF_DIRS.append(environ[B3J0F_CONF_DIR]) #: conf dir environment variable
class FileConfDriver(ConfDriver):
"""Conf Manager dedicated to files."""
def rscpaths(self, path):
result = list(
join(conf_dir, path) for conf_dir in CONF_DIRS
if exists(join(conf_dir, path))
)
rel_path = expanduser(path) # add relative path
if exists(rel_path):
result.append(rel_path)
else:
abs_path = abspath(path) # add absolute path
if exists(abs_path):
result.append(abs_path)
return result
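# A minimal resolution sketch (comment-only; 'myapp.conf' and the resulting
# paths are hypothetical -- the output depends on which CONF_DIRS entries
# exist on the host):
#
#     driver = FileConfDriver()
#     driver.rscpaths('myapp.conf')
#     # -> e.g. ['/etc/myapp.conf', '/home/user/.config/myapp.conf'],
#     #    plus the expanded relative (or absolute) path if it exists on disk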
| {
"content_hash": "f1dc346f79c31ad0abde9cba9cff9378",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 25.298850574712645,
"alnum_prop": 0.6383462062698774,
"repo_name": "b3j0f/conf",
"id": "2ac49ad6e678b66751c0dd4c9772b5558c2bd4fc",
"size": "3521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "b3j0f/conf/driver/file/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "224833"
}
],
"symlink_target": ""
} |
import collections
def dicts_are_equal(dict1, dict2):
if len(set(dict1.keys()).symmetric_difference(set(dict2.keys()))) > 0:
return False
result = True
for key in dict1.keys():
if isinstance(dict1[key], dict) and isinstance(dict2[key], dict):
result &= dicts_are_equal(dict1[key], dict2[key])
else:
result &= dict1[key] == dict2[key]
return result
def recursive_dict_update(dict1, dict2):
for key, value in dict2.items():
dict1_entry = dict1.get(key, {})
if isinstance(value, collections.Mapping) and isinstance(dict1_entry, collections.Mapping):
r = recursive_dict_update(dict1_entry, value)
dict1[key] = r
else:
dict1[key] = dict2[key]
return dict1
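# A small illustration of the deep-merge behaviour (comment-only sketch):
#
#     d1 = {'a': {'x': 1}, 'b': 2}
#     d2 = {'a': {'y': 3}, 'b': 4}
#     recursive_dict_update(d1, d2)
#     # -> {'a': {'x': 1, 'y': 3}, 'b': 4}  (nested mappings merge, scalars overwrite)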
def add_to_dict_with_path(destination_dict, key, value, path=None):
current_level = destination_dict
if path is not None and len(path) > 0:
for entry in path:
if entry not in current_level or not isinstance(current_level[entry], dict):
current_level[entry] = {}
current_level = current_level[entry]
if key != "" and value != "":
current_level[key] = value
def get_from_dict_with_path(source_dict, key, path=None, default=None):
current_level = source_dict
if path is not None and len(path) > 0:
for entry in path:
if not isinstance(current_level, collections.Mapping) or entry not in current_level:
return default
current_level = current_level[entry]
if not isinstance(current_level, collections.Mapping):
return default
return current_level.get(key, default)
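# The two path helpers are symmetric, as this comment-only sketch shows:
#
#     d = {}
#     add_to_dict_with_path(d, 'name', 'v1', path=['fragments', 'f1'])
#     # d == {'fragments': {'f1': {'name': 'v1'}}}
#     get_from_dict_with_path(d, 'name', path=['fragments', 'f1'])   # -> 'v1'
#     get_from_dict_with_path(d, 'name', path=['missing'])           # -> None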
def merge_fragment_edge_data(fragment_data_1, fragment_data_2):
result = {"name": [fragment_data_1["name"] if fragment_data_1 is not None else None,
fragment_data_2["name"] if fragment_data_2 is not None else None],
"forward_orientation": [fragment_data_1["forward_orientation"] if fragment_data_1 is not None else None,
fragment_data_2["forward_orientation"] if fragment_data_2 is not None else None]}
return result
| {
"content_hash": "fd271ff75fe7728d06e1e1cc559cd1a2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 119,
"avg_line_length": 39.872727272727275,
"alnum_prop": 0.622891016871865,
"repo_name": "aganezov/bg",
"id": "8894eba3c7c94fa1762d6d9ddf245731ea6860a4",
"size": "2217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bg/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629658"
},
{
"name": "Shell",
"bytes": "1355"
}
],
"symlink_target": ""
} |
""" This module returns stats about the DynamoDB table """
import math
from datetime import datetime, timedelta
from boto.exception import DynamoDBResponseError
import dynamodb
from dynamic_dynamodb.log_handler import LOGGER as logger
from cloudwatch import CLOUDWATCH_CONNECTION as cloudwatch_connection
def get_consumed_read_units(table_name, time_frame=300):
""" Returns the number of consumed read units
:type table_name: str
:param table_name: Name of the DynamoDB table
:type time_frame: int
:param time_frame: How many seconds to look at
:returns: int -- Number of consumed reads
"""
metrics = cloudwatch_connection.get_metric_statistics(
time_frame,
datetime.utcnow()-timedelta(minutes=10, seconds=time_frame),
datetime.utcnow()-timedelta(minutes=10),
'ConsumedReadCapacityUnits',
'AWS/DynamoDB',
['Sum'],
dimensions={'TableName': table_name},
unit='Count')
if metrics:
consumed_read_units = int(
math.ceil(float(metrics[0]['Sum'])/float(time_frame)))
else:
consumed_read_units = 0
logger.info('{0} - Consumed read units: {1:d}'.format(
table_name, consumed_read_units))
return consumed_read_units
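# The unit math, spelled out (comment-only; CloudWatch returns the Sum of
# capacity units consumed over the window, so dividing by the window length
# gives an average per-second rate, rounded up):
#
#     Sum = 1500 units over time_frame = 300 s
#     consumed_read_units = ceil(1500 / 300) = 5 units/s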
def get_consumed_write_units(table_name, time_frame=300):
""" Returns the number of consumed write units
:type table_name: str
:param table_name: Name of the DynamoDB table
:type time_frame: int
:param time_frame: How many seconds to look at
:returns: int -- Number of consumed writes
"""
metrics = cloudwatch_connection.get_metric_statistics(
time_frame,
datetime.utcnow()-timedelta(minutes=10, seconds=time_frame),
datetime.utcnow()-timedelta(minutes=10),
'ConsumedWriteCapacityUnits',
'AWS/DynamoDB',
['Sum'],
dimensions={'TableName': table_name},
unit='Count')
if metrics:
consumed_write_units = int(
math.ceil(float(metrics[0]['Sum'])/float(time_frame)))
else:
consumed_write_units = 0
logger.info('{0} - Consumed write units: {1:d}'.format(
table_name, consumed_write_units))
return consumed_write_units
def get_consumed_read_units_percent(table_name, time_frame=300):
""" Returns the number of consumed read units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type time_frame: int
:param time_frame: How many seconds to look at
:returns: int -- Number of consumed reads
"""
metrics = cloudwatch_connection.get_metric_statistics(
period=time_frame,
start_time=datetime.utcnow()-timedelta(minutes=10, seconds=time_frame),
end_time=datetime.utcnow()-timedelta(minutes=10),
metric_name='ConsumedReadCapacityUnits',
namespace='AWS/DynamoDB',
statistics=['Sum'],
dimensions={'TableName': table_name},
unit='Count')
if metrics:
consumed_read_units = int(
math.ceil(float(metrics[0]['Sum'])/float(time_frame)))
else:
consumed_read_units = 0
consumed_read_units_percent = int(
math.ceil(
float(consumed_read_units) /
float(get_provisioned_read_units(table_name)) * 100))
logger.info('{0} - Consumed read units: {1:d}%'.format(
table_name, consumed_read_units_percent))
return consumed_read_units_percent
def get_consumed_write_units_percent(table_name, time_frame=300):
""" Returns the number of consumed write units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type time_frame: int
:param time_frame: How many seconds to look at
:returns: int -- Number of consumed writes
"""
metrics = cloudwatch_connection.get_metric_statistics(
period=time_frame,
start_time=datetime.utcnow()-timedelta(minutes=10, seconds=time_frame),
end_time=datetime.utcnow()-timedelta(minutes=10),
metric_name='ConsumedWriteCapacityUnits',
namespace='AWS/DynamoDB',
statistics=['Sum'],
dimensions={'TableName': table_name},
unit='Count')
if metrics:
consumed_write_units = int(
math.ceil(float(metrics[0]['Sum'])/float(time_frame)))
else:
consumed_write_units = 0
consumed_write_units_percent = int(
math.ceil(
float(consumed_write_units) /
float(get_provisioned_write_units(table_name)) * 100))
logger.info('{0} - Consumed write units: {1:d}%'.format(
table_name, consumed_write_units_percent))
return consumed_write_units_percent
def get_provisioned_read_units(table_name):
""" Returns the number of provisioned read units for the table
:type table_name: str
:param table_name: Name of the DynamoDB table
:returns: int -- Number of read units
"""
try:
table = dynamodb.get_table(table_name)
except DynamoDBResponseError:
# Return if the table does not exist
return None
logger.debug('{0} - Provisioned read units: {1:d}'.format(
table_name, table.read_units))
return int(table.read_units)
def get_provisioned_write_units(table_name):
""" Returns the number of provisioned write units for the table
:type table_name: str
:param table_name: Name of the DynamoDB table
:returns: int -- Number of write units
"""
try:
table = dynamodb.get_table(table_name)
except DynamoDBResponseError:
# Return if the table does not exist
return None
logger.debug('{0} - Provisioned write units: {1:d}'.format(
table_name, table.write_units))
return int(table.write_units)
| {
"content_hash": "7a07b5d76aee55da445db09bee698352",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 79,
"avg_line_length": 32.777142857142856,
"alnum_prop": 0.650278940027894,
"repo_name": "jamiepg1/dynamic-dynamodb",
"id": "b31a72f3dac6dd71861c7f9f202fb0e29793c3bb",
"size": "5736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic_dynamodb/core/statistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from nose.tools import *
import datetime
import myanimelist.session
import myanimelist.manga
class testMangaClass(object):
@classmethod
def setUpClass(self):
self.session = myanimelist.session.Session()
self.monster = self.session.manga(1)
self.mystery = self.session.genre(7)
self.mystery_tag = self.session.tag(u'mystery')
self.urasawa = self.session.person(1867)
self.original = self.session.publication(1)
self.heinemann = self.session.character(6123)
self.monster_side_story = self.session.manga(10968)
self.holic = self.session.manga(10)
self.supernatural = self.session.genre(37)
self.supernatural_tag = self.session.tag(u'supernatural')
self.clamp = self.session.person(1877)
self.bessatsu = self.session.publication(450)
self.doumeki = self.session.character(567)
self.holic_sequel = self.session.manga(46010)
self.naruto = self.session.manga(11)
self.shounen = self.session.genre(27)
self.action_tag = self.session.tag(u'action')
self.kishimoto = self.session.person(1879)
self.shonen_jump_weekly = self.session.publication(83)
self.ebizou = self.session.character(31825)
self.tomoyo_after = self.session.manga(3941)
self.drama = self.session.genre(8)
self.romance_tag = self.session.tag(u'romance')
self.sumiyoshi = self.session.person(3830)
self.dragon_age = self.session.publication(98)
self.kanako = self.session.character(21227)
self.judos = self.session.manga(79819)
self.action = self.session.genre(1)
self.kondou = self.session.person(18765)
self.invalid_anime = self.session.manga(457384754)
self.latest_manga = myanimelist.manga.Manga.newest(self.session)
@raises(TypeError)
def testNoIDInvalidManga(self):
self.session.manga()
@raises(TypeError)
def testNoSessionInvalidLatestManga(self):
myanimelist.manga.Manga.newest()
@raises(myanimelist.manga.InvalidMangaError)
def testNegativeInvalidManga(self):
self.session.manga(-1)
@raises(myanimelist.manga.InvalidMangaError)
def testFloatInvalidManga(self):
self.session.manga(1.5)
@raises(myanimelist.manga.InvalidMangaError)
def testNonExistentManga(self):
self.invalid_anime.load()
def testLatestManga(self):
assert isinstance(self.latest_manga, myanimelist.manga.Manga)
assert self.latest_manga.id > 79818
def testMangaValid(self):
assert isinstance(self.monster, myanimelist.manga.Manga)
def testTitle(self):
assert self.monster.title == u'Monster'
assert self.holic.title == u'xxxHOLiC'
assert self.naruto.title == u'Naruto'
assert self.tomoyo_after.title == u'Clannad: Tomoyo After'
assert self.judos.title == u'Judos'
def testPicture(self):
assert isinstance(self.holic.picture, unicode)
assert isinstance(self.naruto.picture, unicode)
assert isinstance(self.monster.picture, unicode)
assert isinstance(self.tomoyo_after.picture, unicode)
assert isinstance(self.judos.picture, unicode)
def testAlternativeTitles(self):
assert u'Japanese' in self.monster.alternative_titles and isinstance(self.monster.alternative_titles[u'Japanese'], list) and u'MONSTER モンスター' in self.monster.alternative_titles[u'Japanese']
assert u'Synonyms' in self.holic.alternative_titles and isinstance(self.holic.alternative_titles[u'Synonyms'], list) and u'xxxHolic Cage' in self.holic.alternative_titles[u'Synonyms']
assert u'Japanese' in self.naruto.alternative_titles and isinstance(self.naruto.alternative_titles[u'Japanese'], list) and u'NARUTO -ナルト-' in self.naruto.alternative_titles[u'Japanese']
assert u'English' in self.tomoyo_after.alternative_titles and isinstance(self.tomoyo_after.alternative_titles[u'English'], list) and u'Tomoyo After ~Dear Shining Memories~' in self.tomoyo_after.alternative_titles[u'English']
assert u'Synonyms' in self.judos.alternative_titles and isinstance(self.judos.alternative_titles[u'Synonyms'], list) and u'Juudouzu' in self.judos.alternative_titles[u'Synonyms']
def testTypes(self):
assert self.monster.type == u'Manga'
assert self.tomoyo_after.type == u'Manga'
assert self.judos.type == u'Manga'
def testVolumes(self):
assert self.holic.volumes == 19
assert self.monster.volumes == 18
assert self.tomoyo_after.volumes == 1
assert self.naruto.volumes == 72
assert self.judos.volumes == 3
def testChapters(self):
assert self.holic.chapters == 213
assert self.monster.chapters == 162
assert self.tomoyo_after.chapters == 4
assert self.naruto.chapters == 700
assert self.judos.chapters == None
def testStatus(self):
assert self.holic.status == u'Finished'
assert self.tomoyo_after.status == u'Finished'
assert self.monster.status == u'Finished'
assert self.naruto.status == u'Finished'
def testPublished(self):
assert self.holic.published == (datetime.date(month=2, day=24, year=2003), datetime.date(month=2, day=9, year=2011))
assert self.monster.published == (datetime.date(month=12, day=5, year=1994), datetime.date(month=12, day=20, year=2001))
assert self.naruto.published == (datetime.date(month=9, day=21, year=1999),datetime.date(month=11, day=10, year=2014))
assert self.tomoyo_after.published == (datetime.date(month=4, day=20, year=2007), datetime.date(month=10, day=20, year=2007))
def testGenres(self):
assert isinstance(self.holic.genres, list) and len(self.holic.genres) > 0 and self.mystery in self.holic.genres and self.supernatural in self.holic.genres
assert isinstance(self.tomoyo_after.genres, list) and len(self.tomoyo_after.genres) > 0 and self.drama in self.tomoyo_after.genres
assert isinstance(self.naruto.genres, list) and len(self.naruto.genres) > 0 and self.shounen in self.naruto.genres
assert isinstance(self.monster.genres, list) and len(self.monster.genres) > 0 and self.mystery in self.monster.genres
assert isinstance(self.judos.genres, list) and len(self.judos.genres) > 0 and self.shounen in self.judos.genres and self.action in self.judos.genres
def testAuthors(self):
assert isinstance(self.holic.authors, dict) and len(self.holic.authors) > 0 and self.clamp in self.holic.authors and self.holic.authors[self.clamp] == u'Story & Art'
assert isinstance(self.tomoyo_after.authors, dict) and len(self.tomoyo_after.authors) > 0 and self.sumiyoshi in self.tomoyo_after.authors and self.tomoyo_after.authors[self.sumiyoshi] == u'Art'
assert isinstance(self.naruto.authors, dict) and len(self.naruto.authors) > 0 and self.kishimoto in self.naruto.authors and self.naruto.authors[self.kishimoto] == u'Story & Art'
assert isinstance(self.monster.authors, dict) and len(self.monster.authors) > 0 and self.urasawa in self.monster.authors and self.monster.authors[self.urasawa] == u'Story & Art'
assert isinstance(self.judos.authors, dict) and len(self.judos.authors) > 0 and self.kondou in self.judos.authors and self.judos.authors[self.kondou] == u'Story & Art'
def testSerialization(self):
assert isinstance(self.holic.serialization, myanimelist.publication.Publication) and self.bessatsu == self.holic.serialization
assert isinstance(self.tomoyo_after.serialization, myanimelist.publication.Publication) and self.dragon_age == self.tomoyo_after.serialization
assert isinstance(self.naruto.serialization, myanimelist.publication.Publication) and self.shonen_jump_weekly == self.naruto.serialization
assert isinstance(self.monster.serialization, myanimelist.publication.Publication) and self.original == self.monster.serialization
assert isinstance(self.judos.serialization, myanimelist.publication.Publication) and self.shonen_jump_weekly == self.judos.serialization
def testScore(self):
assert isinstance(self.holic.score, tuple)
assert self.holic.score[0] > 0 and self.holic.score[0] < 10
assert isinstance(self.holic.score[1], int) and self.holic.score[1] >= 0
assert isinstance(self.monster.score, tuple)
assert self.monster.score[0] > 0 and self.monster.score[0] < 10
assert isinstance(self.monster.score[1], int) and self.monster.score[1] >= 0
assert isinstance(self.naruto.score, tuple)
assert self.naruto.score[0] > 0 and self.naruto.score[0] < 10
assert isinstance(self.naruto.score[1], int) and self.naruto.score[1] >= 0
assert isinstance(self.tomoyo_after.score, tuple)
assert self.tomoyo_after.score[0] > 0 and self.tomoyo_after.score[0] < 10
assert isinstance(self.tomoyo_after.score[1], int) and self.tomoyo_after.score[1] >= 0
assert self.judos.score[0] >= 0 and self.judos.score[0] <= 10
assert isinstance(self.judos.score[1], int) and self.judos.score[1] >= 0
def testRank(self):
assert isinstance(self.holic.rank, int) and self.holic.rank > 0
assert isinstance(self.monster.rank, int) and self.monster.rank > 0
assert isinstance(self.naruto.rank, int) and self.naruto.rank > 0
assert isinstance(self.tomoyo_after.rank, int) and self.tomoyo_after.rank > 0
assert isinstance(self.judos.rank, int) and self.judos.rank > 0
def testPopularity(self):
assert isinstance(self.holic.popularity, int) and self.holic.popularity > 0
assert isinstance(self.monster.popularity, int) and self.monster.popularity > 0
assert isinstance(self.naruto.popularity, int) and self.naruto.popularity > 0
assert isinstance(self.tomoyo_after.popularity, int) and self.tomoyo_after.popularity > 0
assert isinstance(self.judos.popularity, int) and self.judos.popularity > 0
def testMembers(self):
assert isinstance(self.holic.members, int) and self.holic.members > 0
assert isinstance(self.monster.members, int) and self.monster.members > 0
assert isinstance(self.naruto.members, int) and self.naruto.members > 0
assert isinstance(self.tomoyo_after.members, int) and self.tomoyo_after.members > 0
assert isinstance(self.judos.members, int) and self.judos.members > 0
def testFavorites(self):
assert isinstance(self.holic.favorites, int) and self.holic.favorites > 0
assert isinstance(self.monster.favorites, int) and self.monster.favorites > 0
assert isinstance(self.naruto.favorites, int) and self.naruto.favorites > 0
assert isinstance(self.tomoyo_after.favorites, int) and self.tomoyo_after.favorites > 0
assert isinstance(self.judos.favorites, int) and self.judos.favorites >= 0
def testPopularTags(self):
assert isinstance(self.holic.popular_tags, dict) and len(self.holic.popular_tags) > 0 and self.supernatural_tag in self.holic.popular_tags and self.holic.popular_tags[self.supernatural_tag] >= 269
assert isinstance(self.tomoyo_after.popular_tags, dict) and len(self.tomoyo_after.popular_tags) > 0 and self.romance_tag in self.tomoyo_after.popular_tags and self.tomoyo_after.popular_tags[self.romance_tag] >= 57
assert isinstance(self.naruto.popular_tags, dict) and len(self.naruto.popular_tags) > 0 and self.action_tag in self.naruto.popular_tags and self.naruto.popular_tags[self.action_tag] >= 561
assert isinstance(self.monster.popular_tags, dict) and len(self.monster.popular_tags) > 0 and self.mystery_tag in self.monster.popular_tags and self.monster.popular_tags[self.mystery_tag] >= 105
assert isinstance(self.judos.popular_tags, dict) and len(self.judos.popular_tags) == 0
def testSynopsis(self):
assert isinstance(self.holic.synopsis, unicode) and len(self.holic.synopsis) > 0 and u'Watanuki' in self.holic.synopsis
assert isinstance(self.monster.synopsis, unicode) and len(self.monster.synopsis) > 0 and u'Tenma' in self.monster.synopsis
assert isinstance(self.naruto.synopsis, unicode) and len(self.naruto.synopsis) > 0 and u'Hokage' in self.naruto.synopsis
assert isinstance(self.tomoyo_after.synopsis, unicode) and len(self.tomoyo_after.synopsis) > 0 and u'Clannad' in self.tomoyo_after.synopsis
assert isinstance(self.judos.synopsis, unicode) and len(self.judos.synopsis) > 0 and u'hardcore' in self.judos.synopsis
def testRelated(self):
assert isinstance(self.holic.related, dict) and 'Sequel' in self.holic.related and self.holic_sequel in self.holic.related[u'Sequel']
assert isinstance(self.monster.related, dict) and 'Side story' in self.monster.related and self.monster_side_story in self.monster.related[u'Side story']
def testCharacters(self):
assert isinstance(self.holic.characters, dict) and len(self.holic.characters) > 0
assert self.doumeki in self.holic.characters and self.holic.characters[self.doumeki]['role'] == 'Main'
assert isinstance(self.monster.characters, dict) and len(self.monster.characters) > 0
assert self.heinemann in self.monster.characters and self.monster.characters[self.heinemann]['role'] == 'Main'
assert isinstance(self.naruto.characters, dict) and len(self.naruto.characters) > 0
assert self.ebizou in self.naruto.characters and self.naruto.characters[self.ebizou]['role'] == 'Supporting'
assert isinstance(self.tomoyo_after.characters, dict) and len(self.tomoyo_after.characters) > 0
assert self.kanako in self.tomoyo_after.characters and self.tomoyo_after.characters[self.kanako]['role'] == 'Supporting' | {
"content_hash": "69020f92d1b20fe3aa8b4b9b96282583",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 228,
"avg_line_length": 59.62443438914027,
"alnum_prop": 0.7467557107080519,
"repo_name": "XueAlfred/MALAnalysis",
"id": "7f179eeba0388472036a15f9520bc759ee6be479",
"size": "13236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper-code/tests/manga_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6467"
},
{
"name": "Makefile",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "192473"
},
{
"name": "TeX",
"bytes": "650025"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
from datetime import datetime, timedelta, date
from kivy.storage.jsonstore import JsonStore
from kivy.clock import Clock, mainthread
import os
class DBInterface(object):
def __init__(self, data_dir, name, do_date=False, do_hour=False, **kwargs):
super(DBInterface, self).__init__(**kwargs)
self.ensure_dir(data_dir)
if do_date:
if do_hour:
date = self.convert_time_to_json_ymdh(self.get_time())
else:
date = self.convert_time_to_json_ymd(self.get_time())
json_name = data_dir + name + '-' + date + '.json'
reset_json_name = (
data_dir + name + '-' + date + '-reset_timers.json')
else:
json_name = data_dir + name + '.json'
reset_json_name = data_dir + name + '-reset_timers.json'
self.data = data = JsonStore(json_name)
self.reset_timers = reset_timers = JsonStore(reset_json_name)
self.sync = Clock.create_trigger(self.trigger_sync)
self.check_reset(0.)
Clock.schedule_interval(self.check_reset, 60.)
def ensure_dir(self, f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def check_reset(self, dt):
reset_timers = self.reset_timers
current_time = self.get_time()
keys_to_del = []
for each in reset_timers:
expire_time = self.convert_time_from_json(each)
if expire_time < current_time:
data = reset_timers[each]
self.set_entry(data['table'], data['row'],
data['name'], None)
keys_to_del.append(each)
for key in keys_to_del:
reset_timers.delete(key)
def trigger_sync(self, dt):
data = self.data
data._is_changed = True
data.store_sync()
def get_entry(self, table, row, name):
data = self.data
try:
return data[table][row][name]['value']
except:
return None
def get_row(self, table, row):
data = self.data
try:
return data[table][row]
except:
return None
def get_table(self, table):
data = self.data
try:
return data[table]
except:
return None
@mainthread
def remove_entry(self, table, row, name, value):
data = self.data
try:
name_data = data[table][row][name]
except:
            print('no entry: ', table, row, name)
            return
try:
name_data['value'].remove(value)
except:
print(value, 'not found in: ', table, row, name)
self.sync()
@mainthread
def append_entry(self, table, row, name, value, do_timestamp=False):
data = self.data
try:
table_data = data[table]
except:
data[table] = table_data = {}
try:
row_data = table_data[row]
except:
table_data[row] = row_data = {}
try:
name_data = row_data[name]
except:
name_data = {'value': []}
row_data[name] = name_data
if do_timestamp:
time = self.get_time()
time_stamp = self.convert_time_to_json(time)
value = (value, time_stamp)
name_data['value'].append(value)
self.sync()
@mainthread
def set_entry(self, table, row, name, value, do_history=False,
reset_in_hours=None, do_timestamp=False):
data = self.data
print('set_entry', table, row, name, value)
try:
table_data = data[table]
except:
data[table] = table_data = {}
try:
row_data = table_data[row]
except:
table_data[row] = row_data = {}
try:
name_data = row_data[name]
except:
name_data = {'value': None}
row_data[name] = name_data
if do_history and 'history' not in name_data:
name_data['history'] = {}
if name_data['value'] != value:
name_data['value'] = value
if do_timestamp:
time = self.get_time()
time_stamp = self.convert_time_to_json(time)
name_data['time_stamp'] = time_stamp
if do_history:
time = self.get_time()
time_stamp = self.convert_time_to_json(time)
name_data['history'][time_stamp] = value
if reset_in_hours is not None:
timed = timedelta(hours=reset_in_hours)
expire_time = time + timed
expires_at = self.convert_time_to_json(expire_time)
reset_timers = self.reset_timers
reset_timers[expires_at] = {
'table': table,
'row': row,
'name': name,
}
if self.data[table] == {}:
self.data[table] = table_data
self.sync()
def get_time(self):
return datetime.utcnow()
def convert_time_to_json_ymd(self, datetime):
if datetime is not None:
return datetime.strftime('%Y-%m-%d')
else:
return None
def convert_time_to_json_ymdh(self, datetime):
if datetime is not None:
return datetime.strftime('%Y-%m-%dT%H')
else:
return None
def convert_time_to_json(self, datetime):
if datetime is not None:
return datetime.strftime('%Y-%m-%dT%H:%M:%S')
else:
return None
def convert_time_from_json(self, jsontime):
if jsontime is not None:
return datetime.strptime(jsontime, '%Y-%m-%dT%H:%M:%S')
else:
return None | {
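# A minimal usage sketch (comment-only; './data/' and the table/row names are
# hypothetical, and writes only land once the Kivy Clock ticks, since
# set_entry is @mainthread-deferred):
#
#     db = DBInterface('./data/', 'game_state')
#     db.set_entry('players', 'p1', 'score', 100, do_history=True)
#     db.get_entry('players', 'p1', 'score')  # -> 100 (after the next frame)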
"content_hash": "b9b1ade89eaf2fa56cb331f1751b0890",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 31.962162162162162,
"alnum_prop": 0.5124302384576357,
"repo_name": "Kovak/Kivy-DriveSync",
"id": "451649b621eaf4fddcefc575010c06ee539b1baf",
"size": "5913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy_drivesync/dbinterface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "409092"
}
],
"symlink_target": ""
} |
import hashlib
import json
from django.shortcuts import render_to_response, redirect, render, get_object_or_404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from django.contrib.auth.models import User
from django.template.response import TemplateResponse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core import cache
from django.template import RequestContext
from .forms import AccountModelForm, UserCreationForm
from .models import Account
def register(request):
if not settings.ALLOW_NEW_REGISTRATIONS:
messages.error(request, "The admin of this service is not "
"allowing new registrations.")
return redirect(settings.SITE_URL)
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
messages.success(request, 'Thanks for registering. You are now logged in.')
new_user = authenticate(username=request.POST['username'], password=request.POST['password'])
login(request, new_user)
return redirect(settings.SITE_URL)
else:
form = UserCreationForm()
return TemplateResponse(request, 'core/register.html', {'form': form})
@login_required
def logout_user(request):
logout(request)
messages.success(request, 'You have successfully logged out.')
return redirect(settings.SITE_URL)
def EditAccount(request):
account = get_object_or_404(Account, username=request.user)
if request.method == 'POST':
f = AccountModelForm(request.POST or None, request.FILES, instance=account)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'Changes saved.')
return redirect('Core:AccountSettings')
else:
f = AccountModelForm(instance=account)
variables = RequestContext(request, {'form': f, 'user': account, 'account_settings': True,})
return render_to_response('core/account_settings.html', variables)
| {
"content_hash": "29d78d0580155140b81c9284158efdf4",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 105,
"avg_line_length": 39.21818181818182,
"alnum_prop": 0.7269355586462679,
"repo_name": "underlost/GamerNews",
"id": "93aa128ab2ef0e94abfb3b130cff3ad018412289",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamernews/apps/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "226951"
},
{
"name": "JavaScript",
"bytes": "135586"
},
{
"name": "Python",
"bytes": "124181"
}
],
"symlink_target": ""
} |
import datetime
from typing import Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class CheckNameAvailabilityResult(_serialization.Model):
"""The CheckNameAvailability operation response.
:ivar name_available: Boolean value that indicates whether the name is available for you to
use. If true, the name is available. If false, the name has already been taken or is invalid
and cannot be used.
:vartype name_available: bool
:ivar reason: The reason that a storage account name could not be used. The Reason element is
only returned if NameAvailable is false. Known values are: "AccountNameInvalid" and
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.storage.v2015_06_15.models.Reason
:ivar message: The error message explaining the Reason value in more detail.
:vartype message: str
"""
_attribute_map = {
"name_available": {"key": "nameAvailable", "type": "bool"},
"reason": {"key": "reason", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[Union[str, "_models.Reason"]] = None,
message: Optional[str] = None,
**kwargs
):
"""
:keyword name_available: Boolean value that indicates whether the name is available for you to
use. If true, the name is available. If false, the name has already been taken or is invalid
and cannot be used.
:paramtype name_available: bool
:keyword reason: The reason that a storage account name could not be used. The Reason element
is only returned if NameAvailable is false. Known values are: "AccountNameInvalid" and
"AlreadyExists".
:paramtype reason: str or ~azure.mgmt.storage.v2015_06_15.models.Reason
:keyword message: The error message explaining the Reason value in more detail.
:paramtype message: str
"""
super().__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
class CustomDomain(_serialization.Model):
"""The custom domain assigned to this storage account. This can be set via Update.
All required parameters must be populated in order to send to Azure.
:ivar name: The custom domain name. Name is the CNAME source. Required.
:vartype name: str
:ivar use_sub_domain_name: Indicates whether indirect CName validation is enabled. Default
value is false. This should only be set on updates.
:vartype use_sub_domain_name: bool
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"use_sub_domain_name": {"key": "useSubDomainName", "type": "bool"},
}
def __init__(self, *, name: str, use_sub_domain_name: Optional[bool] = None, **kwargs):
"""
:keyword name: The custom domain name. Name is the CNAME source. Required.
:paramtype name: str
:keyword use_sub_domain_name: Indicates whether indirect CName validation is enabled. Default
value is false. This should only be set on updates.
:paramtype use_sub_domain_name: bool
"""
super().__init__(**kwargs)
self.name = name
self.use_sub_domain_name = use_sub_domain_name
class Endpoints(_serialization.Model):
"""The URIs that are used to perform a retrieval of a public blob, queue or table object.
:ivar blob: The blob endpoint.
:vartype blob: str
:ivar queue: The queue endpoint.
:vartype queue: str
:ivar table: The table endpoint.
:vartype table: str
:ivar file: The file endpoint.
:vartype file: str
"""
_attribute_map = {
"blob": {"key": "blob", "type": "str"},
"queue": {"key": "queue", "type": "str"},
"table": {"key": "table", "type": "str"},
"file": {"key": "file", "type": "str"},
}
def __init__(
self,
*,
blob: Optional[str] = None,
queue: Optional[str] = None,
table: Optional[str] = None,
file: Optional[str] = None,
**kwargs
):
"""
:keyword blob: The blob endpoint.
:paramtype blob: str
:keyword queue: The queue endpoint.
:paramtype queue: str
:keyword table: The table endpoint.
:paramtype table: str
:keyword file: The file endpoint.
:paramtype file: str
"""
super().__init__(**kwargs)
self.blob = blob
self.queue = queue
self.table = table
self.file = file
class Resource(_serialization.Model):
"""Describes a storage resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword location: Resource location.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class StorageAccount(Resource): # pylint: disable=too-many-instance-attributes
"""The storage account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar provisioning_state: The status of the storage account at the time the operation was
called. Known values are: "Creating", "ResolvingDNS", and "Succeeded".
:vartype provisioning_state: str or ~azure.mgmt.storage.v2015_06_15.models.ProvisioningState
:ivar account_type: The type of the storage account. Known values are: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", and "Premium_LRS".
:vartype account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
:ivar primary_endpoints: The URLs that are used to perform a retrieval of a public blob, queue,
or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint.
:vartype primary_endpoints: ~azure.mgmt.storage.v2015_06_15.models.Endpoints
:ivar primary_location: The location of the primary data center for the storage account.
:vartype primary_location: str
:ivar status_of_primary: The status indicating whether the primary location of the storage
account is available or unavailable. Known values are: "Available" and "Unavailable".
:vartype status_of_primary: str or ~azure.mgmt.storage.v2015_06_15.models.AccountStatus
:ivar last_geo_failover_time: The timestamp of the most recent instance of a failover to the
secondary location. Only the most recent timestamp is retained. This element is not returned if
there has never been a failover instance. Only available if the accountType is Standard_GRS or
Standard_RAGRS.
:vartype last_geo_failover_time: ~datetime.datetime
:ivar secondary_location: The location of the geo-replicated secondary for the storage account.
Only available if the accountType is Standard_GRS or Standard_RAGRS.
:vartype secondary_location: str
:ivar status_of_secondary: The status indicating whether the secondary location of the storage
account is available or unavailable. Only available if the SKU name is Standard_GRS or
Standard_RAGRS. Known values are: "Available" and "Unavailable".
:vartype status_of_secondary: str or ~azure.mgmt.storage.v2015_06_15.models.AccountStatus
:ivar creation_time: The creation date and time of the storage account in UTC.
:vartype creation_time: ~datetime.datetime
:ivar custom_domain: The custom domain the user assigned to this storage account.
:vartype custom_domain: ~azure.mgmt.storage.v2015_06_15.models.CustomDomain
:ivar secondary_endpoints: The URLs that are used to perform a retrieval of a public blob,
queue, or table object from the secondary location of the storage account. Only available if
the SKU name is Standard_RAGRS.
:vartype secondary_endpoints: ~azure.mgmt.storage.v2015_06_15.models.Endpoints
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"account_type": {"key": "properties.accountType", "type": "str"},
"primary_endpoints": {"key": "properties.primaryEndpoints", "type": "Endpoints"},
"primary_location": {"key": "properties.primaryLocation", "type": "str"},
"status_of_primary": {"key": "properties.statusOfPrimary", "type": "str"},
"last_geo_failover_time": {"key": "properties.lastGeoFailoverTime", "type": "iso-8601"},
"secondary_location": {"key": "properties.secondaryLocation", "type": "str"},
"status_of_secondary": {"key": "properties.statusOfSecondary", "type": "str"},
"creation_time": {"key": "properties.creationTime", "type": "iso-8601"},
"custom_domain": {"key": "properties.customDomain", "type": "CustomDomain"},
"secondary_endpoints": {"key": "properties.secondaryEndpoints", "type": "Endpoints"},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
provisioning_state: Optional[Union[str, "_models.ProvisioningState"]] = None,
account_type: Optional[Union[str, "_models.AccountType"]] = None,
primary_endpoints: Optional["_models.Endpoints"] = None,
primary_location: Optional[str] = None,
status_of_primary: Optional[Union[str, "_models.AccountStatus"]] = None,
last_geo_failover_time: Optional[datetime.datetime] = None,
secondary_location: Optional[str] = None,
status_of_secondary: Optional[Union[str, "_models.AccountStatus"]] = None,
creation_time: Optional[datetime.datetime] = None,
custom_domain: Optional["_models.CustomDomain"] = None,
secondary_endpoints: Optional["_models.Endpoints"] = None,
**kwargs
):
"""
:keyword location: Resource location.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword provisioning_state: The status of the storage account at the time the operation was
called. Known values are: "Creating", "ResolvingDNS", and "Succeeded".
:paramtype provisioning_state: str or ~azure.mgmt.storage.v2015_06_15.models.ProvisioningState
:keyword account_type: The type of the storage account. Known values are: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", and "Premium_LRS".
:paramtype account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
:keyword primary_endpoints: The URLs that are used to perform a retrieval of a public blob,
queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob
endpoint.
:paramtype primary_endpoints: ~azure.mgmt.storage.v2015_06_15.models.Endpoints
:keyword primary_location: The location of the primary data center for the storage account.
:paramtype primary_location: str
:keyword status_of_primary: The status indicating whether the primary location of the storage
account is available or unavailable. Known values are: "Available" and "Unavailable".
:paramtype status_of_primary: str or ~azure.mgmt.storage.v2015_06_15.models.AccountStatus
:keyword last_geo_failover_time: The timestamp of the most recent instance of a failover to the
secondary location. Only the most recent timestamp is retained. This element is not returned if
there has never been a failover instance. Only available if the accountType is Standard_GRS or
Standard_RAGRS.
:paramtype last_geo_failover_time: ~datetime.datetime
:keyword secondary_location: The location of the geo-replicated secondary for the storage
account. Only available if the accountType is Standard_GRS or Standard_RAGRS.
:paramtype secondary_location: str
:keyword status_of_secondary: The status indicating whether the secondary location of the
storage account is available or unavailable. Only available if the SKU name is Standard_GRS or
Standard_RAGRS. Known values are: "Available" and "Unavailable".
:paramtype status_of_secondary: str or ~azure.mgmt.storage.v2015_06_15.models.AccountStatus
:keyword creation_time: The creation date and time of the storage account in UTC.
:paramtype creation_time: ~datetime.datetime
:keyword custom_domain: The custom domain the user assigned to this storage account.
:paramtype custom_domain: ~azure.mgmt.storage.v2015_06_15.models.CustomDomain
:keyword secondary_endpoints: The URLs that are used to perform a retrieval of a public blob,
queue, or table object from the secondary location of the storage account. Only available if
the SKU name is Standard_RAGRS.
:paramtype secondary_endpoints: ~azure.mgmt.storage.v2015_06_15.models.Endpoints
"""
super().__init__(location=location, tags=tags, **kwargs)
self.provisioning_state = provisioning_state
self.account_type = account_type
self.primary_endpoints = primary_endpoints
self.primary_location = primary_location
self.status_of_primary = status_of_primary
self.last_geo_failover_time = last_geo_failover_time
self.secondary_location = secondary_location
self.status_of_secondary = status_of_secondary
self.creation_time = creation_time
self.custom_domain = custom_domain
self.secondary_endpoints = secondary_endpoints
class StorageAccountCheckNameAvailabilityParameters(_serialization.Model):
"""The parameters used to check the availability of the storage account name.
All required parameters must be populated in order to send to Azure.
:ivar name: Required.
:vartype name: str
:ivar type:
:vartype type: str
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, *, name: str, type: str = "Microsoft.Storage/storageAccounts", **kwargs):
"""
:keyword name: Required.
:paramtype name: str
:keyword type:
:paramtype type: str
"""
super().__init__(**kwargs)
self.name = name
self.type = type
class StorageAccountCreateParameters(_serialization.Model):
"""The parameters to provide for the account.
All required parameters must be populated in order to send to Azure.
:ivar location: The location of the resource. This will be one of the supported and registered
Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource
cannot be changed once it is created, but if an identical geo region is specified on update,
the request will succeed. Required.
:vartype location: str
:ivar tags: A list of key value pairs that describe the resource. These tags can be used for
viewing and grouping this resource (across resource groups). A maximum of 15 tags can be
provided for a resource. Each tag must have a key with a length no greater than 128 characters
and a value with a length no greater than 256 characters.
:vartype tags: dict[str, str]
:ivar account_type: The sku name. Required for account creation; optional for update. Note that
in older versions, sku name was called accountType. Known values are: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", and "Premium_LRS".
:vartype account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
"""
_validation = {
"location": {"required": True},
}
_attribute_map = {
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"account_type": {"key": "properties.accountType", "type": "str"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
account_type: Optional[Union[str, "_models.AccountType"]] = None,
**kwargs
):
"""
:keyword location: The location of the resource. This will be one of the supported and
registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a
resource cannot be changed once it is created, but if an identical geo region is specified on
update, the request will succeed. Required.
:paramtype location: str
:keyword tags: A list of key value pairs that describe the resource. These tags can be used for
viewing and grouping this resource (across resource groups). A maximum of 15 tags can be
provided for a resource. Each tag must have a key with a length no greater than 128 characters
and a value with a length no greater than 256 characters.
:paramtype tags: dict[str, str]
:keyword account_type: The sku name. Required for account creation; optional for update. Note
that in older versions, sku name was called accountType. Known values are: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", and "Premium_LRS".
:paramtype account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
"""
super().__init__(**kwargs)
self.location = location
self.tags = tags
self.account_type = account_type
class StorageAccountKeys(_serialization.Model):
"""The access keys for the storage account.
:ivar key1: The value of key 1.
:vartype key1: str
:ivar key2: The value of key 2.
:vartype key2: str
"""
_attribute_map = {
"key1": {"key": "key1", "type": "str"},
"key2": {"key": "key2", "type": "str"},
}
def __init__(self, *, key1: Optional[str] = None, key2: Optional[str] = None, **kwargs):
"""
:keyword key1: The value of key 1.
:paramtype key1: str
:keyword key2: The value of key 2.
:paramtype key2: str
"""
super().__init__(**kwargs)
self.key1 = key1
self.key2 = key2
class StorageAccountListResult(_serialization.Model):
"""The list storage accounts operation response.
:ivar value: The list of storage accounts and their properties.
:vartype value: list[~azure.mgmt.storage.v2015_06_15.models.StorageAccount]
"""
_attribute_map = {
"value": {"key": "value", "type": "[StorageAccount]"},
}
def __init__(self, *, value: Optional[List["_models.StorageAccount"]] = None, **kwargs):
"""
:keyword value: The list of storage accounts and their properties.
:paramtype value: list[~azure.mgmt.storage.v2015_06_15.models.StorageAccount]
"""
super().__init__(**kwargs)
self.value = value
class StorageAccountRegenerateKeyParameters(_serialization.Model):
"""The parameters used to regenerate the storage account key.
All required parameters must be populated in order to send to Azure.
:ivar key_name: Required.
:vartype key_name: str
"""
_validation = {
"key_name": {"required": True},
}
_attribute_map = {
"key_name": {"key": "keyName", "type": "str"},
}
def __init__(self, *, key_name: str, **kwargs):
"""
:keyword key_name: Required.
:paramtype key_name: str
"""
super().__init__(**kwargs)
self.key_name = key_name
class StorageAccountUpdateParameters(_serialization.Model):
"""The parameters to update on the account.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar account_type: The account type. Note that StandardZRS and PremiumLRS accounts cannot be
changed to other account types, and other account types cannot be changed to StandardZRS or
PremiumLRS. Known values are: "Standard_LRS", "Standard_ZRS", "Standard_GRS", "Standard_RAGRS",
and "Premium_LRS".
:vartype account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
:ivar custom_domain: User domain assigned to the storage account. Name is the CNAME source.
Only one custom domain is supported per storage account at this time. To clear the existing
custom domain, use an empty string for the custom domain name property.
:vartype custom_domain: ~azure.mgmt.storage.v2015_06_15.models.CustomDomain
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"account_type": {"key": "properties.accountType", "type": "str"},
"custom_domain": {"key": "properties.customDomain", "type": "CustomDomain"},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
account_type: Optional[Union[str, "_models.AccountType"]] = None,
custom_domain: Optional["_models.CustomDomain"] = None,
**kwargs
):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword account_type: The account type. Note that StandardZRS and PremiumLRS accounts cannot
be changed to other account types, and other account types cannot be changed to StandardZRS or
PremiumLRS. Known values are: "Standard_LRS", "Standard_ZRS", "Standard_GRS", "Standard_RAGRS",
and "Premium_LRS".
:paramtype account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
:keyword custom_domain: User domain assigned to the storage account. Name is the CNAME source.
Only one custom domain is supported per storage account at this time. To clear the existing
custom domain, use an empty string for the custom domain name property.
:paramtype custom_domain: ~azure.mgmt.storage.v2015_06_15.models.CustomDomain
"""
super().__init__(**kwargs)
self.tags = tags
self.account_type = account_type
self.custom_domain = custom_domain
class Usage(_serialization.Model):
"""Describes Storage Resource Usage.
All required parameters must be populated in order to send to Azure.
:ivar unit: The unit of measurement. Required. Known values are: "Count", "Bytes", "Seconds",
"Percent", "CountsPerSecond", and "BytesPerSecond".
:vartype unit: str or ~azure.mgmt.storage.v2015_06_15.models.UsageUnit
:ivar current_value: The current count of the allocated resources in the subscription.
Required.
:vartype current_value: int
:ivar limit: The maximum count of the resources that can be allocated in the subscription.
Required.
:vartype limit: int
:ivar name: The name of the type of usage. Required.
:vartype name: ~azure.mgmt.storage.v2015_06_15.models.UsageName
"""
_validation = {
"unit": {"required": True},
"current_value": {"required": True},
"limit": {"required": True},
"name": {"required": True},
}
_attribute_map = {
"unit": {"key": "unit", "type": "str"},
"current_value": {"key": "currentValue", "type": "int"},
"limit": {"key": "limit", "type": "int"},
"name": {"key": "name", "type": "UsageName"},
}
def __init__(
self,
*,
unit: Union[str, "_models.UsageUnit"],
current_value: int,
limit: int,
name: "_models.UsageName",
**kwargs
):
"""
:keyword unit: The unit of measurement. Required. Known values are: "Count", "Bytes",
"Seconds", "Percent", "CountsPerSecond", and "BytesPerSecond".
:paramtype unit: str or ~azure.mgmt.storage.v2015_06_15.models.UsageUnit
:keyword current_value: The current count of the allocated resources in the subscription.
Required.
:paramtype current_value: int
:keyword limit: The maximum count of the resources that can be allocated in the subscription.
Required.
:paramtype limit: int
:keyword name: The name of the type of usage. Required.
:paramtype name: ~azure.mgmt.storage.v2015_06_15.models.UsageName
"""
super().__init__(**kwargs)
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UsageListResult(_serialization.Model):
"""The List Usages operation response.
:ivar value: The list Storage Resource Usages.
:vartype value: list[~azure.mgmt.storage.v2015_06_15.models.Usage]
"""
_attribute_map = {
"value": {"key": "value", "type": "[Usage]"},
}
def __init__(self, *, value: Optional[List["_models.Usage"]] = None, **kwargs):
"""
:keyword value: The list Storage Resource Usages.
:paramtype value: list[~azure.mgmt.storage.v2015_06_15.models.Usage]
"""
super().__init__(**kwargs)
self.value = value
class UsageName(_serialization.Model):
"""The Usage Names.
:ivar value: A string describing the resource name.
:vartype value: str
:ivar localized_value: A localized string describing the resource name.
:vartype localized_value: str
"""
_attribute_map = {
"value": {"key": "value", "type": "str"},
"localized_value": {"key": "localizedValue", "type": "str"},
}
def __init__(self, *, value: Optional[str] = None, localized_value: Optional[str] = None, **kwargs):
"""
:keyword value: A string describing the resource name.
:paramtype value: str
:keyword localized_value: A localized string describing the resource name.
:paramtype localized_value: str
"""
super().__init__(**kwargs)
self.value = value
self.localized_value = localized_value
| {
"content_hash": "fa88d140986898b9914ed13895a5a565",
"timestamp": "",
"source": "github",
"line_count": 651,
"max_line_length": 107,
"avg_line_length": 42.47004608294931,
"alnum_prop": 0.6431568287037037,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8ea96cca8ab67497b42ab454d1a2c1653ce530be",
"size": "28149",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2015_06_15/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import json
import os
import tempfile
import pytest
import sqlalchemy as sa
import testing.postgresql
import tornado.options
import tornado.testing
from sqlalchemy.orm import sessionmaker
from .. import app
from .. import dbinterface as dbi
from .. import models
def setup_module(module):
module.PG_FACTORY = testing.postgresql.PostgresqlFactory(
cache_initialized_db=True)
def teardown_module(module):
module.PG_FACTORY.clear_cache()
@pytest.fixture(autouse=True)
def set_salts(monkeypatch):
monkeypatch.setenv('HASHIDS_PUBLIC_SALT', 'public')
monkeypatch.setenv('HASHIDS_SECRET_SALT', 'secret')
def data_2x2():
return [[(1, 2, 3, 4), (5, 6, 7, 8)],
[(9, 10, 11, 12), (13, 14, 15, 16)]]
def request():
return {
'python_version': (2, 7, 6, 'final', 0),
'ipb_version': '1.6',
'ipb_class': 'BlockGrid',
'code_cells': ['asdf', 'jkl;'],
'secret': False,
'grid_data': {
'lines_on': True,
'width': 2,
'height': 2,
'blocks': data_2x2()
}
}
class UtilBase(tornado.testing.AsyncHTTPTestCase):
def setup_method(self, method):
self.postgresql = PG_FACTORY()
self.engine = sa.create_engine(self.postgresql.url())
models.Base.metadata.create_all(bind=self.engine)
self.Session = sessionmaker(bind=self.engine)
self.session = self.Session()
tornado.options.options.db_url = self.postgresql.url()
def teardown_method(self, method):
self.session.close()
self.Session.close_all()
self.engine.dispose()
self.postgresql.stop()
def get_app(self):
return app.make_application()
def get_response(self, body=None):
return self.fetch(self.app_url, method=self.method, body=body)
def save_grid(self, secret):
req = request()
req['secret'] = secret
hash_id = dbi.store_grid_entry(self.session, req)
self.session.commit()
return hash_id
class TestPostGrid(UtilBase):
app_url = '/post'
method = 'POST'
def test_json_failure(self):
response = self.get_response('{"asdf"}')
assert response.code == 400
def test_validation_failure(self):
response = self.get_response('{"asdf": 5}')
assert response.code == 400
def test_returns_url(self):
req = request()
response = self.get_response(json.dumps(req))
assert response.code == 200
assert 'application/json' in response.headers['Content-Type']
body = json.loads(response.body)
assert body['url'] == 'http://www.ipythonblocks.org/bizkiL'
def test_returns_url_secret(self):
req = request()
req['secret'] = True
response = self.get_response(json.dumps(req))
assert response.code == 200
assert 'application/json' in response.headers['Content-Type']
body = json.loads(response.body)
assert body['url'] == 'http://www.ipythonblocks.org/secret/MiXoi4'
def test_stores_data(self):
req = request()
response = self.get_response(json.dumps(req))
assert response.code == 200
body = json.loads(response.body)
hash_id = body['url'].split('/')[-1]
grid_spec = dbi.get_grid_entry(self.session, hash_id)
assert grid_spec.id == 1
        comp_data = json.loads(json.dumps(req))
for key, value in comp_data.items():
assert getattr(grid_spec, key) == value
class TestGetGrid(UtilBase):
method = 'GET'
def test_returns_404(self):
self.app_url = '/get/asdf'
response = self.get_response()
assert response.code == 404
def test_get_grid(self):
grid_id = self.save_grid(False)
self.app_url = '/get/{}'.format(grid_id)
response = self.get_response()
assert response.code == 200
body = json.loads(response.body)
req = request()
assert body == json.loads(json.dumps(req['grid_data']))
def test_get_grid_secret(self):
grid_id = self.save_grid(True)
self.app_url = '/get/secret/{}'.format(grid_id)
response = self.get_response()
assert response.code == 200
body = json.loads(response.body)
req = request()
assert body == json.loads(json.dumps(req['grid_data']))
class TestRandomHandler(UtilBase):
def test_random(self):
grid_id = self.save_grid(False)
self.http_client.fetch(
self.get_url('/random'), self.stop,
method='GET', follow_redirects=False)
response = self.wait()
assert response.code == 303
assert response.headers['Location'] == '/{}'.format(grid_id)
class TestRenderGrid(UtilBase):
method = 'GET'
def test_render(self):
hash_id = self.save_grid(False)
self.app_url = '/{}'.format(hash_id)
response = self.get_response()
assert response.code == 200
assert b'<table' in response.body
assert b'asdf' in response.body
def test_render_secret(self):
hash_id = self.save_grid(True)
self.app_url = '/secret/{}'.format(hash_id)
response = self.get_response()
assert response.code == 200
assert b'<table' in response.body
assert b'asdf' in response.body
| {
"content_hash": "2cde9f8c86c67d4b7c65cb6417a42026",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 74,
"avg_line_length": 27.392857142857142,
"alnum_prop": 0.6008567703482958,
"repo_name": "jiffyclub/ipythonblocks.org",
"id": "e9e99ba2c465304e635a8ea993a0c17aeae81764",
"size": "5369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/test/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6067"
},
{
"name": "HTML",
"bytes": "49913"
},
{
"name": "Python",
"bytes": "24577"
}
],
"symlink_target": ""
} |
from gevent.event import Event, AsyncResult, Timeout
from six.moves import xrange
__all__ = ['Events', 'AsyncResults', 'DaemonState']
class AsyncResultWrapper(object):
def __init__(self):
self.async_result = None
def create(self):
        assert self.async_result is None, \
            'Trying to create async_result but it is not None'
self.async_result = AsyncResult()
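    # get() blocks until a value is set or the caller-supplied timeout fires;
    # on timeout it returns None, and the wrapper resets itself either way.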
def get(self, *args, **kwargs):
try:
result = self.async_result.get(*args, **kwargs)
except Timeout:
result = None
self.async_result = None
return result
def set(self, *args, **kwargs):
if self.async_result is not None:
self.async_result.set(*args, **kwargs)
class Events:
shutdown_required = Event()
state_changed = Event()
core_start_ready = Event()
class AsyncResults:
remote_directory_listing = AsyncResultWrapper()
class DaemonState:
(
AUTHORIZATION_REQUIRED,
AUTHORIZATION_OK,
OFFLINE,
ROOT_FOLDER_MISSING,
) = xrange(4)
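    # An ad-hoc enum: the four states above are simply numbered 0..3 via xrange.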
current = None
| {
"content_hash": "0f8abf763ab472d8e532c9313c3792e5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.6135746606334842,
"repo_name": "sapo/meocloud-cli",
"id": "73d27210640114ec1b10775ad5521168e16495b2",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meocloud/client/linux/daemon/communication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "459181"
},
{
"name": "Shell",
"bytes": "5975"
},
{
"name": "Thrift",
"bytes": "1689"
}
],
"symlink_target": ""
} |
import sys
extra = {}
if sys.version_info >= (3, 0):
extra.update(use_2to3=True)
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
author = "Rune Halvorsen"
email = "runefh@gmail.com"
version = "0.3"
desc = """Wraps the best available JSON implementation available in a common interface"""
setup(name='anyjson',
version=version,
description=desc,
long_description=open("README").read(),
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
],
keywords='json',
author=author,
author_email=email,
url='http://bitbucket.org/runeh/anyjson',
license='BSD',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
zip_safe=False,
platforms=["any"],
      test_suite='nose.collector',
**extra
)
| {
"content_hash": "c774bdd0f13c4c35b5d3dbd901335cd7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 31.325581395348838,
"alnum_prop": 0.5924276169265034,
"repo_name": "kennethreitz-archive/anyjson",
"id": "4f601ce5ede795092d38e7a4609ec63e12cd59fc",
"size": "1347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11771"
}
],
"symlink_target": ""
} |
import logging
import os
import json
import threading
from media.item import item_builders, item_id_generators, dict_to_item
import media.file
import media.url
import media.url_from_playlist
import media.radio
from database import MusicDatabase, Condition
import variables as var
import util
class ItemNotCachedError(Exception):
pass
class MusicCache(dict):
def __init__(self, db: MusicDatabase):
super().__init__()
self.db = db
self.log = logging.getLogger("bot")
self.dir_lock = threading.Lock()
def get_item_by_id(self, id):
if id in self:
return self[id]
# if not cached, query the database
item = self.fetch(id)
if item is not None:
self[id] = item
self.log.debug("library: music found in database: %s" % item.format_debug_string())
return item
else:
return None
# print(id)
# raise KeyError("Unable to fetch item from the database! Please try to refresh the cache by !recache.")
def get_item(self, **kwargs):
        # kwargs should provide the type and id, plus any parameters needed to build the item if it is not already in the library.
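        # Illustrative call shape (parameter names depend on the item type):
        #   get_item(type='url', url=..., user=...)
        # When no id is given, one is derived from the other parameters.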
# if cached
if 'id' in kwargs:
id = kwargs['id']
else:
id = item_id_generators[kwargs['type']](**kwargs)
if id in self:
return self[id]
# if not cached, query the database
item = self.fetch(id)
if item is not None:
self[id] = item
self.log.debug("library: music found in database: %s" % item.format_debug_string())
return item
# if not in the database, build one
self[id] = item_builders[kwargs['type']](**kwargs) # newly built item will not be saved immediately
return self[id]
def get_items_by_tags(self, tags):
music_dicts = self.db.query_music_by_tags(tags)
items = []
if music_dicts:
for music_dict in music_dicts:
id = music_dict['id']
self[id] = dict_to_item(music_dict)
items.append(self[id])
return items
def fetch(self, id):
music_dict = self.db.query_music_by_id(id)
if music_dict:
self[id] = dict_to_item(music_dict)
return self[id]
else:
return None
def save(self, id):
self.log.debug("library: music save into database: %s" % self[id].format_debug_string())
self.db.insert_music(self[id].to_dict())
self.db.manage_special_tags()
def free_and_delete(self, id):
item = self.get_item_by_id(id)
if item:
self.log.debug("library: DELETE item from the database: %s" % item.format_debug_string())
if item.type == 'url':
if os.path.exists(item.path):
os.remove(item.path)
if item.id in self:
del self[item.id]
self.db.delete_music(Condition().and_equal("id", item.id))
def free(self, id):
if id in self:
self.log.debug("library: cache freed for item: %s" % self[id].format_debug_string())
del self[id]
def free_all(self):
self.log.debug("library: all cache freed")
self.clear()
def build_dir_cache(self):
self.dir_lock.acquire()
self.log.info("library: rebuild directory cache")
files = util.get_recursive_file_list_sorted(var.music_folder)
# remove deleted files
results = self.db.query_music(Condition().or_equal('type', 'file'))
for result in results:
if result['path'] not in files:
self.log.debug("library: music file missed: %s, delete from library." % result['path'])
self.db.delete_music(Condition().and_equal('id', result['id']))
else:
files.remove(result['path'])
for file in files:
results = self.db.query_music(Condition().and_equal('path', file))
if not results:
item = item_builders['file'](path=file)
self.log.debug("library: music save into database: %s" % item.format_debug_string())
self.db.insert_music(item.to_dict())
self.db.manage_special_tags()
self.dir_lock.release()
class CachedItemWrapper:
def __init__(self, lib, id, type, user):
self.lib = lib
self.id = id
self.user = user
self.type = type
self.log = logging.getLogger("bot")
self.version = 0
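        # The local version mirrors the wrapped item's version; a bump signals
        # that the item changed and needs to be saved back to the database.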
def item(self):
if self.id in self.lib:
return self.lib[self.id]
else:
raise ItemNotCachedError(f"Uncached item of id {self.id}, type {self.type}.")
def to_dict(self):
dict = self.item().to_dict()
dict['user'] = self.user
return dict
def validate(self):
ret = self.item().validate()
if ret and self.item().version > self.version:
self.version = self.item().version
self.lib.save(self.id)
return ret
def prepare(self):
ret = self.item().prepare()
if ret and self.item().version > self.version:
self.version = self.item().version
self.lib.save(self.id)
return ret
def uri(self):
return self.item().uri()
def add_tags(self, tags):
self.item().add_tags(tags)
if self.item().version > self.version:
self.version = self.item().version
self.lib.save(self.id)
def remove_tags(self, tags):
self.item().remove_tags(tags)
if self.item().version > self.version:
self.version = self.item().version
self.lib.save(self.id)
def clear_tags(self):
self.item().clear_tags()
if self.item().version > self.version:
self.version = self.item().version
self.lib.save(self.id)
def is_ready(self):
return self.item().is_ready()
def is_failed(self):
return self.item().is_failed()
def format_current_playing(self):
return self.item().format_current_playing(self.user)
def format_song_string(self):
return self.item().format_song_string(self.user)
def format_title(self):
return self.item().format_title()
def format_debug_string(self):
return self.item().format_debug_string()
def display_type(self):
return self.item().display_type()
# Remember!!! Get wrapper functions will automatically add items into the cache!
def get_cached_wrapper(item, user):
if item:
var.cache[item.id] = item
return CachedItemWrapper(var.cache, item.id, item.type, user)
return None
def get_cached_wrappers(items, user):
wrappers = []
for item in items:
if item:
wrappers.append(get_cached_wrapper(item, user))
return wrappers
def get_cached_wrapper_from_scrap(**kwargs):
item = var.cache.get_item(**kwargs)
if 'user' not in kwargs:
raise KeyError("Which user added this song?")
return CachedItemWrapper(var.cache, item.id, kwargs['type'], kwargs['user'])
def get_cached_wrapper_from_dict(dict_from_db, user):
if dict_from_db:
item = dict_to_item(dict_from_db)
return get_cached_wrapper(item, user)
return None
def get_cached_wrappers_from_dicts(dicts_from_db, user):
items = []
for dict_from_db in dicts_from_db:
if dict_from_db:
items.append(get_cached_wrapper_from_dict(dict_from_db, user))
return items
def get_cached_wrapper_by_id(id, user):
item = var.cache.get_item_by_id(id)
if item:
return CachedItemWrapper(var.cache, item.id, item.type, user)
def get_cached_wrappers_by_tags(tags, user):
items = var.cache.get_items_by_tags(tags)
ret = []
for item in items:
ret.append(CachedItemWrapper(var.cache, item.id, item.type, user))
return ret
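# Minimal usage sketch (assumed id and user values, for illustration only):
#   wrapper = get_cached_wrapper_by_id(some_id, "alice")
#   if wrapper and wrapper.validate():
#       wrapper.prepare()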
| {
"content_hash": "aaa75bd88eda9986d0a23e542117ff95",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 116,
"avg_line_length": 30.968992248062015,
"alnum_prop": 0.5862327909887359,
"repo_name": "azlux/botamusique",
"id": "0202ca62074e68a1ef7f7d8e36e8ac3062ebfec8",
"size": "7990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "media/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "760"
},
{
"name": "HTML",
"bytes": "32379"
},
{
"name": "JavaScript",
"bytes": "44651"
},
{
"name": "Python",
"bytes": "219191"
},
{
"name": "SCSS",
"bytes": "4307"
},
{
"name": "Shell",
"bytes": "5175"
}
],
"symlink_target": ""
} |
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <http://packages.python.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007-2012 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
version = "2.1.1"
version_info = (2, 1, 1, "final")
import re
import codecs
import sys
import logging
import util
from preprocessors import build_preprocessors
from blockprocessors import build_block_parser
from treeprocessors import build_treeprocessors
from inlinepatterns import build_inlinepatterns
from postprocessors import build_postprocessors
from extensions import Extension
from serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown:
"""Convert Markdown to HTML."""
doc_tag = "div" # Element used to wrap document - later removed
option_defaults = {
'html_replacement_text' : '[HTML_REMOVED]',
'tab_length' : 4,
'enable_attributes' : True,
'smart_emphasis' : True,
'lazy_ol' : True,
}
output_formats = {
'html' : to_html_string,
'html4' : to_html_string,
'html5' : to_html_string,
'xhtml' : to_xhtml_string,
'xhtml1': to_xhtml_string,
'xhtml5': to_xhtml_string,
}
ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!']
def __init__(self, *args, **kwargs):
"""
Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If they are of type string, the module mdx_name.py will be loaded.
If they are a subclass of markdown.Extension, they will be used
as-is.
        * extension_configs: Configuration settings for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml5": Outputs XHTML style tags of HTML 5
* "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html5": Outputs HTML style tags of HTML 5
* "html": Outputs latest supported version of HTML (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
* safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
* html_replacement_text: Text used when safe_mode is set to "replace".
* tab_length: Length of tabs in the source. Default: 4
* enable_attributes: Enable the conversion of attributes. Default: True
        * smart_emphasis: Treat `_connected_words_` intelligently. Default: True
* lazy_ol: Ignore number of first item of ordered lists. Default: True
"""
        # For backward compatibility, loop through old positional args
pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
c = 0
for arg in args:
if not kwargs.has_key(pos[c]):
kwargs[pos[c]] = arg
c += 1
if c == len(pos):
# ignore any additional args
break
# Loop through kwargs and assign defaults
for option, default in self.option_defaults.items():
setattr(self, option, kwargs.get(option, default))
self.safeMode = kwargs.get('safe_mode', False)
self.registeredExtensions = []
self.docType = ""
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []),
configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml1'))
self.reset()
def build_parser(self):
""" Build the parser from the various parts. """
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects. See the docstring on Markdown.
* configs: A dictionary mapping module names to config options.
"""
for ext in extensions:
if isinstance(ext, basestring):
ext = self.build_extension(ext, configs.get(ext, []))
if isinstance(ext, Extension):
# might raise NotImplementedError, but that's the extension author's problem
ext.extendMarkdown(self, globals())
else:
raise ValueError('Extension "%s.%s" must be of type: "markdown.Extension".' \
% (ext.__class__.__module__, ext.__class__.__name__))
return self
    def build_extension(self, ext_name, configs=[]):
"""Build extension by name, then return the module.
The extension name may contain arguments as part of the string in the
following format: "extname(key1=value1,key2=value2)"
"""
# Parse extensions config params (ignore the order)
configs = dict(configs)
pos = ext_name.find("(") # find the first "("
if pos > 0:
ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
# Setup the module name
module_name = ext_name
if '.' not in ext_name:
module_name = '.'.join(['markdown.extensions', ext_name])
# Try loading the extension first from one place, then another
try: # New style (markdown.extensons.<extension>)
module = __import__(module_name, {}, {}, [module_name.rpartition('.')[0]])
except ImportError:
module_name_old_style = '_'.join(['mdx', ext_name])
try: # Old style (mdx_<extension>)
module = __import__(module_name_old_style)
except ImportError:
logger.warn("Failed loading extension '%s' from '%s' or '%s'"
% (ext_name, module_name, module_name_old_style))
                # Return None so we don't try to initiate a non-existent extension
return None
# If the module is loaded successfully, we expect it to define a
# function called makeExtension()
try:
return module.makeExtension(configs.items())
except AttributeError, e:
logger.warn("Failed to initiate extension '%s': %s" % (ext_name, e))
return None
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
return self
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self
def set_output_format(self, format):
""" Set the output format for the class instance. """
try:
self.serializer = self.output_formats[format.lower()]
except KeyError:
raise KeyError('Invalid Output Format: "%s". Use one of %s.' \
% (format, self.output_formats.keys()))
return self
def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string.
"""
# Fixup the source text
if not source.strip():
return u"" # a blank unicode string
try:
source = unicode(source)
except UnicodeDecodeError, e:
            # Customise error message while maintaining original traceback
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
source = source.replace(util.STX, "").replace(util.ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = re.sub(r'\n\s+\n', '\n\n', source)
source = source.expandtabs(self.tab_length)
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors.values():
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root)
if newRoot:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = output.index('<%s>'%self.doc_tag)+len(self.doc_tag)+2
end = output.rindex('</%s>'%self.doc_tag)
output = output[start:end].strip()
except ValueError:
if output.strip().endswith('<%s />'%self.doc_tag):
# We have an empty document
output = ''
else:
# We have a serious problem
raise ValueError('Markdown failed to strip top-level tags. Document=%r' % output.strip())
# Run the text post-processors
for pp in self.postprocessors.values():
output = pp.run(output)
return output.strip()
def convertFile(self, input=None, output=None, encoding=None):
"""Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: File object or path. Reads from stdin if `None`.
* output: File object or path. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.
"""
encoding = encoding or "utf-8"
# Read the source
if input:
if isinstance(input, str):
input_file = codecs.open(input, mode="r", encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if not isinstance(text, unicode):
text = text.decode(encoding)
        text = text.lstrip(u'\ufeff')  # remove the byte-order mark
# Convert
html = self.convert(text)
# Write to file or stdout
if output:
if isinstance(output, str):
output_file = codecs.open(output, "w",
encoding=encoding,
errors="xmlcharrefreplace")
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors="xmlcharrefreplace")
output_file.write(html)
# Don't close here. User may want to write more.
else:
sys.stdout.write(html)
return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
"""Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* Any arguments accepted by the Markdown class.
Returns: An HTML document as a string.
"""
md = Markdown(*args, **kwargs)
return md.convert(text)
def markdownFromFile(*args, **kwargs):
"""Read markdown code from a file and write it to a file or a stream.
This is a shortcut function which initializes an instance of Markdown,
and calls the convertFile method rather than convert.
Keyword arguments:
* input: a file name or readable object.
* output: a file name or writable object.
* encoding: Encoding of input and output.
* Any arguments accepted by the Markdown class.
"""
# For backward compatibility loop through positional args
pos = ['input', 'output', 'extensions', 'encoding']
c = 0
for arg in args:
if not kwargs.has_key(pos[c]):
kwargs[pos[c]] = arg
c += 1
if c == len(pos):
break
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None),
kwargs.get('output', None),
kwargs.get('encoding', None))
| {
"content_hash": "bc448fd4f98cb7ab7c91a13364c06ca9",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 109,
"avg_line_length": 36.33644859813084,
"alnum_prop": 0.5920138888888888,
"repo_name": "GoSteven/Diary",
"id": "31009011602470e93a4c52ea3467bd476828907d",
"size": "15552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markdown/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "96307"
},
{
"name": "Python",
"bytes": "4274191"
}
],
"symlink_target": ""
} |
import math
from collections import namedtuple
Customer = namedtuple("Customer", ['index', 'demand', 'x', 'y'])
def length(customer1, customer2):
return math.sqrt((customer1[1] - customer2[1])**2 + (customer1[2] - customer2[2])**2)
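# Note: customers below are stored as plain (demand, x, y) tuples, so indices
# 1 and 2 are the coordinates; the Customer namedtuple above is defined but
# left unused by this grader.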
# TODO: replace the manual loops below with list comprehensions
def grade(input_data, quality_data, submission):
score = 0
scoreUB = 1.0
feedback = ''
lines = input_data.split('\n')
parts = lines[0].split()
customerCount = int(parts[0])
vehicleCount = int(parts[1])
vehicleCapacity = int(parts[2])
depotIndex = 0
customers = []
for i in range(1,customerCount+1):
line = lines[i]
parts = line.split()
customers.append((int(parts[0]), float(parts[1]), float(parts[2])))
    subLines = submission.splitlines()
    if len(subLines) != vehicleCount + 2:
return {'score':0.0, 'feedback':'output should have '+str(vehicleCount+1)+' lines, this one has '+str(len(subLines)-1)}
lineOneParts = subLines[0].split()
    if len(lineOneParts) != 2:
return {'score':0.0, 'feedback':'the first output line should have 2 values, this one has '+str(len(lineOneParts))}
try:
obj = int(float(lineOneParts[0]))
except:
return {'score':0.0, 'feedback':'the first output line should only contain numbers, this output has the following bad value: '+lineOneParts[0]}
try:
opt = int(lineOneParts[1])
except:
return {'score':0.0, 'feedback':'the first output line should only contain numbers, this output has the following bad value: '+lineOneParts[1]}
tours = []
for v in range(0, vehicleCount):
vtour = subLines[1+v].split()
if len(vtour) < 2 :
return {'score':0.0, 'feedback':'the vehicle output line '+str(v)+' should have at least 2 values, this one has '+str(len(vtour))}
badVals = set()
for val in vtour:
if not val.isdigit():
badVals.add(val)
if len(badVals) > 0:
return {'score':0.0, 'feedback':'the vehicle output line '+str(v)+' should only contain integers, this output has the following bad values: '+(', '.join([str(x) for x in badVals]))}
vtour = [int(x) for x in vtour]
badVals = set()
for val in vtour:
if val < 0 or val > customerCount-1:
badVals.add(val)
if len(badVals) > 0:
return {'score':0.0, 'feedback':'the vehicle output line '+str(v)+' should only contain values between 0 and '+str(customerCount-1)+', this output has the following bad values: '+(', '.join([str(x) for x in badVals]))}
if vtour[0] != depotIndex:
return {'score':0.0, 'feedback':'the vehicle output line '+str(v)+' does not start at the depot. The line should begin with the value '+str(depotIndex)+' but it starts with the value '+str(vtour[0])}
if vtour[-1] != depotIndex:
return {'score':0.0, 'feedback':'the vehicle output line '+str(v)+' does not end at the depot. The line should end with the value '+str(depotIndex)+' but it ends with the value '+str(vtour[-1])}
tours.append(vtour)
try:
runtime = float(subLines[-1])
except (ValueError, TypeError):
return {'score':0.0, 'feedback': 'The evaluation script has failed with error code (2). Sorry for the inconvenience. Please post this message in the \'Platform Feedback\' forum.'+feedback}
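    # Every customer except the depot must be visited by exactly one vehicle.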
unused = set(range(0, customerCount))
unused.discard(depotIndex)
for vtour in tours:
for c in vtour:
unused.discard(c)
if len(unused) > 0:
return {'score':0.0, 'feedback':'the solution does not contain all of the customers in 0..'+str(customerCount-1)+', this solution is missing: '+(', '.join([str(x) for x in unused]))}
totalCustomers = sum([len(vtour)-2 for vtour in tours])
if totalCustomers != customerCount-1:
return {'score':0.0, 'feedback':'the solution contains '+str(totalCustomers)+' customers, but only '+str(customerCount-1)+' are required.' }
load = [0]*vehicleCount
value = 0
for v in range(0, vehicleCount):
vtour = tours[v]
for i in range(0, len(vtour)-1):
load[v] += customers[vtour[i]][0]
value += length(customers[vtour[i]],customers[vtour[i+1]])
overloaded = []
for v in range(0,vehicleCount):
if load[v] > vehicleCapacity:
overloaded.append('vehicle '+str(v)+' is over the capacity limit by '+str(load[v]-vehicleCapacity))
if len(overloaded) > 0:
return {'score':0.0, 'feedback':'the solution has vehicle capacity violations: '+', '.join(overloaded)}
#print obj, value
    if abs(value - obj) > 1:
feedback = feedback + '\nWarning: submitted objective value is inconsistent with actual value. given: '+str(obj)+' actual value: '+str(value)
# if opt :
# results = db.checkForLess(metadata.assignment_id, problem_id, value)
# if results != None and len(results) > 0 : # todo we should make this a little mode robust to floating point arithmetic
# bestVal = min([x[4] for x in results]) #4 is the quality column in the DB
# feedback = feedback + '\nWarning: your algorithm claimed to have an optimal solution with objective value '+str(value)+'. However, a solution exists with an objective value of '+str(int(bestVal))+' demonstrating that your solution is not optimal.'
    if runtime > 18000.0:
feedback = feedback + '\nNote: your algorithm runtime exceeded the time limit (5 hours), setting your score limit to 7. For a better grade, run your algorithm within the 5 hour time limit.'
scoreUB = 0.7
    if value <= quality_data.pt10:
if opt:
return {'score':min(1.0,scoreUB), 'feedback':'Awesome Optimization! Your algorithm is competitive with an expert solution and your solution objective value '+str(value)+' appears to be optimal! You can\'t beat that.'+feedback}
else :
return {'score':min(1.0,scoreUB), 'feedback':'Awesome Optimization! Your algorithm is competitive with an expert solution. Can you prove that your solution objective value '+str(value)+' is optimal? '+feedback}
    elif value <= quality_data.pt3:
return {'score':min(0.7,scoreUB), 'feedback':'Good Optimization. Your algorithm does some basic optimization but your solution objective value '+str(value)+' can be improved significantly. For a higher grade, you will need to improve the objective value to '+str(quality_data.pt10)+' or better. '+feedback}
else:
return {'score':min(0.3,scoreUB), 'feedback':'Your submission output is good, but the solution objective value '+str(value)+' is insufficient for full credit. For a higher grade, you will need to improve the objective value to '+str(quality_data.pt3)+' or better. '+feedback}
return {'score':0.0, 'feedback': 'The evaluation script has failed with error code (1). Sorry for the inconvenience. Please post this message in the \'Platform Feedback\' forum.'+feedback}
| {
"content_hash": "cbbfeebff07bdecc85f66ac346fe7ce4",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 314,
"avg_line_length": 50.906474820143885,
"alnum_prop": 0.6445732052006784,
"repo_name": "discreteoptimization/assignment",
"id": "5d4e02247f55c25ad5e587b4a139079cd7003acf",
"size": "7076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vrp/grader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "530"
},
{
"name": "Java",
"bytes": "2525"
},
{
"name": "Python",
"bytes": "107424"
},
{
"name": "Shell",
"bytes": "8163"
},
{
"name": "TeX",
"bytes": "41526"
}
],
"symlink_target": ""
} |
from blocks.bricks import Tanh
from blocks.algorithms import BasicMomentum, AdaDelta, RMSProp, Adam, CompositeRule, StepClipping, Momentum
from blocks.initialization import IsotropicGaussian, Constant, Orthogonal
from model.index_attention import Model
add_cnn_data = 0.0
batch_size = 64
sort_batch_count = 20
shuffle_questions = True
shuffle_entities = True
concat_ctx_and_question = False
concat_question_before = False
embed_size = 200
ctx_lstm_size = [256, 256]
ctx_skip_connections = False
question_lstm_size = [256]
question_skip_connections = True
attention_mlp_hidden = [200]
attention_mlp_activations = [Tanh()]
step_rule = CompositeRule([RMSProp(decay_rate=0.95, learning_rate=5e-5),
BasicMomentum(momentum=0.9)])
dropout = 0.2
w_noise = 0.
valid_freq = 10000
save_freq = 10000
print_freq = 1000
weights_init = IsotropicGaussian(0.01)
biases_init = Constant(0.)
transition_weights_init = Orthogonal()
| {
"content_hash": "fc06fc30e39ca40cb265caca482a82c9",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 107,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.740546218487395,
"repo_name": "arianhosseini/Question-Answering",
"id": "1965b4db74ca8f6f53030fda7e5fdf8c1c28a17d",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/LMU_index_attention.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226172"
}
],
"symlink_target": ""
} |
import wtforms_json
from wtforms import (Form, PasswordField, SelectField, StringField,
TextAreaField)
from wtforms.validators import URL, DataRequired, Email
wtforms_json.init()
class LoginForm(Form):
username = StringField(
u'Nome',
validators=[DataRequired()],
render_kw={
'placeholder': 'Usuario',
'tabindex': '1'
})
password = PasswordField(
u'Senha',
validators=[DataRequired()],
render_kw={
'placeholder': 'Senha',
'tabindex': '2'
})
class ContactForm(Form):
formNome = StringField(u'Nome', validators=[DataRequired()])
formEmail = StringField(u'Email', validators=[DataRequired(), Email()])
formTelefone = StringField(u'Telefone (Opcional)')
formMensagem = TextAreaField(u'Mensagem', validators=[DataRequired()])
class PortfolioForm(Form):
title = StringField(
u'Título do trabalho:',
validators=[DataRequired()],
render_kw={'placeholder': 'Data Analysis'})
description = StringField(
u'Descrição:',
validators=[DataRequired()],
render_kw={'placeholder': 'Este projeto trata-se de...'})
imageUrl = StringField(
u'URL de Imagem de Capa:',
validators=[DataRequired(), URL()],
render_kw={'placeholder': 'https://i.imgur.com/u6R67rw.jpg'})
content = TextAreaField(u'Conteúdo', validators=[DataRequired()])
class PortfolioFormSelector(PortfolioForm):
itemSelector = SelectField(u'Selecione o projeto:')
    # Populate the selector's choices from all existing portfolio entries.
def fill(self, DBModel):
self.itemSelector.choices = [(x.id, x.title)
for x in DBModel.query.all()]
| {
"content_hash": "bfa03a648ea6861a5cb7e09b6c3c220f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 30.875,
"alnum_prop": 0.6124927703875073,
"repo_name": "saskenuba/simple-flask-blog",
"id": "8866e6d7e7cee3b099b6cce072b3ed2f43ecba0d",
"size": "1733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "martinblog/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12405"
},
{
"name": "HTML",
"bytes": "40484"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "30671"
},
{
"name": "TypeScript",
"bytes": "29243"
}
],
"symlink_target": ""
} |
from lab_assistant.storage.backends.base import StorageBackend
class NullBackend(StorageBackend):
def get(self, key):
return None
def set(self, key, result):
return None
def remove(self, key):
return None
def clear(self):
return None
def list(self):
return []
| {
"content_hash": "53b17cfc34a89e2188d79b663d2d840c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 18.11111111111111,
"alnum_prop": 0.6196319018404908,
"repo_name": "joealcorn/lab_assistant",
"id": "eba300df412280b75ce74749f4ebac761dd42a91",
"size": "326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab_assistant/storage/backends/null.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11799"
}
],
"symlink_target": ""
} |
import sqlalchemy
from nark import DateTime, Timestamp
class UTCDateTime(sqlalchemy.types.TypeDecorator):
"""Store this DateTime object as a UTC timestamp """
impl = sqlalchemy.types.Integer
def process_bind_param(self, value, dialect):
return DateTime.as_timestamp(value)
def process_result_value(self, value, dialect):
return Timestamp.as_datetime(value)
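# Usage sketch (assumed model column, for illustration only):
#   created_at = sqlalchemy.Column(UTCDateTime(), nullable=False)
# Bound values are written as integer UTC timestamps and read back as
# datetime objects.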
class UTCTime(sqlalchemy.types.TypeDecorator):
"""Store this Time object as a UTC timestamp """
impl = sqlalchemy.types.Integer
def process_bind_param(self, value, dialect):
return DateTime.as_timestamp(value)
def process_result_value(self, value, dialect):
return Timestamp.as_datetime(value) | {
"content_hash": "1d482fcd0955ff67eb8185385efbcf64",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 56,
"avg_line_length": 30.083333333333332,
"alnum_prop": 0.7188365650969529,
"repo_name": "shadowmint/python-nark",
"id": "d5b15f1a52324dcf1f32e773d439facea7e529b9",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nark/pyramid/fields/time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86811"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import timedelta, date
from workalendar.core import WesternCalendar
from workalendar.core import SUN, MON
from workalendar.core import ChristianMixin
class SouthAfrica(WesternCalendar, ChristianMixin):
"South Africa"
include_good_friday = True
include_easter_monday = True
include_christmas = True
include_boxing_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Workers Day"),
(12, 16, "Day of reconcilation"),
)
def get_family_day(self, year):
return (self.get_good_friday(year), "Family Day")
def get_fixed_holidays(self, year):
days = super(SouthAfrica, self).get_fixed_holidays(year)
if year < 1952:
days.append((date(year, 5, 24), "Empire Day"))
if year >= 1952 and year <= 1974:
days.append((date(year, 4, 6), "Van Riebeeck's Day"))
if year >= 1952 and year <= 1979:
days.append((self.get_nth_weekday_in_month(year, 9, MON, 1),
"Settlers' Day"))
if year >= 1952 and year <= 1993:
days.append((date(year, 10, 10), "Kruger Day"))
if year <= 1960:
days.append((date(year, 5, 31), "Union Day"))
if year > 1960 and year <= 1993:
days.append((date(year, 5, 31), "Republic Day"))
if year > 1960 and year <= 1974:
days.append((date(year, 7, 10), "Family Day"))
if year >= 1980 and year <= 1994:
days.append((date(year, 4, 6), "Founder's Day"))
if year >= 1990:
days.append((date(year, 3, 21), 'Human Rights Day'))
if year <= 1993:
days.append((self.get_ascension_thursday(year), "Ascension Day"))
if year >= 1994:
days.append((date(year, 4, 27), "Freedom Day"))
days.append((date(year, 12, 26), "Day of good will"))
if year >= 1995:
days.append((date(year, 6, 16), "Youth Day"))
days.append((date(year, 8, 9), "National Women Day"))
days.append((date(year, 9, 24), "Heritage Day"))
return days
def get_variable_days(self, year):
days = super(SouthAfrica, self).get_variable_days(year)
days.append(self.get_family_day(year))
days += self.get_fixed_holidays(year)
        # Holidays falling on a Sunday are observed on the following Monday; add those substitute days.
for holiday, label in days:
if holiday.weekday() == SUN:
days.append((
holiday + timedelta(days=1),
"%s substitute" % label
))
# Other one-offs. Don't shift these
if year == 1999:
days.append((date(year, 6, 2), "National Elections"))
days.append((date(year, 12, 31), "Y2K"))
if year == 2000:
# 2 January 2000 public holidays to accommodate the Y2K changeover,
# 3 January 2000 because the previous holiday was a Sunday
days.append((date(year, 1, 2), "Y2K"))
days.append((date(year, 1, 3), "Y2K"))
if year == 2001:
days.append((date(year, 1, 2), "Y2K"))
if year == 2004:
days.append((date(year, 4, 14), "National Elections"))
if year == 2006:
days.append((date(year, 3, 1), "Local Elections"))
if year == 2008:
# 2 May 2008 was declared a public holiday when Human Rights Day
# and Good Friday coincided on 21 March 2008
days.append((date(year, 5, 2), "Special Human Rights"))
if year == 2009:
days.append((date(year, 4, 22), "National Elections"))
if year == 2011:
days.append((date(year, 5, 18), "Local Elections"))
days.append((date(year, 12, 27), "Special Day of Goodwill"))
if year == 2014:
days.append((date(year, 5, 7), "National Elections"))
if year == 2016:
days.append((date(year, 8, 3), "Local Elections"))
return days
| {
"content_hash": "de7a09d18aa6df63ee94e39b3b40d1ca",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 41.55102040816327,
"alnum_prop": 0.5530451866404715,
"repo_name": "sayoun/workalendar",
"id": "06141f7d1ed98dffcdc922c4a16bdfa8c82f8bd3",
"size": "4096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workalendar/africa/south_africa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "383844"
}
],
"symlink_target": ""
} |
import os
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
"""Get the environment variable or return exception."""
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the {} environment variable".format(var_name)
raise ImproperlyConfigured(error_msg)
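# Example (hypothetical variable name):
#   SECRET_KEY = get_env_variable('SECRET_KEY')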
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# need to call os.path.dirname to go up one level in the path (like ../)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
JINJA2_DIR = os.path.join(BASE_DIR, 'templates/jinja2')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = set in local_settings.py
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'aggregator.apps.AggregatorConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'termsearch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': [JINJA2_DIR],
'APP_DIRS': True,
'OPTIONS': {
'environment': 'termsearch.jinja2utils.environment',
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'termsearch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# imported from local_settings.py
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# top level static directory
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# ckeditor WYSIWYG stuff
MEDIA_URL = '/media/'
CKEDITOR_UPLOAD_PATH = 'uploads/'
# Logging settings
# Overwrite the default settings
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'verbose': {
'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'development_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/django_dev.log'),
'filters': ['require_debug_true'],
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'production_logfile': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(BASE_DIR,'logs/django_production.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'simple',
},
'dba_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR,'logs/django_dba.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'security_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(BASE_DIR, 'logs/django_security.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'aggregator': {
'handlers': ['console', 'development_logfile', 'production_logfile'],
'level': 'DEBUG',
},
'dba': {
'handlers': ['console', 'dba_logfile'],
},
'django': {
'handlers': ['console', 'development_logfile', 'production_logfile'],
},
'django.security': {
'handlers': ['console', 'security_logfile'],
'propagate': False,
},
'py.warnings': {
'handlers': ['console', 'development_logfile'],
},
}
}
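# Usage sketch (illustrative only): application code obtains the loggers
# configured above by name, e.g.
#   import logging
#   logger = logging.getLogger('aggregator')
#   logger.debug("goes to console + development_logfile when DEBUG is on")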
| {
"content_hash": "ba5cee516757e3375cc4e7f2be800e62",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 99,
"avg_line_length": 29.366812227074234,
"alnum_prop": 0.5727881040892193,
"repo_name": "olivmaurel/termsearch",
"id": "75907dc0177271551cc87e45732e9d293e58413c",
"size": "6725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "termsearch/settings/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12338"
},
{
"name": "HTML",
"bytes": "9460"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "52696"
}
],
"symlink_target": ""
} |
import wx
import wx.lib.ogl as ogl
class Dialog_text_edit(wx.Dialog):
def __init__(self, parent, title):
super(Dialog_text_edit, self).__init__(parent, title=title,size=(240,200))
self.parent = parent
self.texto = wx.TextCtrl( self, wx.ID_ANY, style=wx.TE_MULTILINE , size=(220,120), pos=(10,10))
aceptar = wx.Button( self, wx.ID_ANY, u"Aceptar",pos=(10,140))
aceptar.Bind(wx.EVT_BUTTON,self.text)
self.label_content()
def text(self,evt):
r = self.texto.GetValue()
self.parent.modify_label(r,10,self.type_shape)
self.Destroy()
def label_content(self):
id_shape = self.parent.shape_selected.GetId()
self.type_shape = ''
for data in self.parent.diccionary_shapes_info:
if data[1] == id_shape:
self.texto.SetValue(str(data[3]))
				self.type_shape = str(data[2]) | {
"content_hash": "ec80463fe78a5d6b79574cdaf9c5e5e0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 97,
"avg_line_length": 26.733333333333334,
"alnum_prop": 0.6733167082294265,
"repo_name": "pacpac1992/mymockup",
"id": "cfdde5d3db1cf9e0fe41e11b7f8e2421da833d9a",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/widgets/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68507"
}
],
"symlink_target": ""
} |
from .ast_node import AstNode
from .errors import AstBuilderException
class AstBuilder(object):
def __init__(self):
self.reset()
def reset(self):
self.stack = [AstNode('None')]
self.comments = []
def start_rule(self, rule_type):
self.stack.append(AstNode(rule_type))
def end_rule(self, rule_type):
node = self.stack.pop()
self.current_node.add(node.rule_type, self.transform_node(node))
def build(self, token):
if token.matched_type == 'Comment':
self.comments.append({
'type': 'Comment',
'location': self.get_location(token),
'text': token.matched_text
})
else:
self.current_node.add(token.matched_type, token)
def get_result(self):
return self.current_node.get_single('Feature')
@property
def current_node(self):
return self.stack[-1]
def get_location(self, token, column=None):
return (token.location if not column else
{'line': token.location['line'], 'column': column})
def get_tags(self, node):
tags = []
tags_node = node.get_single('Tags')
if not tags_node:
return tags
for token in tags_node.get_tokens('TagLine'):
tags += [{'type': 'Tag',
'location': self.get_location(token, tag_item['column']),
'name': tag_item['text']} for tag_item in token.matched_items]
return tags
def get_table_rows(self, node):
rows = [{'type': 'TableRow',
'location': self.get_location(token),
'cells': self.get_cells(token)} for token in node.get_tokens('TableRow')]
self.ensure_cell_count(rows)
return rows
def ensure_cell_count(self, rows):
if not rows:
return
cell_count = len(rows[0]['cells'])
for row in rows:
if len(row['cells']) != cell_count:
raise AstBuilderException("inconsistent cell count within the table",
row['location'])
def get_cells(self, table_row_token):
return [{'type': 'TableCell',
'location': self.get_location(table_row_token, cell_item['column']),
'value': cell_item['text']} for cell_item in table_row_token.matched_items]
def get_description(self, node):
return node.get_single('Description')
def get_steps(self, node):
return node.get_items('Step')
def transform_node(self, node):
if node.rule_type == 'Step':
step_line = node.get_token('StepLine')
step_argument = None
if node.get_single('DataTable'):
step_argument = node.get_single('DataTable')
elif node.get_single('DocString'):
step_argument = node.get_single('DocString')
return self.reject_nones({
'type': node.rule_type,
'location': self.get_location(step_line),
'keyword': step_line.matched_keyword,
'text': step_line.matched_text,
'argument': step_argument
})
elif node.rule_type == 'DocString':
separator_token = node.get_tokens('DocStringSeparator')[0]
content_type = separator_token.matched_text if len(separator_token.matched_text) > 0 else None
line_tokens = node.get_tokens('Other')
content = '\n'.join([t.matched_text for t in line_tokens])
return self.reject_nones({
'type': node.rule_type,
'location': self.get_location(separator_token),
'contentType': content_type,
'content': content
})
elif node.rule_type == 'DataTable':
rows = self.get_table_rows(node)
return self.reject_nones({
'type': node.rule_type,
'location': rows[0]['location'],
'rows': rows,
})
elif node.rule_type == 'Background':
background_line = node.get_token('BackgroundLine')
description = self.get_description(node)
steps = self.get_steps(node)
return self.reject_nones({
'type': node.rule_type,
'location': self.get_location(background_line),
'keyword': background_line.matched_keyword,
'name': background_line.matched_text,
'description': description,
'steps': steps
})
elif node.rule_type == 'Scenario_Definition':
tags = self.get_tags(node)
scenario_node = node.get_single('Scenario')
if scenario_node:
scenario_line = scenario_node.get_token('ScenarioLine')
description = self.get_description(scenario_node)
steps = self.get_steps(scenario_node)
return self.reject_nones({
'type': scenario_node.rule_type,
'tags': tags,
'location': self.get_location(scenario_line),
'keyword': scenario_line.matched_keyword,
'name': scenario_line.matched_text,
'description': description,
'steps': steps
})
else:
scenario_outline_node = node.get_single('ScenarioOutline')
if not scenario_outline_node:
raise RuntimeError('Internal grammar error')
scenario_outline_line = scenario_outline_node.get_token('ScenarioOutlineLine')
description = self.get_description(scenario_outline_node)
steps = self.get_steps(scenario_outline_node)
examples = scenario_outline_node.get_items('Examples_Definition')
return self.reject_nones({
'type': scenario_outline_node.rule_type,
'tags': tags,
'location': self.get_location(scenario_outline_line),
'keyword': scenario_outline_line.matched_keyword,
'name': scenario_outline_line.matched_text,
'description': description,
'steps': steps,
'examples': examples
})
elif node.rule_type == 'Examples_Definition':
tags = self.get_tags(node)
examples_node = node.get_single('Examples')
examples_line = examples_node.get_token('ExamplesLine')
description = self.get_description(examples_node)
rows = self.get_table_rows(examples_node)
return self.reject_nones({
'type': examples_node.rule_type,
'tags': tags,
'location': self.get_location(examples_line),
'keyword': examples_line.matched_keyword,
'name': examples_line.matched_text,
'description': description,
'tableHeader': rows[0],
'tableBody': rows[1:]
})
elif node.rule_type == 'Description':
line_tokens = node.get_tokens('Other')
# Trim trailing empty lines
last_non_empty = next(i for i, j in reversed(list(enumerate(line_tokens)))
if j.matched_text)
description = '\n'.join([token.matched_text for token in
line_tokens[:last_non_empty + 1]])
return description
elif node.rule_type == 'Feature':
header = node.get_single('Feature_Header')
if not header:
return
tags = self.get_tags(header)
feature_line = header.get_token('FeatureLine')
if not feature_line:
return
background = node.get_single('Background')
scenario_definitions = node.get_items('Scenario_Definition')
description = self.get_description(header)
language = feature_line.matched_gherkin_dialect
return self.reject_nones({
'type': node.rule_type,
'tags': tags,
'location': self.get_location(feature_line),
'language': language,
'keyword': feature_line.matched_keyword,
'name': feature_line.matched_text,
'description': description,
'background': background,
'scenarioDefinitions': scenario_definitions,
'comments': self.comments
})
else:
return node
def reject_nones(self, values):
return {k: v for k, v in values.items() if v is not None} # only None should be rejected
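# Usage sketch (illustration only; the Parser/TokenScanner entry points are
# assumed from this repo's layout and are not shown here). The builder is
# driven by the parser, which calls start_rule/end_rule/build and finally
# get_result():
#   from gherkin3.parser import Parser
#   from gherkin3.token_scanner import TokenScanner
#   feature = Parser(AstBuilder()).parse(TokenScanner("Feature: minimal\n"))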
| {
"content_hash": "0299da1636a806704eb243a69a7a6879",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 107,
"avg_line_length": 39.629464285714285,
"alnum_prop": 0.5315985130111525,
"repo_name": "chebizarro/gherkin3",
"id": "91aa1e4f272637d193648b343978b42dbe82fa69",
"size": "8877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/gherkin3/ast_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C#",
"bytes": "182953"
},
{
"name": "Cucumber",
"bytes": "5413"
},
{
"name": "Go",
"bytes": "162390"
},
{
"name": "HTML",
"bytes": "261"
},
{
"name": "JSONiq",
"bytes": "1060"
},
{
"name": "Java",
"bytes": "156637"
},
{
"name": "JavaScript",
"bytes": "237821"
},
{
"name": "Makefile",
"bytes": "15776"
},
{
"name": "Python",
"bytes": "105904"
},
{
"name": "Ruby",
"bytes": "102526"
},
{
"name": "Shell",
"bytes": "1335"
}
],
"symlink_target": ""
} |
from taiga.base.api import serializers
from taiga.base.fields import MethodField
class VoteResourceSerializerMixin(serializers.LightSerializer):
is_voter = MethodField()
total_voters = MethodField()
def get_is_voter(self, obj):
# The "is_voted" attribute is attached in the get_queryset of the viewset.
return getattr(obj, "is_voter", False) or False
def get_total_voters(self, obj):
# The "total_voters" attribute is attached in the get_queryset of the viewset.
return getattr(obj, "total_voters", 0) or 0
| {
"content_hash": "076bc45df91b0f86e5d0c6c217fd6619",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 37.266666666666666,
"alnum_prop": 0.7066189624329159,
"repo_name": "mattcongy/itshop",
"id": "9f9d10498405ebe87851f9ca1ca7a05f9c8d2fcf",
"size": "1495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker-images/taigav2/taiga-back/taiga/projects/votes/mixins/serializers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103474"
},
{
"name": "CoffeeScript",
"bytes": "3380"
},
{
"name": "HTML",
"bytes": "274547"
},
{
"name": "JavaScript",
"bytes": "203660"
},
{
"name": "Nginx",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "3591150"
},
{
"name": "Ruby",
"bytes": "164978"
},
{
"name": "Shell",
"bytes": "5238"
}
],
"symlink_target": ""
} |
'''
Created on 22-08-2012
@author: piotrhol
'''
__author__ = "piotrhol"
__copyright__ = "PNSC (@@check)"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
import rdflib
import mimetypes
from rocommand import ro_uriutils
from rocommand.ro_remote_metadata import ROSRS_Error
log = logging.getLogger(__name__)
ACTION_AGGREGATE_INTERNAL = 1
ACTION_AGGREGATE_EXTERNAL = 2
ACTION_AGGREGATE_ANNOTATION = 3
ACTION_UPDATE_OVERWRITE = 4
ACTION_UPDATE = 5
ACTION_UPDATE_ANNOTATION = 6
ACTION_SKIP = 7
ACTION_DELETE = 8
ACTION_DELETE_ANNOTATION = 9
ACTION_ERROR = 10
def pushResearchObject(localRo, remoteRo, force = False):
'''
Scans a given RO version directory for files that have been modified since last synchronization
and pushes them to ROSRS. Modification is detected by checking modification times and checksums.
'''
push = PushResearchObject(localRo, remoteRo, force)
for (action, uri) in push.push():
yield (action, uri)
return
class PushResearchObject:
def __init__(self, localRo, remoteRo, force = False):
self._localRo = localRo
self._remoteRo = remoteRo
self._force = force
def push(self):
mimetypes.init()
for localResuri in self._localRo.getAggregatedResources():
for (action, uri) in self.__uploadLocalResource(localResuri):
yield (action, uri)
self._remoteRo.reloadManifest()
for resuri in self._remoteRo.getAggregatedResources():
for (action, uri) in self.__checkRemoteResource(resuri):
yield (action, uri)
self._remoteRo.reloadManifest()
for (ann_node, ann_body, ann_target) in self._localRo.getAllAnnotationNodes():
for (action, uri) in self.__uploadLocalAnnotation(ann_node, ann_body, ann_target):
yield (action, uri)
self._remoteRo.reloadManifest()
for (ann_node, ann_body, ann_target) in self._remoteRo.getAllAnnotationNodes():
for (action, uri) in self.__checkRemoteAnnotation(ann_node):
yield (action, uri)
self._remoteRo.reloadManifest()
self._localRo.saveRegistries()
return
def __uploadLocalResource(self, localResuri):
try:
respath = self._localRo.getComponentUriRel(localResuri)
if not self._remoteRo.isAggregatedResource(respath):
for (action, uri) in self.__createResource(localResuri, respath):
yield (action, uri)
else:
for (action, uri) in self.__updateResource(localResuri, respath):
yield (action, uri)
except Exception as e:
log.error("Error when processing resource %s: %s"%(localResuri, e))
yield (ACTION_ERROR, e)
def __createResource(self, localResuri, respath):
log.debug("ResourceSync.pushResearchObject: %s does was not aggregated in the remote RO"%(respath))
if self._localRo.isInternalResource(localResuri):
log.debug("ResourceSync.pushResearchObject: %s is internal"%(localResuri))
if self._localRo.isAnnotationNode(respath):
# annotations are handled separately
pass
else:
yield (ACTION_AGGREGATE_INTERNAL, respath)
filename = ro_uriutils.getFilenameFromUri(localResuri)
currentChecksum = self._localRo.calculateChecksum(filename)
rf = open(filename, 'r')
(status, reason, headers, resuri) = self._remoteRo.aggregateResourceInt(
respath,
mimetypes.guess_type(respath)[0],
rf)
self._localRo.getRegistries()["%s,etag"%filename] = headers.get("etag", None)
self._localRo.getRegistries()["%s,checksum"%filename] = currentChecksum
elif self._localRo.isExternalResource(localResuri):
log.debug("ResourceSync.pushResearchObject: %s is external"%(localResuri))
yield (ACTION_AGGREGATE_EXTERNAL, respath)
self._localRo.aggregateResourceExt(respath)
else:
log.error("ResourceSync.pushResearchObject: %s is neither internal nor external"%(localResuri))
def __updateResource(self, localResuri, respath):
log.debug("ResourceSync.pushResearchObject: %s does was already aggregated in the remote RO"%(respath))
if self._localRo.isInternalResource(localResuri):
log.debug("ResourceSync.pushResearchObject: %s is internal"%(localResuri))
if self._localRo.isAnnotationNode(respath):
# annotations are handled separately
pass
else:
log.debug("ResourceSync.pushResearchObject: %s is a resource"%(localResuri))
# Get remote ETag
(status, reason, headers) = self._remoteRo.getHead(respath)
if status != 200:
raise Exception("Error retrieving RO resource", "%03d %s (%s)"%(status, reason, respath))
filename = ro_uriutils.getFilenameFromUri(localResuri)
currentETag = headers.get("etag", None)
currentChecksum = self._localRo.calculateChecksum(filename)
# Check locally stored ETag
previousETag = self._localRo.getRegistries().get("%s,etag"%filename, None)
previousChecksum = self._localRo.getRegistries().get("%s,checksum"%filename, None)
if not previousETag or previousETag != currentETag or not previousChecksum or previousChecksum != currentChecksum:
rf = open(ro_uriutils.getFilenameFromUri(localResuri), 'r')
try:
(status, reason, headers, resuri) = self._remoteRo.updateResourceInt(respath,
mimetypes.guess_type(localResuri)[0],
rf)
self._localRo.getRegistries()["%s,etag"%filename] = headers.get("etag", None)
self._localRo.getRegistries()["%s,checksum"%filename] = currentChecksum
if not previousETag or previousETag != currentETag:
log.debug("ResourceSync.pushResearchObject: %s has been modified in ROSRS (ETag was %s is %s)"%(respath, previousETag, currentETag))
yield (ACTION_UPDATE_OVERWRITE, respath)
elif not previousChecksum or previousChecksum != currentChecksum:
log.debug("ResourceSync.pushResearchObject: %s has been modified locally (checksum was %s is %s)"%(respath, previousChecksum, currentChecksum))
yield (ACTION_UPDATE, respath)
except ROSRS_Error as e:
yield (ACTION_ERROR, e)
else:
log.debug("ResourceSync.pushResearchObject: %s has NOT been modified"%(respath))
yield (ACTION_SKIP, respath)
elif self._localRo.isExternalResource(localResuri):
log.debug("ResourceSync.pushResearchObject: %s is external"%(localResuri))
yield (ACTION_SKIP, localResuri)
else:
log.error("ResourceSync.pushResearchObject: %s is neither internal nor external"%(localResuri))
def __checkRemoteResource(self, resuri):
respath = self._remoteRo.getComponentUriRel(resuri)
if not self._localRo.isAggregatedResource(respath):
if self._remoteRo.isAnnotationNode(respath):
# annotations are handled separately
pass
else:
log.debug("ResourceSync.pushResearchObject: %s will be deaggregated"%(resuri))
try:
self._remoteRo.deaggregateResource(resuri)
yield (ACTION_DELETE, resuri)
except ROSRS_Error as e:
yield (ACTION_ERROR, e)
def __uploadLocalAnnotation(self, ann_node, ann_body, ann_target):
annpath = self._localRo.getComponentUriRel(ann_node)
bodypath = self._localRo.getComponentUriRel(ann_body)
targetpath = self._localRo.getComponentUriRel(ann_target)
if isinstance(ann_node, rdflib.BNode) or not self._remoteRo.isAnnotationNode(annpath):
log.debug("ResourceSync.pushResearchObject: %s is a new annotation"%(annpath))
try:
(_, _, remote_ann_node_uri) = self._remoteRo.addAnnotationNode(bodypath, targetpath)
remote_ann_node_path = self._remoteRo.getComponentUriRel(remote_ann_node_uri)
self._localRo.replaceUri(ann_node, self._localRo.getComponentUriAbs(remote_ann_node_path))
yield (ACTION_AGGREGATE_ANNOTATION, remote_ann_node_path)
except ROSRS_Error as e:
yield (ACTION_ERROR, e)
else:
log.debug("ResourceSync.pushResearchObject: %s is an existing annotation"%(annpath))
self._remoteRo.updateAnnotationNode(annpath, bodypath, targetpath)
yield (ACTION_UPDATE_ANNOTATION, ann_node)
def __checkRemoteAnnotation(self, ann_node):
annpath = self._remoteRo.getComponentUriRel(ann_node)
if not self._localRo.isAnnotationNode(annpath):
log.debug("ResourceSync.pushResearchObject: annotation %s will be deleted"%(ann_node))
try:
self._remoteRo.deleteAnnotationNode(ann_node)
yield (ACTION_DELETE_ANNOTATION, ann_node)
except ROSRS_Error as e:
yield (ACTION_ERROR, e)
def pushZipRO(localRo, remoteRo, force = False):
return
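# Usage sketch (illustration only; construction of localRo/remoteRo is
# elided, as it lives elsewhere in this package): the sync is a generator of
# (action, uri) pairs, consumed like so:
#   for action, uri in pushResearchObject(localRo, remoteRo):
#       if action == ACTION_ERROR:
#           log.error("sync failed: %s" % uri)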
| {
"content_hash": "a827a3346a77a849732090939886c386",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 171,
"avg_line_length": 49.56218905472637,
"alnum_prop": 0.6024894599478017,
"repo_name": "wf4ever/ro-manager",
"id": "32b5bb5d3576741e6c32803708a755b028326cac",
"size": "9962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rocommand/ro_rosrs_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7498"
},
{
"name": "HTML",
"bytes": "222435"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "964141"
},
{
"name": "Shell",
"bytes": "39373"
},
{
"name": "TeX",
"bytes": "21071"
}
],
"symlink_target": ""
} |
from alembic.ddl.base import RenameTable
from alembic import migration
from redshift_sqlalchemy import dialect
def test_configure_migration_context():
context = migration.MigrationContext.configure(
url='redshift+psycopg2://mydb'
)
assert isinstance(context.impl, dialect.RedshiftImpl)
def test_rename_table():
compiler = dialect.RedShiftDDLCompiler(dialect.RedshiftDialect(), None)
sql = compiler.process(RenameTable("old", "new", "scheme"))
assert sql == 'ALTER TABLE scheme."old" RENAME TO "new"'
| {
"content_hash": "ab11d47001649715189c9ba8cdedbb75",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 31.58823529411765,
"alnum_prop": 0.7374301675977654,
"repo_name": "jklukas/redshift_sqlalchemy",
"id": "a957b3e4f496ff445e664c4019c8cb17358851a8",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_alembic_dialect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80348"
}
],
"symlink_target": ""
} |
"""Use pygame to visualize well_bouncer_game.Game."""
import pygame
import well_bouncer_game
_screen = None
_PLAY_AREA_WIDTH = 200
_PLAY_AREA_HEIGHT = 800
_BORDER_WIDTH = 10
_PLAY_AREA_SCALE = 8
def game_to_screen_point(x, y):
x = float(x) * _PLAY_AREA_SCALE + _BORDER_WIDTH
y = float(y) * _PLAY_AREA_SCALE
return int(x), _PLAY_AREA_HEIGHT + _BORDER_WIDTH - int(y)
def scale_to_screen(p):
p = float(p) * _PLAY_AREA_SCALE
return int(p)
class InteractiveMoveMaker(well_bouncer_game.MoveMaker):
def make_move(self, state) -> well_bouncer_game.Direction:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
return well_bouncer_game.Direction.LEFT
elif event.key == pygame.K_RIGHT:
return well_bouncer_game.Direction.RIGHT
return well_bouncer_game.Direction.CENTER
def _initialize_screen():
global _screen
pygame.init()
pygame.font.init()
size = 220, 820
_screen = pygame.display.set_mode(size, pygame.SRCALPHA)
pygame.key.set_repeat(1, 50)
def play_game(title, game, move_maker: well_bouncer_game.MoveMaker):
if _screen is None:
_initialize_screen()
pygame.display.set_caption(title)
score_font = pygame.font.SysFont("Consolas", 60)
clock = pygame.time.Clock()
score_width = 0
while not game.done:
direction = move_maker.make_move(game.state)
        # Drain the event queue so the window manager keeps the window responsive.
        for event in pygame.event.get():
            pass
game.move(direction)
_screen.fill((0, 0, 0))
move_distribution = move_maker.move_probabilities(game.state)
if move_distribution is not None:
for i in range(well_bouncer_game.Game.NUM_ACTIONS):
probability = move_distribution.get(i, 0.0)
c = pygame.Color(0)
c.hsva = (60 * i, 100, int(100 * probability), 20)
                # pygame.Rect takes (left, top, width, height), so each
                # action gets a fixed-width 66px bar.
                pygame.draw.rect(_screen, c,
                                 pygame.Rect(66 * i, 0, 66, 8))
score_surface = score_font.render(str(int(game.score)), False,
(255, 255, 0))
score_surface.set_alpha(64)
new_score_width = score_surface.get_rect().width
if new_score_width > score_width:
score_width = new_score_width + 10
_screen.blit(score_surface,
((_screen.get_rect().width - score_width) // 2, 10))
pygame.draw.circle(
_screen,
(255, 0, 0),
game_to_screen_point(game.ball_x, game.ball_y),
scale_to_screen(game.ball_radius),
)
pygame.draw.circle(
_screen,
(0, 0, 255),
game_to_screen_point(game.paddle_x, game.paddle_y),
scale_to_screen(game.paddle_radius),
)
pygame.draw.rect(
_screen,
(255, 255, 255),
pygame.Rect(
0,
# Start off screen so that there is no top border
-_BORDER_WIDTH,
_PLAY_AREA_WIDTH + 2 * _BORDER_WIDTH,
# 3 * _BORDER_WIDTH because the rectangle started off screen.
_PLAY_AREA_HEIGHT + 3 * _BORDER_WIDTH),
_BORDER_WIDTH)
pygame.display.flip()
clock.tick(60)
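# Usage sketch (illustration only; the well_bouncer_game.Game() constructor
# is assumed, as that module is not shown here):
#   if __name__ == '__main__':
#       play_game('Well Bouncer', well_bouncer_game.Game(), InteractiveMoveMaker())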
| {
"content_hash": "d9a11da09acb8d95ebb79f685de9ae61",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 77,
"avg_line_length": 31.72641509433962,
"alnum_prop": 0.5560511448111805,
"repo_name": "brianquinlan/learn-machine-learning",
"id": "cbc04090719119e78da1117b0fa968b858a0f090",
"size": "3941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "well_bouncer/pygame_well_bouncer_player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32669"
},
{
"name": "Rust",
"bytes": "4163"
}
],
"symlink_target": ""
} |
import os
import shutil
from PyQt5.Qt import QUuid, QDir, QFile, QFileInfo, QObject, Qt, QTextStream, QIODevice, QUrl, QByteArray, QMessageBox, QApplication
class Utility:
Casing_Uppercase = 0 # Change characters to uppercase
Casing_Lowercase = 1 # Change characters to lowercase
Casing_Titlecase = 2 # Change first character of each word to uppercase, reset to lowercase.
Casing_Capitalize = 3 # Change first character of sentence to uppercase, rest to lowercase.
@staticmethod
def CreateUUID():
# Uses QUuid to generate a random UUID but also removes
# the curly braces that QUuid::createUuid() adds
return QUuid.createUuid().toString().replace('{', '').replace('}', '')
@staticmethod
def ChageCase(text, casing):
# Convert the casing of the text, returning the result.
if not text:
return text
if casing is Utility.Casing_Lowercase:
return text.lower()
elif casing is Utility.Casing_Uppercase:
return text.upper()
elif casing is Utility.Casing_Titlecase:
txt = ''
for i in text.split(' '):
if i:
txt += i[0].upper() + i[1:] + ' '
                else:  # Space
txt += ' '
return txt[:-1]
elif casing is Utility.Casing_Capitalize:
txt = ''
flag = False
for i in text.split(' '):
if i:
if flag:
txt += i
else:
flag = True
txt += i[0].upper() + i[1:] + ' '
else:
txt += ' '
return txt[:-1]
else:
return text
@staticmethod
def IsMixedCase(strings):
# Returns true if the string is mixed case, false otherwise.
# For instance, "test" and "TEST" return false, "teSt" returns true.
# If the string is empty, returns false.
if not strings or len(strings) == 1:
return False
flags = strings[0].isupper()
for i in strings:
if i.isupper() is not flags:
return True
return False
@staticmethod
def Substring(start_index, end_index, strings):
# Returns a substring of a specified string;
# the characters included are in the interval:
# [ start_index, end_index >
return strings[start_index:end_index]
@staticmethod
def SubstringRef(start_index, end_index, strings):
# Returns a substring of a specified string;
# the characters included are in the interval:
# [ start_index, end_index >
#? return string.midRef(start_index, end_index - start_index);
return strings[start_index:end_index]
@staticmethod
def ReplaceFirst(before, after, strings):
# Replace the first occurrence of string "before"
# with string "after" in string "string"
        start_index = strings.index(before)
        end_index = start_index + len(before)
        return Utility.Substring(0, start_index, strings) + after + Utility.Substring(end_index, len(strings), strings)
@staticmethod
def GetAbsolutePathsToFolderDescendantFiles(fullfolderpath):
fileList = []
for root, dirs, files in os.walk(fullfolderpath):
for i in files:
if not i.startswith('.') and not i.startswith('..'):
f = os.path.join(root, i)
if os.path.isfile(f):
fileList.append(os.path.join(root, i))
return fileList
@staticmethod
def CopyFiles(fullfolderpath_source, fullfolderpath_destination):
# Copies every file and folder in the source folder
# to the destination folder; the paths to the folders are submitted;
# the destination folder needs to be created in advance
try:
if os.path.exists(fullfolderpath_destination):
shutil.rmtree(fullfolderpath_destination)
shutil.copytree(fullfolderpath_source, fullfolderpath_destination)
except IOError as e:
print('Failed with CopyFiles()', e)
@staticmethod
def DeleteFile(fullfilepath):
# Deletes the specified file if it exists
if os.path.exists(fullfilepath):
try:
os.remove(fullfilepath)
return True
except IOError as e:
print('Failed remove file', e)
return False
@staticmethod
def RenameFile(oldfilepath, newfilepath):
#?
try:
os.rename(oldfilepath, newfilepath)
return True
except:
return False
@staticmethod
def GetTemporaryFileNameWithExtension(extension):
# Returns path to a random filename with the specified extension in
# the systems TEMP directory. The caller has responsibility for
# creating a file at this location and removing it afterwards.
return QDir.temp().absolutePath() + "/tolo_" + Utility.CreateUUID() + extension
@staticmethod
def CreateTemporaryCopy(fullfilepath):
# Creates a copy of the provided file with a random name in
# the systems TEMP directory and returns the full path to the new file.
# The extension of the original file is preserved. If the original file
# doesn't exist, an empty string is returned.
if not QFileInfo(fullfilepath).exists() or not QFileInfo(fullfilepath).isFile():
return ''
temp_file_path = QDir.temp().absolutePath() + "/" + \
Utility.CreateUUID() + "." + \
QFileInfo(fullfilepath).suffix()
QFile.copy(fullfilepath, temp_file_path)
return temp_file_path
@staticmethod
def IsFileReadable(fullfilepath):
# Returns true if the file can be read;
# shows an error dialog if it can't
# with a message elaborating what's wrong
# Qt has <QFileInfo>.exists() and <QFileInfo>.isReadable()
# functions, but then we would have to create our own error
# message for each of those situations (and more). Trying to
# actually open the file enables us to retrieve the exact
# reason preventing us from reading the file in an error string.
file = QFile(fullfilepath)
# Check if we can open the file
if not file.open(QFile.ReadOnly):
Utility.DisplayStdErrorDialog(QObject.tr("Cannot read file %s:\n%s.") % (fullfilepath, file.errorString()))
return False
file.close()
return True
@staticmethod
def ReadUnicodeTextFile(fullfilepath):
# Reads the text file specified with the full file path;
# text needs to be in UTF-8 or UTF-16; if the file cannot
# be read, an error dialog is shown and an empty string returned
file = QFile(fullfilepath)
#? Check if we can open the file
if not file.open(QFile.ReadOnly):
print('Can not read the file', fullfilepath)
return
ins = QTextStream(file)
ins.setCodec('UTF-8') # Input should be UTF-8
# This will automatically switch reading from
# UTF-8 to UTF-16 if a BOM is detected
ins.setAutoDetectUnicode(True)
return Utility.ConvertLineEndings(ins.readAll())
@staticmethod
def WriteUnicodeTextFile(text, fullfilepath):
# Writes the provided text variable to the specified
# file; if the file exists, it is truncated
#?
file = QFile(fullfilepath)
if not file.open(QIODevice.WriteOnly | QIODevice.Truncate | QIODevice.Text):
print('Can not read the file', fullfilepath)
return
out = QTextStream(file)
# We ALWAYS output in UTF-8
out.setCodec('UTF-8')
out << text
@staticmethod
def ConvertLineEndings(text):
# Converts Mac and Windows style line endings to Unix style
# line endings that are expected throughout the Qt framework
#?
return text.replace('\x0D\x0A', '\x0A').replace('\x0D', '\x0A')
@staticmethod
def URLEncodePath(path):
# URL encodes the provided path string.
# The path separator ('/') and the ID hash ('#') are left alone.
# @param path The path to encode.
# @return The encoded path string.
encoded_url = QUrl.toPercentEncoding(path, QByteArray('/#'))
return encoded_url.constData()
    # @staticmethod
# def DisplayExceptionErrorDialog(error_info):
# message_box = QApplication.activeWindow()
# message_box.setWindowFlags(Qt.Window | Qt.WindowStaysOnTopHint)
# message_box.setModal(True)
# message_box.setIcon(QMessageBox.Critical)
# message_box.setWindowTitle('Tolo')
# # Spaces are added to the end because otherwise the dialog is too small.
# message_box.setText(QObject.tr("Tolo has encountered a problem.") % "");
# message_box.setInformativeText(QObject.tr("Please <a href=\"http://code.tolo.com/p/tolo/wiki/ReportingIssues\">report it</a> on the issue tracker, including the details from this dialog."));
# message_box.setStandardButtons(QMessageBox.Close)
# detailed_text = []
# QStringList detailed_text;
# detailed_text << "Error info: " + error_info
# << "Sigil version: " + QString(SIGIL_FULL_VERSION)
# << "Runtime Qt: " + QString(qVersion())
# << "Compiled Qt: " + QString(QT_VERSION_STR);
# #if defined Q_OS_WIN32
# detailed_text << "Platform: Windows SysInfo ID " + QString::number(QSysInfo::WindowsVersion);
# #elif defined Q_OS_MAC
# detailed_text << "Platform: Mac SysInfo ID " + QString::number(QSysInfo::MacintoshVersion);
# #else
# detailed_text << "Platform: Linux";
# #endif
# message_box.setDetailedText(detailed_text.join("\n"));
# message_box.exec();
@staticmethod
    def DisplayStdErrorDialog(error_message, detailed_text=None):
message_box = QMessageBox(QApplication.activeWindow())
message_box.setWindowFlags(Qt.Window | Qt.WindowStaysOnTopHint)
message_box.setModal(True)
message_box.setIcon(QMessageBox.Critical)
message_box.setWindowTitle("tolo")
message_box.setText(error_message)
        if detailed_text:
            message_box.setDetailedText(detailed_text)
message_box.setStandardButtons(QMessageBox.Close)
message_box.exec()
if __name__ == '__main__':
print(Utility.CreateUUID())
print(Utility.ChageCase(' title abce f ', Utility.Casing_Titlecase))
print(Utility.ChageCase(' title abce f ', Utility.Casing_Capitalize))
print(Utility.IsMixedCase('ABC'))
print(Utility.ReplaceFirst('b', 'f', 'abcdefg'))
print(Utility.GetAbsolutePathsToFolderDescendantFiles('/Users/mou/Downloads'))
print(Utility.DeleteFile('/Users/mou/main.o'))
print('rename', Utility.RenameFile('/Users/mou/main.o', '/Users/mou/out.o'))
print('gettempext', Utility.GetTemporaryFileNameWithExtension("test"))
print('createtempcopy', Utility.CreateTemporaryCopy('/Users/mou/out.o'))
print('stderrdlg', Utility.DisplayStdErrorDialog('err', 'detailed')) | {
"content_hash": "2413784b4427bf58a79be36af4f2fef7",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 200,
"avg_line_length": 39.81184668989547,
"alnum_prop": 0.6165762296516716,
"repo_name": "tcp813/tolo",
"id": "92b05aa1d612bb4b9c1874b6559dca8c6d0c9540",
"size": "11427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Misc/Utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "571843"
}
],
"symlink_target": ""
} |
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
FILER_DEBUG = True
SITE_ID = 1
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
USE_I18N = True
ROOT_URLCONF = 'cmsplugin_blog_authors.tests.urls'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../../static/')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../../media/')
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), 'test_static'),
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), '../templates'),
)
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
os.path.dirname(__file__), 'coverage')
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$', 'cms$', 'cmsplugin_blog$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
)
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_nose',
'sekizai',
'menus',
'mptt',
'tagging',
'filer',
'people',
]
# If we leave this in EXTERNAL_APPS it will hide this app from coverage
NO_COVERAGE_APPS = [
'cms',
'cmsplugin_blog',
]
INTERNAL_APPS = [
'cmsplugin_blog_authors.tests.test_app',
'cmsplugin_blog_authors',
]
INSTALLED_APPS = EXTERNAL_APPS + NO_COVERAGE_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
SECRET_KEY = 'foobar'
CMS_SOFTROOT = True
CMS_PERMISSION = False
CMS_SEO_FIELDS = True
CMS_MENU_TITLE_OVERWRITE = True
CMS_FRONTEND_LANGUAGES = ('en', 'de', )
CMS_TEMPLATES = (
('standard.html', 'Standard'),
)
JQUERY_JS = 'https://ajax.googleapis.com/ajax/libs/jquery/1.4.4/jquery.min.js'
JQUERY_UI_JS = (
'https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.12/jquery-ui.min.js')
JQUERY_UI_CSS = (
'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.12/themes/smoothness/'
'jquery-ui.css')
| {
"content_hash": "e9e710c302da4d81e1bef52779f04a31",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 78,
"avg_line_length": 25.26851851851852,
"alnum_prop": 0.6665445218028582,
"repo_name": "bitmazk/cmsplugin-blog-authors",
"id": "7e4c9bcc19a994765eb76f8fd5979127a1ec0c90",
"size": "2729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_blog_authors/tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17990"
}
],
"symlink_target": ""
} |
from .about import about, contact
from .alerts import alertUsers, postAlertPolygon, readAlertPoint
from .disclaimer import disclaimer
from .edit import editHazards, editShape, updateHazard
from .index import index
from .postPoint import (postHazard, postIncident, postNearmiss,
postNewInfrastructure, postTheft)
from .pushNotification import pushNotification
from .recentReports import recentReports
from .restApi import (AlertAreaDetail, AlertAreaList, APNSDeviceDetail,
APNSDeviceList, CollisionList, FilteredHazardList,
FilteredTheftList, GCMDeviceDetail, GCMDeviceList,
HazardList, IncidentList, NearmissList, OfficialList,
TheftList, TinyCollisionList, TinyHazardList,
TinyNearMissList, TinyNewInfrastructureList,
TinyTheftList, UserDetail, UserList, XHRCollisionInfo,
XHRHazardInfo, XHRNearMissInfo, XHRNewInfrastructureInfo,
XHRTheftInfo)
from .termsAndConditions import termsAndConditions
from .vis import vis
| {
"content_hash": "39dc77456656556fbcf3881c78d74631",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 56.9,
"alnum_prop": 0.695079086115993,
"repo_name": "SPARLab/BikeMaps",
"id": "f386dc7c105df6d781663e9b23e34682beba9ff5",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mapApp/views/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15111"
},
{
"name": "HTML",
"bytes": "134960"
},
{
"name": "JavaScript",
"bytes": "73792"
},
{
"name": "Python",
"bytes": "965817"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
import datetime
app = Flask(__name__)
@app.route("/")
def hello():
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
templateData = {
'title' : 'HELLO!',
'time': timeString
}
return render_template('main.html', **templateData)
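# A quick check of the route above with Flask's built-in test client
# (illustration only, not part of the original script; shown commented so
# nothing runs at import time):
#   with app.test_client() as client:
#       resp = client.get('/')
#       assert b'HELLO!' in resp.data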
# @app.route('/')
# def cookie_form():
# return render_template('form_submit.html')
# @app.route('/cookie_submit/', methods=['POST'])
# def cookie_submit():
# page_data = {
# 'name' : request.form['yourname'],
# 'number' : request.form['yournumber'],
# 'status' : "on"
# }
# return render_template('form_action.html', **page_data)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True) | {
"content_hash": "0d33530faadb44117990e4ee52955a04",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.5894736842105263,
"repo_name": "emgreen33/easy_bake",
"id": "7588627557ad7a8ab0f1372bdf50ac700162cb65",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "4368"
}
],
"symlink_target": ""
} |
"""A preprocessor that extracts all of the outputs from the
notebook file. The extracted outputs are returned in the 'resources' dictionary.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from textwrap import dedent
from binascii import a2b_base64
import sys
import os
import json
from mimetypes import guess_extension
from traitlets import Unicode, Set
from .base import Preprocessor
if sys.version_info < (3,):
text_type = basestring
else:
text_type = str
def guess_extension_without_jpe(mimetype):
"""
This function fixes a problem with '.jpe' extensions
of jpeg images which are then not recognised by latex.
For any other case, the function works in the same way
as mimetypes.guess_extension
"""
ext = guess_extension(mimetype)
if ext==".jpe":
ext=".jpeg"
return ext
def platform_utf_8_encode(data):
if isinstance(data, text_type):
if sys.platform == 'win32':
data = data.replace('\n', '\r\n')
data = data.encode('utf-8')
return data
class ExtractOutputPreprocessor(Preprocessor):
"""
Extracts all of the outputs from the notebook file. The extracted
outputs are returned in the 'resources' dictionary.
"""
output_filename_template = Unicode(
"{unique_key}_{cell_index}_{index}{extension}"
).tag(config=True)
extract_output_types = Set(
{'image/png', 'image/jpeg', 'image/svg+xml', 'application/pdf'}
).tag(config=True)
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each cell,
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
#Get the unique key from the resource dict if it exists. If it does not
#exist, use 'output' as the default. Also, get files directory if it
#has been specified
unique_key = resources.get('unique_key', 'output')
output_files_dir = resources.get('output_files_dir', None)
#Make sure outputs key exists
if not isinstance(resources['outputs'], dict):
resources['outputs'] = {}
#Loop through all of the outputs in the cell
for index, out in enumerate(cell.get('outputs', [])):
if out.output_type not in {'display_data', 'execute_result'}:
continue
if 'text/html' in out.data:
out['data']['text/html'] = dedent(out['data']['text/html'])
#Get the output in data formats that the template needs extracted
for mime_type in self.extract_output_types:
if mime_type in out.data:
data = out.data[mime_type]
# Binary files are base64-encoded, SVG is already XML
if mime_type in {'image/png', 'image/jpeg', 'application/pdf'}:
# data is b64-encoded as text (str, unicode),
# we want the original bytes
data = a2b_base64(data)
elif mime_type == 'application/json' or not isinstance(data, text_type):
# Data is either JSON-like and was parsed into a Python
# object according to the spec, or data is for sure
# JSON. In the latter case we want to go extra sure that
# we enclose a scalar string value into extra quotes by
# serializing it properly.
if isinstance(data, bytes) and not isinstance(data, text_type):
# In python 3 we need to guess the encoding in this
# instance. Some modules that return raw data like
# svg can leave the data in byte form instead of str
data = data.decode('utf-8')
data = platform_utf_8_encode(json.dumps(data))
else:
# All other text_type data will fall into this path
data = platform_utf_8_encode(data)
ext = guess_extension_without_jpe(mime_type)
if ext is None:
ext = '.' + mime_type.rsplit('/')[-1]
if out.metadata.get('filename', ''):
filename = out.metadata['filename']
if not filename.endswith(ext):
                        filename += ext
else:
filename = self.output_filename_template.format(
unique_key=unique_key,
cell_index=cell_index,
index=index,
extension=ext)
# On the cell, make the figure available via
# cell.outputs[i].metadata.filenames['mime/type']
# where
# cell.outputs[i].data['mime/type'] contains the data
if output_files_dir is not None:
filename = os.path.join(output_files_dir, filename)
out.metadata.setdefault('filenames', {})
out.metadata['filenames'][mime_type] = filename
if filename in resources['outputs']:
raise ValueError(
"Your outputs have filename metadata associated "
"with them. Nbconvert saves these outputs to "
"external files using this filename metadata. "
"Filenames need to be unique across the notebook, "
"or images will be overwritten. The filename {} is "
"associated with more than one output. The second "
"output associated with this filename is in cell "
"{}.".format(filename, cell_index)
)
#In the resources, make the figure available via
# resources['outputs']['filename'] = data
resources['outputs'][filename] = data
return cell, resources
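# Usage sketch (illustration only; shown commented to keep the module
# import-safe). The preprocessor is normally attached to an exporter, which
# threads the shared resources dict through preprocess():
#   from nbconvert import HTMLExporter
#   exporter = HTMLExporter()
#   exporter.register_preprocessor(ExtractOutputPreprocessor, enabled=True)
#   body, resources = exporter.from_filename('notebook.ipynb')
#   # extracted images now live under resources['outputs']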
| {
"content_hash": "41771e96f46c8e2a96efd669e149f530",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 92,
"avg_line_length": 43.4078947368421,
"alnum_prop": 0.5401636859654441,
"repo_name": "sserrot/champion_relationships",
"id": "eb8c8594eeb76749e65f505ab790937e311dde7f",
"size": "6598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/nbconvert/preprocessors/extractoutput.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
import json
class CuCoursecatalogSpiderPipeline(object):
    def __init__(self):
        # Open in text mode: json.dumps returns str, which a binary-mode
        # ('wb') file handle would reject under Python 3.
        self.file = open('cu_catalog.json', 'w')
    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + '\n'
        self.file.write(line)
        return item
    def close_spider(self, spider):
        # Flush and release the file handle when the spider finishes.
        self.file.close()
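# Usage sketch (illustration only; the dotted path follows this repo's
# layout): the pipeline is enabled through the Scrapy project settings:
#   ITEM_PIPELINES = {
#       'cu_coursecatalog_spider.pipelines.CuCoursecatalogSpiderPipeline': 300,
#   }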
| {
"content_hash": "cbd5490561ff804632cd6831a6ec864f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 21.90909090909091,
"alnum_prop": 0.6846473029045643,
"repo_name": "xaviablaza/course-catalog-api",
"id": "e12003fc6e5177f05ff445656fe03234012a1eba",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "cu_coursecatalog_spider/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14416"
}
],
"symlink_target": ""
} |
key_val_store = {}
def put(key, data, client_addr=None):
if key == "__all__":
import json
return 0, json.dumps(key_val_store)
elif key == "peer":
try:
host,port = data.split('-')
# Extract host from client_addr, as visible to coordinator
host = client_addr['host']
id = "%s-%s" % (host,port)
try:
if id not in key_val_store["peer"]:
key_val_store["peer"].append(id)
except KeyError:
key_val_store["peer"] = [id]
except ValueError:
return 1, "Peer registration failed invalid with invalid data:%s" % data
return 0, "Peer registered"
elif key == "vote":
from .tpc import queue_response
uuid = data
# Get master host,port for sending ack and send ack
mhost,qport = key_val_store.get('master').split('-')
# Queue response ack with uuid
queue_response(3, "ack", uuid, mhost, qport)
return 0, "Recieved vote sending ack"
elif key == "ack":
uuid = data
try:
key_val_store["ack:%s" % uuid].append("")
except KeyError:
key_val_store["ack:%s" % uuid] = [""]
return 0, "Acknoledged %s" % uuid
elif key.startswith("req_ack"):
from .tpc import queue_response
try:
uuid,key = key.split('=')[1:]
# Get master host,port for sending ack and send ack
mhost,qport = key_val_store.get('master').split('-')
# Queue response ack with uuid
queue_response(3, "ack", uuid, mhost, qport)
except ValueError:
return 0, "Malformed request for ack_req type operation"
key_val_store[key] = data
return "0", "Added"
def get(key, data, client_addr=None):
if key == b"peer":
import json
return "0", json.dumps([id for id in key_val_store.get("peer", [])])
try:
return "0", key_val_store[key]
except KeyError:
return "1", "key does not exist"
def update(key, data, client_addr=None):
try:
key_val_store[key]
key_val_store[key] = data
return "0", "updated"
except KeyError:
return "1", "key does not exist"
def delete(key, data, client_addr=None):
try:
key_val_store.pop(key)
return "0", "deleted"
except KeyError:
return "1", "key does not exist"
# Lambda function dictionary
oprtns = {
0: [update, True],
1: [get , False],
2: [delete, True],
3: [put , True],
}
def prepare_response(status, reason, current_clock):
return ("%d:%s:%d" % (status, reason, current_clock)).encode()
# Helper that parses the request header and dispatches to the matching handler
def decode_HEADER(log, client_addr, header):
from .clock import get_clock,update_clock
# Get current clock
current_clock = get_clock()
# Try extracting values or fail with error message
try:
# Get client clock
header,cclock = header.split(b'==')
# If it is a client and not slave responding do not check clock
if int(cclock) != -1:
if current_clock > int(cclock):
return prepare_response(1, "Clock out of sync", current_clock)
update_clock(int(cclock)+1)
except:
log.log(0, "client did not send the clock")
return prepare_response(1, "No clock recieved", current_clock)
return b"1:No clock recieved:%d" % current_clock
try:
oprtn,key,val = header.split(b':')
key = key.decode()
val = val.decode()
except:
log.log(0, "Invalid operation or malformed request")
        return prepare_response(1, "Invalid input", current_clock)
log.log(2, "Receieved request, OPRTN:%s, key:%s, val:%s, client_clock:%s, current_clock:%d" %
(oprtn, key, val, cclock, current_clock))
oprtn = int(oprtn)
fnc,tpc = oprtns[oprtn]
peers = key_val_store.get("peer", [])
if tpc and peers and key not in ['ack', 'peer']:
from .tpc import get_uuid,vote,commit,check_ack
# Generate unique identifier for tracking vote and acks
uuid = get_uuid()
# Conduct vote and add to queue
vote(log, uuid, peers)
if check_ack(log, uuid, len(peers), key_val_store.get, delete):
# Commit, generating new uuid
uuid = get_uuid()
commit(log, uuid, peers, oprtn, key, val)
status,message = 0,"Commit completed"
if not check_ack(log, uuid, len(peers), key_val_store.get, delete):
status,message = 1,"All acks not received"
else:
status,message = 1,"All acks not received"
elif oprtn == 1 and peers:
from lib.tcp_client import tcp_client
log.log(2, "Requesting key: '%s' from peers" % key)
values = []
for peer in peers:
host,port = peer.split('-')
tcp_client(log, host, int(port), log, oprtn, key, None, values=values)
values = [val for key,val in values if val]
try:
status,message = 0, values[0]
except IndexError:
status,message = 1, "Key not found or data corrupted"
elif oprtn == 1 and key == "__all__":
import json
return prepare_response(0, json.dumps(key_val_store), current_clock)
else:
status,message = fnc(key,val,client_addr)
log.log(2, b"Completed request")
return prepare_response(int(status), message, current_clock)
return ("%s:%s" % (int(status),message)).encode()
| {
"content_hash": "d6a60e7dac640a43203a07e150a1a546",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 98,
"avg_line_length": 35.92356687898089,
"alnum_prop": 0.5703900709219858,
"repo_name": "varunmittal91/Distributed-Key-Value-Store",
"id": "c0a989434c1a8bf0ffff66929355671b459f94fb",
"size": "5640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19587"
},
{
"name": "Shell",
"bytes": "1935"
}
],
"symlink_target": ""
} |
"""Logging utilities."""
from absl import logging
import tensorflow.compat.v1 as tf
# Alias to TF summary module that supports eager mode and is TF2 compatible.
tf_summary = tf.compat.v2.summary
class ScalarSummary(object):
"""Enables logging a scalar metric to Tensorboard.
Example:
num_rounds = tf.Variable(0, dtype=tf.int64, trainable=False)
    summary = ScalarSummary(step=num_rounds)
Anywhere in your code:
summary('summary_name', summary_value)
After each round:
num_rounds.assign_add(1)
"""
def __init__(self, step=None, scope=None, enable_tf=False, verbose=1):
"""Creates an instance of this class.
Args:
step: An optional `tf.Variable` for tracking the logging step. If `None`,
will use the global Tensorflow step variable.
scope: An optional string that is prepended to metric names passed to
`__call__`.
enable_tf: Whether to create a TF summary.
verbose: Whether to also summaries to the console.
"""
self._step = tf.train.get_or_create_global_step() if step is None else step
self._scope = scope
self._verbose = verbose
self._enable_tf = enable_tf
def __call__(self, name, value, step=None):
"""Creates or updates summary `name` with `value`.
Args:
name: The name of the summary.
value: The value of the summary.
step: An optional step variable. If `None`, will use the step variable
passed to the constructor.
"""
step = self._step if step is None else step
if self._scope:
name = self._scope + name
if self._enable_tf:
tf_summary.scalar(name, value, step=step)
if self._verbose:
logging.info('Summary step=%d: %s=%.3f', int(step), name, value)
| {
"content_hash": "d09a3dd93cd82584209ed1ee37d3a3c9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 31.454545454545453,
"alnum_prop": 0.6589595375722543,
"repo_name": "google-research/google-research",
"id": "3128d61e0b5f430dccc10d110301ac032300c324",
"size": "2338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protein_lm/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import json
from unittest import TestCase
from core.curlcommand import CurlCommand
class CurlCommandTest(TestCase):
def test_get_method(self):
method = "get"
url = "http://example.com/api/v2/test"
data = "{\"id\": 1, \"name\": \"Foo\"}"
headers = json.loads('{}')
curlcommand = CurlCommand(url, method, data, headers)
self.assertEqual(curlcommand.get(), "curl -XGET -H \"Content-type: application/json\" -d "
"'{\"id\": 1, \"name\": \"Foo\"}' http://example.com/api/v2/test")
def test_post_method(self):
method = "pOsT"
url = "http://example.com/api/post"
data = "{\"id\": 2, \"name\": \"Bar\"}"
headers = json.loads('{}')
curlcommand = CurlCommand(url, method, data, headers)
self.assertEqual(curlcommand.get(), "curl -XPOST -H \"Content-type: application/json\" -d "
"'{\"id\": 2, \"name\": \"Bar\"}' http://example.com/api/post")
def test_empty_data(self):
method = "get"
url = "http://example.com/api/v2/list"
data = ""
headers = json.loads('{}')
curlcommand = CurlCommand(url, method, data, headers)
self.assertEqual(curlcommand.get(), "curl -XGET -H \"Content-type: application/json\" "
"http://example.com/api/v2/list")
def test_generate_headers(self):
method = "get"
url = "http://example.com/api/v2/list"
data = ""
headers = json.loads('{ \"X-API-Key\": \"abcdef12345\", \"user-agent\": \"tntfuzzer\" }')
expected_result = u'-H \"Content-type: application/json\" -H \"X-API-Key\": \"abcdef12345\" ' \
u'-H \"user-agent\": \"tntfuzzer\"'
curlcommand = CurlCommand(url, method, data, headers)
self.assertEqual(curlcommand.generate_headers(), expected_result)
def test_generate_headers_returns_contenttype_only_when_headers_nonetype(self):
method = "get"
url = "http://example.com/api/v2/list"
data = ""
expected_result = u'-H \"Content-type: application/json\"'
curlcommand = CurlCommand(url, method, data, None)
self.assertEqual(curlcommand.generate_headers(), expected_result)
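# Illustrative call outside the test harness (mirrors the assertions above; the
# URL and payload are made up):
#
#   cmd = CurlCommand("http://example.com/api", "pOsT", '{"id": 3}', None)
#   cmd.get()
#   # -> curl -XPOST -H "Content-type: application/json" -d '{"id": 3}' http://example.com/api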
| {
"content_hash": "bff44f5fa17c7d915030afa7524932d0",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 110,
"avg_line_length": 41.5,
"alnum_prop": 0.5550774526678142,
"repo_name": "Teebytes/TnT-Fuzzer",
"id": "8a2a892266b6a064375553f4c399ef93bc077ffa",
"size": "2324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tntfuzzer/tests/core/curlcommandtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "295"
},
{
"name": "Python",
"bytes": "51826"
}
],
"symlink_target": ""
} |
"""
Django settings for stalkexchange project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os import environ
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zx4-r%+!$3xkwm-b(pd&+2()wzks6wyfke$ye7&6e_kuvdw6d9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 2
# Application definition
INSTALLED_APPS = (
'autocomplete_light',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
'social.apps.django_app.default',
#'django_messages',
'postman',
'storages',
'waliki',
'produce',
'userprofile',
'wishlist',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'stalkexchange.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["stalkexchange/templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'stalkexchange.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
import dj_database_url
config = dj_database_url.config()
if config != {}:
DATABASES = dict()
DATABASES['default'] = config
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
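# Example (an assumption, not project-specific): dj_database_url.config() reads
# the DATABASE_URL environment variable, so e.g.
#   DATABASE_URL=postgres://user:pass@localhost:5432/stalkexchange
# yields a PostgreSQL DATABASES entry, while the sqlite3 fallback above is used
# when the variable is unset (local development).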
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
POSTMAN_DISALLOW_ANONYMOUS = True # default is False
POSTMAN_DISALLOW_MULTIRECIPIENTS = True # default is False
POSTMAN_DISALLOW_COPIES_ON_REPLY = True # default is False
POSTMAN_DISABLE_USER_EMAILING = True # default is False
POSTMAN_AUTO_MODERATE_AS = True # default is None
POSTMAN_SHOW_USER_AS = 'get_full_name' # default is None
# POSTMAN_QUICKREPLY_QUOTE_BODY = True # default is False
# POSTMAN_NOTIFIER_APP = None # default is 'notification'
# POSTMAN_MAILER_APP = None # default is 'mailer'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
WALIKI_ANONYMOUS_USER_PERMISSIONS = ('view_page', 'add_page', 'change_page')
WALIKI_LOGGED_USER_PERMISSIONS = ('view_page', 'add_page', 'change_page')
MEDIA_ROOT = "media"
MEDIA_URL = "/media/"
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static-files'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = "723613493129-leu9lk14jfamn7270mcnc2p43erbmin0.apps.googleusercontent.com"
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = "Dgx90f3FsZdRqYg08bpmJyqe"
SOCIAL_AUTH_FACEBOOK_KEY = '962707797122351'
SOCIAL_AUTH_FACEBOOK_SECRET = '355c689e24e40ff1cd9c48b6af17c5f9'
SOCIAL_AUTH_FACEBOOK_EXTENDED_PERMISSIONS = ['email',]
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email',]
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'userprofile.views.get_avatar', #save facebook profile image,
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
THUMBNAIL_DEFAULT_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_REGION='us-west-2'
AWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = "stalkexchange"
AWS_QUERYSTRING_AUTH = False #doesn't work!
AWS_HEADERS = {
'Cache-Control': 'max-age=86400',
}
# from urlparse import urlparse
#
# es = urlparse('http://paas:a226812db9d9ad103f67b02dbb57b898@fili-us-east-1.searchly.com')
#
# port = es.port or 80
#
# HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
# 'URL': es.scheme + '://' + es.hostname + ':' + str(port),
# 'INDEX_NAME': 'documents',
# },
# }
#
#
# if es.username:
# HAYSTACK_CONNECTIONS['default']['KWARGS'] = {"http_auth": es.username + ':' + es.password}
| {
"content_hash": "5a1a1fd32633284c4f69b91090c1ef4e",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 106,
"avg_line_length": 30.009615384615383,
"alnum_prop": 0.7005767382249279,
"repo_name": "jmickela/stalkexchange",
"id": "7e12dd645ce6707b8111d8fd24fb0103383807fa",
"size": "6242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stalkexchange/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "54173"
},
{
"name": "HTML",
"bytes": "19514"
},
{
"name": "Python",
"bytes": "26312"
},
{
"name": "Ruby",
"bytes": "900"
}
],
"symlink_target": ""
} |
""" Get data regarding platform specific results
The input table is our length data file, with columns:
1. read name
2. alignment type
3. best aligned length
4. total aligned length
5. read length
"""
import sys, argparse, re, gzip, inspect, os
from seqtools.statistics import average
def main(args):
  # define compiled regex programs for classifying read names
#ONT basecalls
#ont matches a uuid4
ont_prog = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}_Basecall_2D_(.*)$')
  pacbio_prog = re.compile(r'^(m[^/]+)/(\d+)/(ccs|\d+_\d+)')
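  # Illustrative read names these patterns match (made-up examples in the
  # documented formats, not taken from real data):
  #   ONT 2D basecall: "0a1b2c3d-4e5f-4a6b-8c7d-9e0f1a2b3c4d_Basecall_2D_template"
  #       -> ont_prog group(1) == "template"
  #   PacBio subread:  "m150403_101433_42175_c1000/12345/0_5000"
  #       -> cell "m150403_101433_42175_c1000", molecule 12345, type "subread"
  #   PacBio CCS:      "m150403_101433_42175_c1000/12345/ccs" -> type "ccs"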
inf = None
  if re.search(r'\.gz$',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
unclassified = {'aligned':0,'unaligned':0}
classified = {}
pb_cell = {}
#pb_mol = set()
for line in inf:
f = line.rstrip().split("\t")
name = f[0]
m_ont = ont_prog.match(name)
m_pb = pacbio_prog.match(name)
if m_ont:
if 'ONT' not in classified:
classified['ONT'] = {}
type = m_ont.group(1)
if type not in classified['ONT']:
classified['ONT'][type] = {'aligned':0,'unaligned':0}
if f[1] != 'unaligned':
classified['ONT'][type]['aligned']+=1
else:
classified['ONT'][type]['unaligned']+=1
elif m_pb:
cell = m_pb.group(1)
mol = int(m_pb.group(2))
if cell not in pb_cell: pb_cell[cell] = {'molecules':set(),'reads':0,'molecules_aligned':set()}
pb_cell[cell]['molecules'].add(mol)
pb_cell[cell]['reads'] += 1
#pb_mol.add(mol)
if 'PacBio' not in classified:
classified['PacBio'] = {}
type = 'ccs'
if m_pb.group(3) != 'ccs': type = 'subread'
if type not in classified['PacBio']:
classified['PacBio'][type] = {'aligned':0,'unaligned':0}
if f[1] != 'unaligned':
classified['PacBio'][type]['aligned']+=1
pb_cell[cell]['molecules_aligned'].add(mol)
else:
classified['PacBio'][type]['unaligned']+=1
else:
if f[1] != 'unaligned':
unclassified['aligned']+=1
else:
unclassified['unaligned']+=1
inf.close()
  # Finished reading the reads; now we can make the report
of = open(args.output_base,'w')
if len(classified.keys()) > 0:
of.write("SP\n")
for classification in sorted(classified.keys()):
for subclass in sorted(classified[classification].keys()):
dat = classified[classification][subclass]
of.write("GN\t"+classification+"\t"+subclass+"\t"+str(sum(dat.values()))+"\t"+str(dat['aligned'])+"\t"+str(dat['unaligned'])+"\n")
of.write("GN\tUnclassified\t\t"+str(sum(unclassified.values()))+"\t"+str(unclassified['aligned'])+"\t"+str(unclassified['unaligned'])+"\n")
if 'PacBio' in classified:
of.write("PB\tCell Count\t"+str(len(pb_cell.keys()))+"\n")
of.write("PB\tMolecule Count\t"+str(sum([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tAligned Molecule Count\t"+str(sum([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMax Reads Per Cell\t"+str(max([pb_cell[x]['reads'] for x in pb_cell.keys()]))+"\n")
of.write("PB\tAvg Reads Per Cell\t"+str(average([pb_cell[x]['reads'] for x in pb_cell.keys()]))+"\n")
of.write("PB\tMin Reads Per Cell\t"+str(min([pb_cell[x]['reads'] for x in pb_cell.keys()]))+"\n")
of.write("PB\tMax Molecules Per Cell\t"+str(max([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tAvg Molecules Per Cell\t"+str(average([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMin Molecules Per Cell\t"+str(min([len(pb_cell[x]['molecules']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMax Aligned Molecules Per Cell\t"+str(max([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tAvg Aligned Molecules Per Cell\t"+str(average([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
of.write("PB\tMin Aligned Molecules Per Cell\t"+str(min([len(pb_cell[x]['molecules_aligned']) for x in pb_cell.keys()]))+"\n")
mols = [[len(pb_cell[x]['molecules_aligned']),len(pb_cell[x]['molecules'])] for x in pb_cell.keys()]
smols = sorted(mols,key=lambda x: x[0])
of.write("PB\tMolecules Per Cell Distro\t"+",".join(['/'.join([str(x[0]),str(x[1])]) for x in smols])+"\n")
of1 = open(args.output_base+'.pacbio','w')
for val in smols:
of1.write(str(val[0])+"\t"+str(val[1])+"\n")
of1.close()
of.close()
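# Report format written above (field values illustrative), tab-separated:
#   SP
#   GN  <classification>  <subclass>  <total>  <aligned>  <unaligned>
#   GN  Unclassified      <blank>     <total>  <aligned>  <unaligned>
#   PB  <metric name>  <value>        (only when PacBio reads are present)
# plus <output_base>.pacbio holding "aligned<TAB>total" molecule counts per cell.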
def do_args():
parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="input lengths table")
parser.add_argument('output_base',help="output file basename")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_args()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_args()
main(args)
| {
"content_hash": "609027978e9f045a5419e7f86dafdb6e",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 141,
"avg_line_length": 41.91525423728814,
"alnum_prop": 0.6101900525677315,
"repo_name": "jason-weirather/AlignQC",
"id": "f30e93c0410e846bb79dd98864a33c17b81b9424",
"size": "4946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alignqc/get_platform_report.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2657"
},
{
"name": "Python",
"bytes": "228061"
},
{
"name": "R",
"bytes": "54818"
},
{
"name": "Shell",
"bytes": "27"
}
],
"symlink_target": ""
} |
"""
<Program Name>
test_gpg.py
<Author>
Santiago Torres-Arias <santiago@nyu.edu>
Lukas Puehringer <lukas.puehringer@nyu.edu>
<Started>
Nov 15, 2017
<Copyright>
See LICENSE for licensing information.
<Purpose>
Test gpg/pgp-related functions.
"""
import os
import shutil
import sys
import tempfile
import unittest
if sys.version_info >= (3, 3):
from unittest.mock import ( # pylint: disable=no-name-in-module,import-error
patch,
)
else:
from mock import patch # pylint: disable=import-error
# pylint: disable=wrong-import-position
from collections import OrderedDict
from copy import deepcopy
import cryptography.hazmat.backends as backends # pylint: disable=consider-using-from-import
import cryptography.hazmat.primitives.hashes as hashing
import cryptography.hazmat.primitives.serialization as serialization # pylint: disable=consider-using-from-import
from securesystemslib import exceptions, process
from securesystemslib.formats import ANY_PUBKEY_DICT_SCHEMA, GPG_PUBKEY_SCHEMA
from securesystemslib.gpg.common import (
_assign_certified_key_info,
_get_verified_subkeys,
get_pubkey_bundle,
parse_pubkey_bundle,
parse_pubkey_payload,
parse_signature_packet,
)
from securesystemslib.gpg.constants import (
PACKET_TYPE_PRIMARY_KEY,
PACKET_TYPE_SUB_KEY,
PACKET_TYPE_USER_ATTR,
PACKET_TYPE_USER_ID,
SHA1,
SHA256,
SHA512,
gpg_export_pubkey_command,
have_gpg,
)
from securesystemslib.gpg.dsa import create_pubkey as dsa_create_pubkey
# pylint: disable=unused-import
from securesystemslib.gpg.eddsa import ED25519_SIG_LENGTH
from securesystemslib.gpg.eddsa import create_pubkey as eddsa_create_pubkey
# pylint: enable=unused-import
from securesystemslib.gpg.exceptions import (
CommandError,
KeyExpirationError,
KeyNotFoundError,
PacketParsingError,
PacketVersionNotSupportedError,
SignatureAlgorithmNotSupportedError,
)
from securesystemslib.gpg.functions import (
create_signature,
export_pubkey,
export_pubkeys,
verify_signature,
)
from securesystemslib.gpg.rsa import create_pubkey as rsa_create_pubkey
from securesystemslib.gpg.util import (
Version,
get_hashing_class,
get_version,
is_version_fully_supported,
parse_packet_header,
parse_subpacket_header,
)
# pylint: enable=wrong-import-position
class GPGTestUtils:
"""GPG Test utility class"""
@staticmethod
    def ignore_not_found_error(
        function, path, exc_info
    ):  # pylint: disable=unused-argument
"""Callback that ignores FileNotFoundError"""
_, error, _ = exc_info
if not isinstance(error, FileNotFoundError):
raise error
@unittest.skipIf(not have_gpg(), "gpg not found")
class TestUtil(unittest.TestCase):
"""Test util functions."""
def test_version_utils_return_types(self):
"""Run dummy tests for coverage."""
self.assertTrue(isinstance(get_version(), Version))
self.assertTrue(isinstance(is_version_fully_supported(), bool))
def test_version_utils_error(self):
"""Run dummy tests for coverage."""
with patch(
"securesystemslib.gpg.constants.have_gpg", return_value=False
):
with self.assertRaises(exceptions.UnsupportedLibraryError):
get_version()
def test_get_hashing_class(self):
# Assert return expected hashing class
expected_hashing_class = [hashing.SHA1, hashing.SHA256, hashing.SHA512]
for idx, hashing_id in enumerate([SHA1, SHA256, SHA512]):
result = get_hashing_class(hashing_id)
self.assertEqual(result, expected_hashing_class[idx])
# Assert raises ValueError with non-supported hashing id
with self.assertRaises(ValueError):
get_hashing_class("bogus_hashing_id")
def test_parse_packet_header(self):
"""Test parse_packet_header with manually crafted data."""
data_list = [
## New format packet length with mock packet type 100001
# one-octet length, header len: 2, body len: 0 to 191
[0b01100001, 0],
[0b01100001, 191],
# two-octet length, header len: 3, body len: 192 to 8383
[0b01100001, 192, 0],
[0b01100001, 223, 255],
# five-octet length, header len: 6, body len: 0 to 4,294,967,295
[0b01100001, 255, 0, 0, 0, 0],
[0b01100001, 255, 255, 255, 255, 255],
## Old format packet lengths with mock packet type 1001
# one-octet length, header len: 2, body len: 0 to 255
[0b00100100, 0],
[0b00100100, 255],
# two-octet length, header len: 3, body len: 0 to 65,535
[0b00100101, 0, 0],
[0b00100101, 255, 255],
# four-octet length, header len: 5, body len: 0 to 4,294,967,295
[0b00100110, 0, 0, 0, 0, 0],
[0b00100110, 255, 255, 255, 255, 255],
]
# packet_type | header_len | body_len | packet_len
expected = [
(33, 2, 0, 2),
(33, 2, 191, 193),
(33, 3, 192, 195),
(33, 3, 8383, 8386),
(33, 6, 0, 6),
(33, 6, 4294967295, 4294967301),
(9, 2, 0, 2),
(9, 2, 255, 257),
(9, 3, 0, 3),
(9, 3, 65535, 65538),
(9, 5, 0, 5),
(9, 5, 4294967295, 4294967300),
]
for idx, data in enumerate(data_list):
result = parse_packet_header(bytearray(data))
self.assertEqual(result, expected[idx])
# New Format Packet Lengths with Partial Body Lengths range
for second_octet in [224, 254]:
with self.assertRaises(PacketParsingError):
parse_packet_header(bytearray([0b01100001, second_octet]))
# Old Format Packet Lengths with indeterminate length (length type 3)
with self.assertRaises(PacketParsingError):
parse_packet_header(bytearray([0b00100111]))
# Get expected type
parse_packet_header(bytearray([0b01100001, 0]), expected_type=33)
# Raise with unexpected type
with self.assertRaises(PacketParsingError):
parse_packet_header(bytearray([0b01100001, 0]), expected_type=34)
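    # Reference for the crafted data above (RFC 4880, section 4.2): a
    # new-format two-octet body length decodes as
    # ((octet1 - 192) << 8) + octet2 + 192, e.g. [223, 255] gives
    # ((223 - 192) << 8) + 255 + 192 == 8383, matching the expected tuple
    # (33, 3, 8383, 8386).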
def test_parse_subpacket_header(self):
"""Test parse_subpacket_header with manually crafted data."""
# All items until last item encode the length of the subpacket,
# the last item encodes the mock subpacket type.
data_list = [
# length of length 1, subpacket length 0 to 191
[0, 33], # NOTE: Nonsense 0-length
[191, 33],
# # length of length 2, subpacket length 192 to 16,319
[192, 0, 33],
[254, 255, 33],
# # length of length 5, subpacket length 0 to 4,294,967,295
[255, 0, 0, 0, 0, 33], # NOTE: Nonsense 0-length
[255, 255, 255, 255, 255, 33],
]
# packet_type | header_len | body_len | packet_len
expected = [
(33, 2, -1, 1), # NOTE: Nonsense negative payload
(33, 2, 190, 192),
(33, 3, 191, 194),
(33, 3, 16318, 16321),
(33, 6, -1, 5), # NOTE: Nonsense negative payload
(33, 6, 4294967294, 4294967300),
]
for idx, data in enumerate(data_list):
result = parse_subpacket_header(bytearray(data))
self.assertEqual(result, expected[idx])
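    # Note on the expected tuples above: an OpenPGP subpacket length counts the
    # subpacket type octet, so body_len is the decoded length minus one, e.g.
    # [254, 255, 33] decodes to ((254 - 192) << 8) + 255 + 192 == 16319 and
    # therefore body_len 16318.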
@unittest.skipIf(not have_gpg(), "gpg not found")
class TestCommon(unittest.TestCase):
"""Test common functions of the securesystemslib.gpg module."""
@classmethod
def setUpClass(self): # pylint: disable=bad-classmethod-argument
gpg_keyring_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gpg_keyrings", "rsa"
)
homearg = (
"--homedir {}".format( # pylint: disable=consider-using-f-string
gpg_keyring_path
).replace("\\", "/")
)
# Load test raw public key bundle from rsa keyring, used to construct
# erroneous gpg data in tests below.
keyid = "F557D0FF451DEF45372591429EA70BD13D883381"
cmd = gpg_export_pubkey_command(keyid=keyid, homearg=homearg)
proc = process.run(cmd, stdout=process.PIPE, stderr=process.PIPE)
self.raw_key_data = proc.stdout
self.raw_key_bundle = parse_pubkey_bundle(self.raw_key_data)
# Export pubkey bundle with expired key for key expiration tests
keyid = "E8AC80C924116DABB51D4B987CB07D6D2C199C7C"
cmd = gpg_export_pubkey_command(keyid=keyid, homearg=homearg)
proc = process.run(cmd, stdout=process.PIPE, stderr=process.PIPE)
self.raw_expired_key_bundle = parse_pubkey_bundle(proc.stdout)
def test_parse_pubkey_payload_errors(self):
"""Test parse_pubkey_payload errors with manually crafted data."""
# passed data | expected error | expected error message
test_data = [
(None, ValueError, "empty pubkey"),
(
bytearray([0x03]),
PacketVersionNotSupportedError,
"packet version '3' not supported",
),
(
bytearray([0x04, 0, 0, 0, 0, 255]),
SignatureAlgorithmNotSupportedError,
"Signature algorithm '255' not supported",
),
]
for data, error, error_str in test_data:
with self.assertRaises(error) as ctx:
parse_pubkey_payload(data)
self.assertTrue(error_str in str(ctx.exception))
def test_parse_pubkey_bundle_errors(self):
"""Test parse_pubkey_bundle errors with manually crafted data partially
based on real gpg key data (see self.raw_key_bundle)."""
        # Extract a sample (legitimate) User ID packet and pass it as the first
        # packet to trigger the "first packet must be a primary key" error
user_id_packet = list(self.raw_key_bundle[PACKET_TYPE_USER_ID].keys())[
0
]
        # Extract a sample (legitimate) primary key packet and pass it as the
        # first two packets to trigger the "unexpected second primary key" error
primary_key_packet = self.raw_key_bundle[PACKET_TYPE_PRIMARY_KEY][
"packet"
]
# Create incomplete packet to re-raise header parsing IndexError as
# PacketParsingError
incomplete_packet = bytearray([0b01111111])
# passed data | expected error message
test_data = [
(None, "empty gpg data"),
(user_id_packet, "must be a primary key"),
(primary_key_packet + primary_key_packet, "Unexpected primary key"),
(incomplete_packet, "index out of range"),
]
for data, error_str in test_data:
with self.assertRaises(PacketParsingError) as ctx:
parse_pubkey_bundle(data)
self.assertTrue(error_str in str(ctx.exception))
# Create empty packet of unsupported type 66 (bit 0-5) and length 0 and
# pass as second packet to provoke skipping of unsupported packet
unsupported_packet = bytearray([0b01111111, 0])
with patch("securesystemslib.gpg.common.log") as mock_log:
parse_pubkey_bundle(primary_key_packet + unsupported_packet)
self.assertTrue(
"Ignoring gpg key packet '63'" in mock_log.info.call_args[0][0]
)
    def test_parse_pubkey_bundle(self):
        """Assert presence of packets expected to be returned from
        `parse_pubkey_bundle` for a specific test key. See
```
gpg --homedir tests/gpg_keyrings/rsa/ --export 9EA70BD13D883381 | \
gpg --list-packets
```
"""
# Expect parsed primary key matching GPG_PUBKEY_SCHEMA
self.assertTrue(
GPG_PUBKEY_SCHEMA.matches(
self.raw_key_bundle[PACKET_TYPE_PRIMARY_KEY]["key"]
)
)
# Parse corresponding raw packet for comparison
_, header_len, _, _ = parse_packet_header(
self.raw_key_bundle[PACKET_TYPE_PRIMARY_KEY]["packet"]
)
# pylint: disable=unsubscriptable-object
parsed_raw_packet = parse_pubkey_payload(
bytearray(
self.raw_key_bundle[PACKET_TYPE_PRIMARY_KEY]["packet"][
header_len:
]
)
)
# And compare
self.assertDictEqual(
self.raw_key_bundle[PACKET_TYPE_PRIMARY_KEY]["key"],
parsed_raw_packet,
)
# Expect one primary key signature (revocation signature)
self.assertEqual(
len(self.raw_key_bundle[PACKET_TYPE_PRIMARY_KEY]["signatures"]), 1
)
# Expect one User ID packet, one User Attribute packet and one Subkey,
# each with correct data
for _type in [
PACKET_TYPE_USER_ID,
PACKET_TYPE_USER_ATTR,
PACKET_TYPE_SUB_KEY,
]:
# Of each type there is only one packet
self.assertTrue(len(self.raw_key_bundle[_type]) == 1)
# The raw packet is stored as key in the per-packet type collection
raw_packet = next(iter(self.raw_key_bundle[_type]))
# Its values are the raw packets header and body length
self.assertEqual(
len(raw_packet),
self.raw_key_bundle[_type][raw_packet]["header_len"]
+ self.raw_key_bundle[_type][raw_packet]["body_len"],
)
# and one self-signature
self.assertEqual(
len(self.raw_key_bundle[_type][raw_packet]["signatures"]), 1
)
def test_assign_certified_key_info_errors(self):
"""Test _assign_certified_key_info errors with manually crafted data
based on real gpg key data (see self.raw_key_bundle)."""
        # Replace legitimate user certificate with a bogus packet
wrong_cert_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = wrong_cert_bundle[PACKET_TYPE_USER_ID].popitem()
packet_data["signatures"] = [bytearray([0b01111111, 0])]
wrong_cert_bundle[PACKET_TYPE_USER_ID][packet] = packet_data
# Replace primary key id with a non-associated keyid
wrong_keyid_bundle = deepcopy(self.raw_key_bundle)
wrong_keyid_bundle[PACKET_TYPE_PRIMARY_KEY]["key"][
"keyid"
] = "8465A1E2E0FB2B40ADB2478E18FB3F537E0C8A17"
# Remove a byte in user id packet to make signature verification fail
invalid_cert_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = invalid_cert_bundle[PACKET_TYPE_USER_ID].popitem()
packet = packet[:-1]
invalid_cert_bundle[PACKET_TYPE_USER_ID][packet] = packet_data
test_data = [
# Skip and log parse_signature_packet error
(wrong_cert_bundle, "Expected packet 2, but got 63 instead"),
# Skip and log signature packet that doesn't match primary key id
(wrong_keyid_bundle, "Ignoring User ID certificate issued by"),
# Skip and log invalid signature
(invalid_cert_bundle, "Ignoring invalid User ID self-certificate"),
]
for bundle, expected_msg in test_data:
with patch("securesystemslib.gpg.common.log") as mock_log:
_assign_certified_key_info(bundle)
msg = str(mock_log.info.call_args[0][0])
self.assertTrue(
expected_msg in msg,
"'{}' not in '{}'".format( # pylint: disable=consider-using-f-string
expected_msg, msg
),
)
def test_assign_certified_key_info_expiration(self):
"""Test assignment of key expiration date in
gpg.common._assign_certified_key_info using real gpg data (with ambiguity
resolution / prioritization).
# FIXME: Below tests are missing proper assertions for which User ID
self-certificate is considered for the expiration date. Reasons are:
- gpg does not let you (easily) modify individual expiration dates of User
IDs (changing one changes all), hence we cannot assert the chosen packet
by the particular date
- _assign_certified_key_info first verifies all self-certificates and then
only considers successfully verified ones, hence we cannot modify the
certificate data, before passing it to _assign_certified_key_info
IMO the best solution is a better separation of concerns, e.g. separate
self-certificate verification and packet prioritization.
"""
# Test ambiguity resolution scheme with 3 User IDs
# :user ID packet: "Test Expiration I <test@expir.one>"
# :user ID packet: "Test Expiration II <test@expir.two>"
# :user ID packet: "Test Expiration III <test@expir.three>"
# User ID packets are ordered by their creation time in ascending order.
        # "Test Expiration II" has the primary user ID flag set and therefore
# the highest priority.
key = _assign_certified_key_info(self.raw_expired_key_bundle)
self.assertTrue(key["validity_period"] == 87901) # ~ 1 day
# Test ambiguity resolution scheme with 2 User IDs
# :user ID packet: "Test Expiration III <test@expir.three>"
# :user ID packet: "Test Expiration I <test@expir.one>"
# User ID packets are ordered by their creation time in descending order.
# Neither packet has the primary user ID flag set.
# "Test Expiration III" has the highest priority.
raw_key_bundle = deepcopy(self.raw_expired_key_bundle)
user_id_items = list(
reversed(raw_key_bundle[PACKET_TYPE_USER_ID].items())
)
del user_id_items[1]
raw_key_bundle[PACKET_TYPE_USER_ID] = OrderedDict(user_id_items)
key = _assign_certified_key_info(raw_key_bundle)
self.assertTrue(key["validity_period"] == 87901) # ~ 1 day
def test_get_verified_subkeys_errors(self):
"""Test _get_verified_subkeys errors with manually crafted data based on
real gpg key data (see self.raw_key_bundle)."""
# Tamper with subkey (change version number) to trigger key parsing error
bad_subkey_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = bad_subkey_bundle[PACKET_TYPE_SUB_KEY].popitem()
packet = bytes(
packet[: packet_data["header_len"]]
+ bytearray([0x03])
+ packet[packet_data["header_len"] + 1 :]
)
bad_subkey_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Add bogus sig to trigger sig parsing error
wrong_sig_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = wrong_sig_bundle[PACKET_TYPE_SUB_KEY].popitem()
# NOTE: We can't only pass the bogus sig, because that would also trigger
# the not enough sigs error (see not_enough_sigs_bundle) and mock only
# lets us assert for the most recent log statement
packet_data["signatures"].append(bytearray([0b01111111, 0]))
wrong_sig_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Remove sigs to trigger not enough sigs error
not_enough_sigs_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = not_enough_sigs_bundle[
PACKET_TYPE_SUB_KEY
].popitem()
packet_data["signatures"] = []
not_enough_sigs_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Duplicate sig to trigger wrong amount signatures
too_many_sigs_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = too_many_sigs_bundle[
PACKET_TYPE_SUB_KEY
].popitem()
packet_data["signatures"] = packet_data["signatures"] * 2
too_many_sigs_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Tamper with primary key to trigger signature verification error
invalid_sig_bundle = deepcopy(self.raw_key_bundle)
invalid_sig_bundle[PACKET_TYPE_PRIMARY_KEY][
"packet"
] = invalid_sig_bundle[PACKET_TYPE_PRIMARY_KEY]["packet"][:-1]
test_data = [
(bad_subkey_bundle, "Pubkey packet version '3' not supported"),
(wrong_sig_bundle, "Expected packet 2, but got 63 instead"),
(
not_enough_sigs_bundle,
"wrong amount of key binding signatures (0)",
),
(
too_many_sigs_bundle,
"wrong amount of key binding signatures (2)",
),
(invalid_sig_bundle, "invalid key binding signature"),
]
for bundle, expected_msg in test_data:
with patch("securesystemslib.gpg.common.log") as mock_log:
_get_verified_subkeys(bundle)
msg = str(mock_log.info.call_args[0][0])
self.assertTrue(
expected_msg in msg,
"'{}' not in '{}'".format( # pylint: disable=consider-using-f-string
expected_msg, msg
),
)
def test_get_verified_subkeys(self):
"""Test correct assignment of subkey expiration date in
gpg.common._get_verified_subkeys using real gpg data."""
subkeys = _get_verified_subkeys(self.raw_expired_key_bundle)
# Test subkey with validity period 175451, i.e. ~ 2 days
self.assertTrue(
subkeys["0ce427fa3f0f50bc83a4a760ed95e1581691db4d"].get(
"validity_period"
)
== 175451
)
# Test subkey without validity period, i.e. it does not expire
self.assertTrue(
subkeys[ # pylint: disable=singleton-comparison
"70cfabf1e2f1dc60ac5c7bca10cd20d3d5bcb6ef"
].get("validity_period")
== None
)
def test_get_pubkey_bundle_errors(self):
"""Test correct error raising in get_pubkey_bundle."""
# Call without key data
with self.assertRaises(KeyNotFoundError):
get_pubkey_bundle(None, "deadbeef")
# Pass wrong keyid with valid gpg data to trigger KeyNotFoundError.
not_associated_keyid = "8465A1E2E0FB2B40ADB2478E18FB3F537E0C8A17"
with self.assertRaises(KeyNotFoundError):
get_pubkey_bundle(self.raw_key_data, not_associated_keyid)
def test_parse_signature_packet_errors(self):
"""Test parse_signature_packet errors with manually crafted data."""
# passed data | expected error message
test_data = [
(
bytearray([0b01000010, 1, 255]),
"Signature version '255' not supported",
),
(
bytearray([0b01000010, 2, 4, 255]),
"Signature type '255' not supported",
),
(
bytearray([0b01000010, 3, 4, 0, 255]),
"Signature algorithm '255' not supported",
),
(
bytearray([0b01000010, 4, 4, 0, 1, 255]),
"Hash algorithm '255' not supported",
),
]
for data, expected_error_str in test_data:
with self.assertRaises(ValueError) as ctx:
parse_signature_packet(data)
self.assertTrue(
expected_error_str in str(ctx.exception),
"'{}' not in '{}'".format( # pylint: disable=consider-using-f-string
expected_error_str, str(ctx.exception)
),
)
@unittest.skipIf(not have_gpg(), "gpg not found")
class TestGPGRSA(unittest.TestCase):
"""Test signature creation, verification and key export from the gpg
module"""
default_keyid = "8465A1E2E0FB2B40ADB2478E18FB3F537E0C8A17"
signing_subkey_keyid = "C5A0ABE6EC19D0D65F85E2C39BE9DF5131D924E9"
encryption_subkey_keyid = "6A112FD3390B2E53AFC2E57F8FC8E12099AECEEA"
unsupported_subkey_keyid = "611A9B648E16F54E8A7FAD5DA51E8CDF3B06524F"
expired_key_keyid = "E8AC80C924116DABB51D4B987CB07D6D2C199C7C"
keyid_768C43 = "7B3ABB26B97B655AB9296BD15B0BD02E1C768C43" # pylint: disable=invalid-name
@classmethod
def setUpClass(self): # pylint: disable=bad-classmethod-argument
# Create directory to run the tests without having everything blow up
self.working_dir = os.getcwd()
# Find demo files
gpg_keyring_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gpg_keyrings", "rsa"
)
self.test_dir = os.path.realpath(tempfile.mkdtemp())
self.gnupg_home = os.path.join(self.test_dir, "rsa")
shutil.copytree(gpg_keyring_path, self.gnupg_home)
os.chdir(self.test_dir)
@classmethod
def tearDownClass(self): # pylint: disable=bad-classmethod-argument
"""Change back to initial working dir and remove temp test directory."""
os.chdir(self.working_dir)
shutil.rmtree(
self.test_dir, onerror=GPGTestUtils.ignore_not_found_error
)
def test_export_pubkey_error(self):
"""Test correct error is raised if function called incorrectly."""
with self.assertRaises(ValueError):
export_pubkey("not-a-key-id")
    def test_export_pubkey(self):
        """Export a public key and make sure the parameters are the right ones.
        Since there is very little we can do to check RSA key parameters
        directly, we pre-exported the public key to an SSH key, which we can
        load with cryptography for the sake of comparison."""
# export our gpg key, using our functions
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
our_exported_key = rsa_create_pubkey(key_data)
# load the equivalent ssh key, and make sure that we get the same RSA key
# parameters
ssh_key_basename = (
"{}.ssh".format( # pylint: disable=consider-using-f-string
self.default_keyid
)
)
ssh_key_path = os.path.join(self.gnupg_home, ssh_key_basename)
with open(ssh_key_path, "rb") as fp:
keydata = fp.read()
ssh_key = serialization.load_ssh_public_key(
keydata, backends.default_backend()
)
self.assertEqual(
ssh_key.public_numbers().n, our_exported_key.public_numbers().n
)
self.assertEqual(
ssh_key.public_numbers().e, our_exported_key.public_numbers().e
)
subkey_keyids = list(key_data["subkeys"].keys())
# We export the whole master key bundle which must contain the subkeys
self.assertTrue(self.signing_subkey_keyid.lower() in subkey_keyids)
# Currently we do not exclude encryption subkeys
self.assertTrue(self.encryption_subkey_keyid.lower() in subkey_keyids)
        # However, we do exclude subkeys whose algorithm we do not support
self.assertFalse(self.unsupported_subkey_keyid.lower() in subkey_keyids)
# When passing the subkey keyid we also export the whole keybundle
key_data2 = export_pubkey(
self.signing_subkey_keyid, homedir=self.gnupg_home
)
self.assertDictEqual(key_data, key_data2)
def test_export_pubkeys(self):
"""Test export multiple pubkeys at once."""
key_dict = export_pubkeys(
[self.default_keyid, self.keyid_768C43], homedir=self.gnupg_home
)
ANY_PUBKEY_DICT_SCHEMA.check_match(key_dict)
self.assertListEqual(
sorted([self.default_keyid.lower(), self.keyid_768C43.lower()]),
sorted(key_dict.keys()),
)
def test_gpg_sign_and_verify_object_with_default_key(self):
"""Create a signature using the default key on the keyring"""
test_data = b"test_data"
wrong_data = b"something malicious"
signature = create_signature(test_data, homedir=self.gnupg_home)
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key_data, test_data))
self.assertFalse(verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object(self):
"""Create a signature using a specific key on the keyring"""
test_data = b"test_data"
wrong_data = b"something malicious"
signature = create_signature(
test_data, keyid=self.default_keyid, homedir=self.gnupg_home
)
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key_data, test_data))
self.assertFalse(verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object_default_keyring(self):
"""Sign/verify using keyring from envvar."""
test_data = b"test_data"
gnupg_home_backup = os.environ.get("GNUPGHOME")
os.environ["GNUPGHOME"] = self.gnupg_home
signature = create_signature(test_data, keyid=self.default_keyid)
key_data = export_pubkey(self.default_keyid)
self.assertTrue(verify_signature(signature, key_data, test_data))
# Reset GNUPGHOME
if gnupg_home_backup:
os.environ["GNUPGHOME"] = gnupg_home_backup
else:
del os.environ["GNUPGHOME"]
def test_create_signature_with_expired_key(self):
"""Test signing with expired key raises gpg CommandError."""
with self.assertRaises(CommandError) as ctx:
create_signature(
b"livestock",
keyid=self.expired_key_keyid,
homedir=self.gnupg_home,
)
expected = "returned non-zero exit status '2'"
self.assertTrue(
expected in str(ctx.exception),
"{} not in {}".format( # pylint: disable=consider-using-f-string
expected, ctx.exception
),
)
def test_verify_signature_with_expired_key(self):
"""Test sig verification with expired key raises KeyExpirationError."""
signature = {
"keyid": self.expired_key_keyid,
"other_headers": "deadbeef",
"signature": "deadbeef",
}
content = b"livestock"
key = export_pubkey(self.expired_key_keyid, homedir=self.gnupg_home)
with self.assertRaises(KeyExpirationError) as ctx:
verify_signature(signature, key, content)
expected = (
"GPG key 'e8ac80c924116dabb51d4b987cb07d6d2c199c7c' "
"created on '2019-03-25 12:46 UTC' with validity period '1 day, "
"0:25:01' expired on '2019-03-26 13:11 UTC'."
)
self.assertTrue(
expected == str(ctx.exception),
"\nexpected: {}" # pylint: disable=consider-using-f-string
"\ngot: {}".format( # pylint: disable=consider-using-f-string
expected, ctx.exception
),
)
@unittest.skipIf(not have_gpg(), "gpg not found")
class TestGPGDSA(unittest.TestCase):
"""Test signature creation, verification and key export from the gpg
module"""
default_keyid = "C242A830DAAF1C2BEF604A9EF033A3A3E267B3B1"
@classmethod
def setUpClass(self): # pylint: disable=bad-classmethod-argument
# Create directory to run the tests without having everything blow up
self.working_dir = os.getcwd()
self.test_dir = os.path.realpath(tempfile.mkdtemp())
self.gnupg_home = os.path.join(self.test_dir, "dsa")
# Find keyrings
keyrings = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gpg_keyrings", "dsa"
)
shutil.copytree(keyrings, self.gnupg_home)
os.chdir(self.test_dir)
@classmethod
def tearDownClass(self): # pylint: disable=bad-classmethod-argument
"""Change back to initial working dir and remove temp test directory."""
os.chdir(self.working_dir)
shutil.rmtree(
self.test_dir, onerror=GPGTestUtils.ignore_not_found_error
)
    def test_export_pubkey(self):
        """Export a public key and make sure the parameters are the right ones.
        Since there is very little we can do to check key parameters directly,
        we pre-exported the public key to an X.509 SubjectPublicKeyInfo key,
        which we can load with cryptography for the sake of comparison."""
# export our gpg key, using our functions
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
our_exported_key = dsa_create_pubkey(key_data)
# load same key, pre-exported with 3rd-party tooling
pem_key_basename = (
"{}.pem".format( # pylint: disable=consider-using-f-string
self.default_keyid
)
)
pem_key_path = os.path.join(self.gnupg_home, pem_key_basename)
with open(pem_key_path, "rb") as fp:
keydata = fp.read()
pem_key = serialization.load_pem_public_key(
keydata, backends.default_backend()
)
# make sure keys match
self.assertEqual(
pem_key.public_numbers().y, our_exported_key.public_numbers().y
)
self.assertEqual(
pem_key.public_numbers().parameter_numbers.g,
our_exported_key.public_numbers().parameter_numbers.g,
)
self.assertEqual(
pem_key.public_numbers().parameter_numbers.q,
our_exported_key.public_numbers().parameter_numbers.q,
)
self.assertEqual(
pem_key.public_numbers().parameter_numbers.p,
our_exported_key.public_numbers().parameter_numbers.p,
)
def test_gpg_sign_and_verify_object_with_default_key(self):
"""Create a signature using the default key on the keyring"""
test_data = b"test_data"
wrong_data = b"something malicious"
signature = create_signature(test_data, homedir=self.gnupg_home)
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key_data, test_data))
self.assertFalse(verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object(self):
"""Create a signature using a specific key on the keyring"""
test_data = b"test_data"
wrong_data = b"something malicious"
signature = create_signature(
test_data, keyid=self.default_keyid, homedir=self.gnupg_home
)
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key_data, test_data))
self.assertFalse(verify_signature(signature, key_data, wrong_data))
@unittest.skipIf(not have_gpg(), "gpg not found")
class TestGPGEdDSA(unittest.TestCase):
"""Test signature creation, verification and key export from the gpg
module"""
default_keyid = "4E630F84838BF6F7447B830B22692F5FEA9E2DD2"
@classmethod
def setUpClass(self): # pylint: disable=bad-classmethod-argument
# Create directory to run the tests without having everything blow up
self.working_dir = os.getcwd()
self.test_dir = os.path.realpath(tempfile.mkdtemp())
self.gnupg_home = os.path.join(self.test_dir, "dsa")
# Find keyrings
keyrings = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gpg_keyrings", "eddsa"
)
shutil.copytree(keyrings, self.gnupg_home)
os.chdir(self.test_dir)
@classmethod
def tearDownClass(self): # pylint: disable=bad-classmethod-argument
"""Change back to initial working dir and remove temp test directory."""
os.chdir(self.working_dir)
shutil.rmtree(
self.test_dir, onerror=GPGTestUtils.ignore_not_found_error
)
def test_gpg_sign_and_verify_object_with_default_key(self):
"""Create a signature using the default key on the keyring"""
test_data = b"test_data"
wrong_data = b"something malicious"
signature = create_signature(test_data, homedir=self.gnupg_home)
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key_data, test_data))
self.assertFalse(verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object_with_specific_key(self):
"""Create a signature using a specific key on the keyring"""
test_data = b"test_data"
wrong_data = b"something malicious"
signature = create_signature(
test_data, keyid=self.default_keyid, homedir=self.gnupg_home
)
key_data = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key_data, test_data))
self.assertFalse(verify_signature(signature, key_data, wrong_data))
def test_verify_short_signature(self):
"""Correctly verify a special-crafted short signature."""
test_data = b"hello"
signature_path = os.path.join(self.gnupg_home, "short.sig")
# Read special-crafted raw gpg signature that is one byte too short
with open(signature_path, "rb") as f:
signature_data = f.read()
# Check that the signature is padded upon parsing
# NOTE: The returned signature is a hex string and thus twice as long
signature = parse_signature_packet(signature_data)
self.assertTrue(len(signature["signature"]) == (ED25519_SIG_LENGTH * 2))
# Check that the signature can be successfully verified
key = export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(verify_signature(signature, key, test_data))
class TestVersion(unittest.TestCase):
"""Tests for the Version utility class."""
def test_version_roundtrip_string(self):
"""Version parses and formats strings correctly."""
for value, expected in [
("1.3.0", Version(1, 3, 0)),
("1.3.1", Version(1, 3, 1)),
("1.3.22", Version(1, 3, 22)),
]:
self.assertEqual(Version.from_string(value), expected)
self.assertEqual(str(expected), value)
def test_version_from_string_invalid(self):
"""Version.from_string rejects invalid inputs."""
for value in [
"1.3",
"1.33.0",
"1.3.-1",
"1.3.1a",
]:
with self.assertRaises(
ValueError, msg=f"expected error for input '{value}'"
):
Version.from_string(value)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "c8b932005c8dc10078a020835246294b",
"timestamp": "",
"source": "github",
"line_count": 988,
"max_line_length": 114,
"avg_line_length": 39.49898785425101,
"alnum_prop": 0.6165022421524664,
"repo_name": "secure-systems-lab/securesystemslib",
"id": "5431de739eabf52204a14fc5283da33f9e2b4376",
"size": "39048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_gpg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "657940"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
} |
"""
Author: Benjamin Torben-Nielsen
Date: 18/08/2015
"""
import sys
import numpy as np
import scipy
import scipy.optimize
import matplotlib.pyplot as plt
import pylab as p, time
class BTStats :
'''
Compute morphometric features and statistics of a single morphology
Assume the "3 point" soma of the curated NeuroMorpho format. (see website)
B. Torben-Nielsen, 2013-01 @ BBP (from BTN legacy code)
'''
def __init__(self,tree) :
self._tree = tree
self._all_nodes = self._tree.get_nodes()
# compute some of the most used stats +
self._soma_points, self._bif_points, self._end_points = \
self.get_points_of_interest()
def get_points_of_interest(self) :
"""
        Get lists containing the "points of interest", i.e., soma points, \
        bifurcation points and end/terminal points.
:rtype: list of lists
"""
soma_points = []
bif_points = []
end_points = []
for node in self._all_nodes :
if len(node.get_child_nodes()) > 1 :
if node._parent_node != None :
bif_points.append(node) # the root is not a bifurcation
if len(node.get_child_nodes()) == 0 :
if node.get_parent_node().get_content()['p3d'].index != 1 :
end_points.append(node)
if node._parent_node == None :
soma_points = node
return soma_points, bif_points, end_points
"""
Global measures (1 for each tree)
"""
def approx_soma(self):
"""
*Scalar, global morphometric*
By NeuroMorpho.org convention: soma surface ~ 4*pi*r^2, \
where r is the abs(y_value) of point 2 and 3 in the SWC file
:return: soma surface in micron squared
"""
r = abs(self._tree.get_node_with_index(2).get_content()['p3d'].y)
print 'r=',r
return 4.0*np.pi*r*r
def no_bifurcations(self) :
"""
*Scalar, global morphometric*
        Count the number of bifurcation points in a complete morphology
        :rtype: number of bifurcations
"""
return len(self._bif_points)
def no_terminals(self) :
"""
*Scalar, global morphometric*
        Count the number of terminal points in a complete morphology
:rtype: number of terminals
"""
return len(self._end_points)
def no_stems(self) :
"""
*Scalar, global morphometric*
        Count the number of stems in a complete morphology (except the three \
        point soma from the NeuroMorpho.org standard)
:rtype: number of stems
"""
return len( self._tree.get_root().get_child_nodes() ) -2
def total_length(self) :
"""
*Scalar, global morphometric*
Calculate the total length of a complete morphology
:rtype: total length in micron
"""
L = 0
for node in self._all_nodes :
n = node.get_content()['p3d']
if(n.index not in [1,2,3]) :
p = node.get_parent_node().get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + \
(n.z-p.z)*(n.z-p.z) )
L += d
return L
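    # Hedged refactoring sketch (not part of the original class): the 3-D
    # segment length above is recomputed in several methods below; a shared
    # helper would read
    #
    #   def _dist(n, p):
    #       return np.sqrt((n.x-p.x)**2 + (n.y-p.y)**2 + (n.z-p.z)**2)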
def total_surface(self) :
"""
*Scalar, global morphometric*
Total neurite surface (at least, surface of all neurites excluding
the soma. In accordance to the NeuroMorpho / L-Measure standard)
:rtype: total surface in micron squared
"""
total_surf = 0
all_surfs = []
for node in self._all_nodes :
n = node.get_content()['p3d']
if n.index not in [1,2,3] :
p = node.get_parent_node().get_content()['p3d']
H = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + \
(n.z-p.z)*(n.z-p.z) )
surf = 2*np.pi*n.radius*H
all_surfs.append(surf)
total_surf = total_surf + surf
return total_surf, all_surfs
def total_volume(self) :
"""
*Scalar, global morphometric*
Total neurite volume (at least, surface of all neurites excluding
the soma. In accordance to the NeuroMorpho / L-Measure standard)
:rtype: total volume in micron cubed
"""
total_vol = 0
all_vols = []
for node in self._all_nodes :
n = node.get_content()['p3d']
if n.index not in [1,2,3] :
p = node.get_parent_node().get_content()['p3d']
H = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + \
(n.z-p.z)*(n.z-p.z) )
vol = np.pi*n.radius*n.radius*H
all_vols.append(vol)
total_vol = total_vol + vol
return total_vol, all_vols
def total_dimensions(self) :
"""
*Scalar, global morphometric*
        Overall dimension of the whole morphology. (No translation of the \
        morphology according to arbitrary axes.)
:return: dx : float
x-dimension
:return: dy : float
y-dimension
:return: dz : float
z-dimension
"""
minX = sys.maxint
maxX = -1 * sys.maxint
minY = sys.maxint
maxY = -1 * sys.maxint
minZ = sys.maxint
maxZ = -1 * sys.maxint
for node in self._all_nodes :
n = node.get_content()['p3d']
minX = n.x if n.x < minX else minX
maxX = n.x if n.x > maxX else maxX
minY = n.y if n.y < minY else minY
maxY = n.y if n.y > maxY else maxY
minZ = n.z if n.z < minZ else minZ
maxZ = n.z if n.z > maxZ else maxZ
dx = np.sqrt((maxX-minX)*(maxX-minX))
dy = np.sqrt((maxY-minY)*(maxY-minY))
dz = np.sqrt((maxZ-minZ)*(maxZ-minZ))
return dx,dy,dz
"""
Local measures
"""
def get_segment_pathlength(self,to_node) :
"""
*Vector, local morphometric*.
Length of the incoming segment. Between this node and the soma or \
another branching point. A path is defined as a stretch between \
the soma and a bifurcation point, between bifurcation points, \
        or between a bifurcation point and a terminal point
:param to_node: node
:return: length of the incoming path in micron
"""
L = 0
if self._tree.is_leaf(to_node) :
path = self._tree.path_to_root(to_node)
L = 0
else :
path = self._tree.path_to_root(to_node)[1:]
p = to_node.get_parent_node().get_content()['p3d']
n = to_node.get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + (n.z-p.z)*(n.z-p.z) )
L = L + d
for node in path :
# print 'going along the path'
n = node.get_content()['p3d']
if len(node.get_child_nodes()) >= 2 :
return L
else :
p = node.get_parent_node().get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + (n.z-p.z)*(n.z-p.z) )
L = L + d
def get_pathlength_to_root(self,from_node) :
"""
        Length of the path between from_node and the root.
:param from_node: node
:return: length of the path between the soma and the provided node
"""
L = 0
if self._tree.is_leaf(from_node) :
path = self._tree.path_to_root(from_node)
L = 0
else :
path = self._tree.path_to_root(from_node)[1:]
p = from_node.get_parent_node().get_content()['p3d']
n = from_node.get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + (n.z-p.z)*(n.z-p.z) )
L = L + d
for node in path[:-1] :
# print 'going along the path'
n = node.get_content()['p3d']
p = node.get_parent_node().get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + (n.z-p.z)*(n.z-p.z) )
L = L + d
return L
def get_segment_Euclidean_length(self,to_node) :
"""
        Euclidean length of the incoming segment, between this node and the \
        soma or another branching point
:param to_node: node
:return: Euclidean distance of the incoming path in micron
"""
L = 0
if self._tree.is_leaf(to_node) :
path = self._tree.path_to_root(to_node)
else :
path = self._tree.path_to_root(to_node)[1:]
n = to_node.get_content()['p3d']
for node in path :
if len(node.get_child_nodes()) >= 2 :
return L
else :
p = node.get_parent_node().get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + (n.z-p.z)*(n.z-p.z) )
L = d
def get_Euclidean_length_to_root(self,from_node) :
"""
        Euclidean length between from_node and the root
        :param from_node: node
        :return: Euclidean distance between the soma and the provided node
"""
n = from_node.get_content()['p3d']
p = self._tree.get_root().get_content()['p3d']
d = np.sqrt( (n.x-p.x)*(n.x-p.x) + (n.y-p.y)*(n.y-p.y) + (n.z-p.z)*(n.z-p.z) )
return d
def degree_of_node(self,node) :
"""
        Degree of a node. (The number of leaf nodes in the subtree rooted at \
        the provided node)
:param node
:return: degree
"""
return self._tree.degree_of_node(node)
def order_of_node(self,node):
"""
        Order of a node. (Going centrifugally away from the soma, the order \
        increases by 1 each time a bifurcation point is passed)
:param node
:return: order
"""
return self._tree.order_of_node(node)
def partition_asymmetry(self,node) :
"""
Compute the partition asymmetry for a given node.
:param node
:return: partition asymmetry
"""
children = node.get_child_nodes()
if children == None or len(children) == 1 :
return None
d1 = self._tree.degree_of_node(children[0])
d2 = self._tree.degree_of_node(children[1])
if(d1 == 1 and d2 == 1) :
return 0 # by definition
else :
return np.abs(d1-d2)/(d1+d2-2.0)
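    # Worked example: daughter degrees (3, 1) give |3-1|/(3+1-2.0) = 1.0
    # (maximally asymmetric); degrees (2, 2) give 0.0; degrees (1, 1) return 0
    # by definition above.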
def bifurcation_angle(self,node,where='local') :
"""
Compute the angles related to the parent - daughter constellation:
amplitude, tilt and torque.
Can be computed locally, i.e., taking the parent segment and the first
segment of the daughters, or, remote between the parent segment and
next bifurcation or terminal point
Follows: http://cng.gmu.edu:8080/Lm/help/index.htm
        :param node: has to be a bifurcation point, otherwise returns -1
        :param where: 'local' or 'remote'
        :return: [amplitude, tilt, torque] (tilt and torque are currently
            returned as -1 placeholders)
"""
child_node1,child_node2 = self._get_child_nodes(node,where=where)
ampl_alpha = self._get_ampl_angle(child_node1)
ampl_beta = self._get_ampl_angle(child_node2)
if ampl_alpha > 360 or ampl_beta > 360 :
print 'alpha=%f, beta=%f' % (ampl_alpha,ampl_beta)
raw_input('ENTER')
# ampl_gamma= ampl_alpha - ampl_beta if ampl_beta < ampl_alpha else \
# ampl_beta - ampl_alpha
ampl_gamma = np.sqrt( (ampl_alpha-ampl_beta)**2 )
ampl_gamma = ampl_gamma if ampl_gamma <=180 else 360 -ampl_gamma
# print "alpha=%f, beta=%f, gamma=%f" % (ampl_alpha,ampl_beta,ampl_gamma)
return [ampl_gamma,-1,-1]
def bifurcation_sibling_ratio(self,node,where='local') :
"""
*Vector, local morphometric*
Ratio between the diameters of two siblings.
:param node: the parent node
:param where: string 'local' or 'remote' calculation
"""
child1,child2 = self._get_child_nodes(node,where=where)
#print 'child1=',child1,', child2=',child2, ' @ ',where
radius1 = child1.get_content()['p3d'].radius
radius2 = child2.get_content()['p3d'].radius
if radius1 > radius2 :
return radius1 / radius2
else :
return radius2 / radius1
def _get_child_nodes(self,node,where) :
"""
Return the two daughter nodes: the direct children ('local') or the
first bifurcation/terminal point reached downstream of each ('remote').
"""
children = node.get_child_nodes()
if where == 'local' :
return children[0],children[1]
else :
grandchildren = []
for child in children :
t_child = self._find_remote_child(child)
grandchildren.append(t_child)
return grandchildren[0],grandchildren[1]
def _find_remote_child(self,node) :
"""
Walk centrifugally from the given node until a bifurcation or a leaf
is reached, and return that node.
"""
children = node.get_child_nodes()
t_node = node
while len(children) < 2 :
if len(children) == 0 :
# print t_node, '-> found a leaf'
return t_node
t_node = children[0]
children = t_node.get_child_nodes()
# print t_node,' -> found a bif'
return t_node
def bifurcation_ralls_ratio(self,node,precision=0.2,where='local') :
"""
*Vector, local morphometric*
Approximation of Rall's ratio by bisection. First implementation.
Solves D^p = d1^p + d2^p for p, D being the parent diameter, d1 and d2
the daughter diameters, and p the approximated value of Rall's ratio.
:param precision: tolerance on the mismatch before the search stops
:param where: 'local' or 'remote'
"""
p_diam = node.get_content()['p3d'].radius*2
child1,child2 = self._get_child_nodes(node,where=where)
d1_diam = child1.get_content()['p3d'].radius*2
d2_diam = child2.get_content()['p3d'].radius*2
print 'pd=%f,d1=%f,d2=%f' % (p_diam,d1_diam,d2_diam)
p_lower = 1.0
p_upper = 5.0 # THE associated mismatch MUST BE NEGATIVE
mismatch=100000000
count_outer = 0
while mismatch > precision and count_outer < 20:
lower_mismatch = (np.power(d1_diam,p_lower) + np.power(d2_diam,p_lower))-np.power(p_diam,p_lower)
upper_mismatch = (np.power(d1_diam,p_upper) + np.power(d2_diam,p_upper))-np.power(p_diam,p_upper)
p_mid = (p_lower + p_upper)/2.0
mid_mismatch = (np.power(d1_diam,p_mid) + np.power(d2_diam,p_mid))-np.power(p_diam,p_mid)
# print 'p_lower=%f, p_mid=%f' % (p_lower,p_mid)
#print 'looking for the MID'
count_inner = 0
while mid_mismatch > 0 and count_inner < 20:
p_lower = p_mid
lower_mismatch = (np.power(d1_diam,p_lower) + np.power(d2_diam,p_lower))-np.power(p_diam,p_lower)
p_mid = (p_lower + p_upper)/2.0
mid_mismatch = (np.power(d1_diam,p_mid) + np.power(d2_diam,p_mid))-np.power(p_diam,p_mid)
# print 'p_lower=%f, p_mid=%f, p_upper=%f' % (p_lower,p_mid,p_upper)
# print 'lower=%f, mid=%f, upper=%f' % (lower_mismatch,mid_mismatch,upper_mismatch)
count_inner = count_inner + 1
#print 'found the MID'
# print '\nlow_m=%f,mid_m=%f,up_m=%f' % (lower_mismatch,mid_mismatch,upper_mismatch)
# print 'low_p=%f,mid_p=%f,up_p=%f' % (p_lower,p_mid,p_upper)
if upper_mismatch < mid_mismatch :
p_upper = p_mid
else :
p_lower = p_mid
mismatch = np.abs(mid_mismatch)
count_outer = count_outer + 1
#print 'mismatch=%f, p_lower=%f,p_upper=%f' % (mismatch,p_lower,p_upper)
# raw_input('Enter')
if mismatch > precision : # no convergence within the iteration budget
return np.nan
else :
return p_mid
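# Hedged alternative (an addition, not in the original source): the bisection
# above can be delegated to scipy's Brent root finder, assuming the mismatch
# changes sign on [1, 5]; scipy is an extra dependency not imported by this
# file, and the method name is an assumption.
def _ralls_ratio_brentq(self, p_diam, d1_diam, d2_diam):
from scipy.optimize import brentq
f = lambda p: np.power(d1_diam, p) + np.power(d2_diam, p) - np.power(p_diam, p)
if f(1.0) * f(5.0) > 0:
return np.nan # no sign change on [1, 5], so no bracketed root
return brentq(f, 1.0, 5.0)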
def bifurcation_ralls_ratio2(self,node,precision=0.2,where='local') :
"""
*Vector, local morphometric*
Approximation of Rall's ratio by plain bisection (no inner scan). Second implementation.
Solves D^p = d1^p + d2^p for p, the approximated value of Rall's ratio.
"""
p_diam = node.get_content()['p3d'].radius*2
child1,child2 = self._get_child_nodes(node,where=where)
d1_diam = child1.get_content()['p3d'].radius*2
d2_diam = child2.get_content()['p3d'].radius*2
print 'pd=%f,d1=%f,d2=%f' % (p_diam,d1_diam,d2_diam)
if d1_diam >= p_diam or d2_diam >= p_diam :
return 1
p_lower = 1.0
p_upper = 5.0 # THE associated mismatch MUST BE NEGATIVE
mismatch=100000000
count_outer = 0
while mismatch > precision and count_outer < 100:
lower_mismatch = (np.power(d1_diam,p_lower) + np.power(d2_diam,p_lower))-np.power(p_diam,p_lower)
upper_mismatch = (np.power(d1_diam,p_upper) + np.power(d2_diam,p_upper))-np.power(p_diam,p_upper)
p_mid = (p_lower + p_upper)/2.0
mid_mismatch = (np.power(d1_diam,p_mid) + np.power(d2_diam,p_mid))-np.power(p_diam,p_mid)
# print 'p_lower=%f, p_mid=%f' % (p_lower,p_mid)
#print 'looking for the MID'
if upper_mismatch < mid_mismatch :
p_upper = p_mid
else :
p_lower = p_mid
mismatch = np.abs(mid_mismatch)
count_outer = count_outer + 1
#print 'mismatch=%f, p_lower=%f,p_upper=%f' % (mismatch,p_lower,p_upper)
# raw_input('Enter')
if mismatch > precision : # no convergence within the 100-iteration budget
return np.nan
else :
return p_mid
def _get_ampl_angle(self,node) :
"""
Compute the angle of this node on the XY plane and against the origin
"""
pos_angle = lambda x: x if x > 0 else 360 + x # map negative angles into (180, 360]
a = np.rad2deg(np.arctan2(node.get_content()['p3d'].y,node.get_content()['p3d'].x))
return pos_angle(a)
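# Hedged usage sketch (an addition; the class name and tree API below are
# assumed from the methods above, not confirmed by this excerpt):
# stats = BTStats(tree) # hypothetical constructor
# for node in tree.get_nodes(): # hypothetical iterator
# if len(node.get_child_nodes()) >= 2:
# pa = stats.partition_asymmetry(node)
# ratio = stats.bifurcation_sibling_ratio(node, where='remote')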
| {
"content_hash": "9a5f30ee1255b1df6d81d62f138cb2d9",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 113,
"avg_line_length": 34.880907372400756,
"alnum_prop": 0.5175590721872968,
"repo_name": "WillemWybo/SGF_formalism",
"id": "c1f78c6b2ac55f3b862b11203fcc852b071087f2",
"size": "18452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/btstats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "40400"
},
{
"name": "C++",
"bytes": "18840"
},
{
"name": "Python",
"bytes": "549915"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
from slipstream.command.DescribeInstancesCommand import DescribeInstancesCommand
from slipstream_okeanos.OkeanosCommand import OkeanosCommand
class OkeanosDescribeInstances(DescribeInstancesCommand, OkeanosCommand):
def __init__(self):
super(OkeanosDescribeInstances, self).__init__()
def _vm_get_state(self, cc, vm):
"""Delegate the VM state lookup to the cloud connector `cc`."""
return cc._vm_get_state(vm)
| {
"content_hash": "34385c2ccf57e9e47920fa814dbcb15b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 80,
"avg_line_length": 31.25,
"alnum_prop": 0.7626666666666667,
"repo_name": "CELAR/SlipStreamConnector-Okeanos",
"id": "ba3cfe46b208115749fca92dd7480fc5d7f651eb",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tar/slipstream_okeanos/OkeanosDescribeInstancesCommand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "15568"
},
{
"name": "Python",
"bytes": "78993"
},
{
"name": "Shell",
"bytes": "1924"
},
{
"name": "Smarty",
"bytes": "584"
}
],
"symlink_target": ""
} |
import sklearn.datasets as sd
import argparse
p = argparse.ArgumentParser(description="Create a toy dataset for testing tinygrad.")
p.add_argument("--task", type=str, choices=["classification", "regression"], dest="task", required=True)
p.add_argument("--rows", type=int, default=100, dest="rows", required=False, help="Number of data points")
p.add_argument("--features", type=int, default=4, dest="features", required=False, help="Number of features")
p.add_argument("--file", type=str, default=100, dest="file", required=True, help="Filename for dataset")
o = p.parse_args()
if o.task == "classification":
with open(o.file, "w") as fh:
x, y = sd.make_classification(n_samples=o.rows, n_features=o.features, n_informative=o.features, n_redundant=0)
for x_row, y_row in zip(x, y):
fh.write(("{:.4f},"*x_row.size).format(*x_row))
fh.write("{}\n".format(float(y_row)))
| {
"content_hash": "000053a20d4bb1eeec170e0f6e9574c2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 119,
"avg_line_length": 57.125,
"alnum_prop": 0.6739606126914661,
"repo_name": "nikoreun/tinygrad",
"id": "011dfdef1c6a2344a55a2dbd35817bdb4904d598",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/create_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "40085"
},
{
"name": "CMake",
"bytes": "3873"
},
{
"name": "Python",
"bytes": "914"
},
{
"name": "Shell",
"bytes": "758"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('desktop', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='document2',
name='connector',
field=models.ForeignKey(blank=True, help_text='Connector.', null=True, on_delete=django.db.models.deletion.CASCADE, to='desktop.Connector', verbose_name='Connector'),
),
]
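# Hedged usage note (an addition, not part of the migration): once applied via
# `manage.py migrate`, the new nullable FK can be queried through the ORM, e.g.
# Document2.objects.filter(connector__isnull=False)
# The import path for Document2 is assumed from the 'desktop' app dependency.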
| {
"content_hash": "adf8c73b520b4beaaeccfaac3bf3e93c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 178,
"avg_line_length": 28,
"alnum_prop": 0.6484962406015038,
"repo_name": "cloudera/hue",
"id": "420751ad33951c1d5cbf06a64e957fb249f7ee60",
"size": "606",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/src/desktop/org_migrations/0002_document2_connector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |