text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Protein Block assignation --- :mod:`pbxplore.assignment`
=========================================================
.. autofunction:: assign
"""
# Third-party module
import numpy
# Local module
from . import PB
def assign(dihedrals, pb_ref=PB.REFERENCES):
    """
    Assign Protein Blocks.

    Dihedral angles are provided as a dictionary with one item per residue.
    The key is the residue number, and the value is a dictionary with phi and
    psi as keys, and the dihedral angles as values.

    The protein block definitions are provided as a dictionary. Each key is a
    block name, the values are lists of dihedral angles.

    Parameters
    ----------
    dihedrals : dict
        Phi and psi dihedral angles for each residue.
    pb_ref : dict
        The definition of the protein blocks.

    Returns
    -------
    str
        One PB letter per residue, in residue-number order. 'Z' marks
        residues that cannot be assigned (chain termini, missing residues,
        or undefined angles).
    """
    pb_seq = ""
    # Transform the dict into a numpy array with the right order
    # (sorted keys, so rows line up with the sorted PB names)
    ref = numpy.array([pb_ref[key] for key in sorted(pb_ref)])
    # iterate over all residues
    for res in sorted(dihedrals):
        angles = []
        # try to get all eight angles required for PB assignment
        try:
            angles.append(dihedrals[res - 2]["psi"])
            angles.append(dihedrals[res - 1]["phi"])
            angles.append(dihedrals[res - 1]["psi"])
            angles.append(dihedrals[res]["phi"])
            angles.append(dihedrals[res]["psi"])
            angles.append(dihedrals[res + 1]["phi"])
            angles.append(dihedrals[res + 1]["psi"])
            angles.append(dihedrals[res + 2]["phi"])
            # check for bad angles
            # (error while calculating torsion: missing atoms)
            if None in angles:
                pb_seq += "Z"
                continue
        # cannot get required angles (Nter, Cter or missing residues)
        # -> cannot assign PB
        # jump to next residue
        except KeyError:
            pb_seq += "Z"
            continue
        # Compute the RMSD between all reference angles and angles of the
        # current residue -- vectorized over the (n_blocks, 8) reference array.
        # Note ((ref - ang + 180) % 360 - 180) gives the real (wrapped)
        # difference between two angles.
        rmsda = numpy.sum(((ref - angles + 180) % 360 - 180) ** 2, axis=1)
        # Find the PB with the lowest RMSD
        pb_seq += PB.NAMES[numpy.argmin(rmsda)]
    return pb_seq
| {
"content_hash": "56795faa18b91a8ff19d06950f538910",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 89,
"avg_line_length": 31.554054054054053,
"alnum_prop": 0.5892933618843683,
"repo_name": "HubLot/PBxplore",
"id": "56f1eba3bd244d23ff0cb0d6259bf452604ae848",
"size": "2383",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pbxplore/assignment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "110164"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from requests.exceptions import HTTPError
from ..forms import CreditCardPaymentFormWithName
from ..core import get_credit_card_issuer
class PaymentForm(CreditCardPaymentFormWithName):
    """Credit-card payment form that creates the payment via the PayPal
    provider during validation."""
    # Card brands this backend accepts.
    VALID_TYPES = ['visa', 'mastercard', 'discover', 'amex']
    def clean(self):
        """Validate card data, then create the remote PayPal payment.

        On HTTP 400 the per-field issues from PayPal's JSON error body are
        surfaced as form errors and the payment is marked 'error'; any other
        HTTP error yields a generic form error. On success the payment is
        'confirmed' (when the provider captures immediately) or 'preauth'.
        """
        cleaned_data = super(PaymentForm, self).clean()
        if not self.errors:
            # Only create the remote payment once; skip if already created.
            if not self.payment.transaction_id:
                number = cleaned_data.get('number')
                card_type, _card_issuer = get_credit_card_issuer(number)
                # NOTE(review): request_data is built but never passed to
                # create_payment below (cleaned_data is passed instead) --
                # confirm whether this is intentional.
                request_data = {'type': card_type}
                request_data.update(cleaned_data)
                try:
                    data = self.provider.create_payment(
                        self.payment, cleaned_data)
                except HTTPError as e:
                    response = e.response
                    if response.status_code == 400:
                        # PayPal returns a JSON body listing field issues.
                        error_data = e.response.json()
                        errors = [
                            error['issue'] for error in error_data['details']]
                    else:
                        errors = ['Internal PayPal error']
                    self._errors['__all__'] = self.error_class(errors)
                    self.payment.change_status('error')
                else:
                    self.payment.transaction_id = data['id']
                    if self.provider._capture:
                        self.payment.captured_amount = self.payment.total
                        self.payment.change_status('confirmed')
                    else:
                        self.payment.change_status('preauth')
        return cleaned_data
| {
"content_hash": "ba56f87066c57c108f4ed3d053838332",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 40.45238095238095,
"alnum_prop": 0.5197174808711007,
"repo_name": "artursmet/django-payments",
"id": "8cda7562f1a71bf485f3edff0710295a06c0bf90",
"size": "1699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payments/paypal/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "899"
},
{
"name": "JavaScript",
"bytes": "2625"
},
{
"name": "Python",
"bytes": "160376"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from concurrent import futures
from functools import partial, reduce
import json
import numpy as np
import os
import re
import operator
import urllib.parse
import pyarrow as pa
import pyarrow.lib as lib
import pyarrow._parquet as _parquet
from pyarrow._parquet import (ParquetReader, Statistics, # noqa
FileMetaData, RowGroupMetaData,
ColumnChunkMetaData,
ParquetSchema, ColumnSchema)
from pyarrow.fs import (LocalFileSystem, FileSystem,
_resolve_filesystem_and_path, _ensure_filesystem)
from pyarrow import filesystem as legacyfs
from pyarrow.util import guid, _is_path_like, _stringify_path
# URI schemes for which only the path component should be kept.
_URI_STRIP_SCHEMES = ('hdfs',)
def _parse_uri(path):
    """Strip the scheme from known URIs; return other paths unchanged."""
    path = _stringify_path(path)
    parsed = urllib.parse.urlparse(path)
    # ARROW-4073: only strip the scheme for whitelisted schemes -- on
    # Windows, stripping it would also remove a drive letter, if any.
    return parsed.path if parsed.scheme in _URI_STRIP_SCHEMES else path
def _get_filesystem_and_path(passed_filesystem, path):
    """Return a (filesystem, path) pair, inferring the filesystem when
    none was passed."""
    if passed_filesystem is None:
        return legacyfs.resolve_filesystem_and_path(path, passed_filesystem)
    fs = legacyfs._ensure_filesystem(passed_filesystem)
    return fs, _parse_uri(path)
def _check_contains_null(val):
if isinstance(val, bytes):
for byte in val:
if isinstance(byte, bytes):
compare_to = chr(0)
else:
compare_to = 0
if byte == compare_to:
return True
elif isinstance(val, str):
return '\x00' in val
return False
def _check_filters(filters, check_null_strings=True):
"""
Check if filters are well-formed.
"""
if filters is not None:
if len(filters) == 0 or any(len(f) == 0 for f in filters):
raise ValueError("Malformed filters")
if isinstance(filters[0][0], str):
# We have encountered the situation where we have one nesting level
# too few:
# We have [(,,), ..] instead of [[(,,), ..]]
filters = [filters]
if check_null_strings:
for conjunction in filters:
for col, op, val in conjunction:
if (
isinstance(val, list) and
all(_check_contains_null(v) for v in val) or
_check_contains_null(val)
):
raise NotImplementedError(
"Null-terminated binary strings are not supported "
"as filter values."
)
return filters
_DNF_filter_doc = """Predicates are expressed in disjunctive normal form (DNF), like
``[[('x', '=', 0), ...], ...]``. DNF allows arbitrary boolean logical
combinations of single column predicates. The innermost tuples each
describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective and
multiple column predicate. Finally, the most outer list combines these
filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation."""
def _filters_to_expression(filters):
    """
    Check if filters are well-formed and convert them to a dataset
    Expression.

    See _DNF_filter_doc above for more details.
    """
    import pyarrow.dataset as ds
    if isinstance(filters, ds.Expression):
        return filters
    filters = _check_filters(filters, check_null_strings=False)

    def convert_single_predicate(col, op, val):
        field = ds.field(col)
        # Dispatch table: operator symbol -> lazily-built expression.
        builders = {
            '=': lambda: field == val,
            '==': lambda: field == val,
            '!=': lambda: field != val,
            '<': lambda: field < val,
            '>': lambda: field > val,
            '<=': lambda: field <= val,
            '>=': lambda: field >= val,
            'in': lambda: field.isin(val),
            'not in': lambda: ~field.isin(val),
        }
        if op not in builders:
            raise ValueError(
                '"{0}" is not a valid operator in predicates.'.format(
                    (col, op, val)))
        return builders[op]()

    disjunction_members = []
    for conjunction in filters:
        anded = reduce(operator.and_,
                       (convert_single_predicate(col, op, val)
                        for col, op, val in conjunction))
        disjunction_members.append(anded)
    return reduce(operator.or_, disjunction_members)
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile:
    """
    Reader interface for a single Parquet file.

    Parameters
    ----------
    source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
        Readable source. For passing bytes or buffer-like file containing a
        Parquet file, use pyarrow.BufferReader.
    metadata : FileMetaData, default None
        Use existing metadata object, rather than reading from file.
    common_metadata : FileMetaData, default None
        Will be used in reads for pandas schema metadata if not found in the
        main file's metadata, no other uses at the moment.
    read_dictionary : list, default None
        Forwarded unchanged to ``ParquetReader.open`` -- presumably the
        column names to keep dictionary-encoded (confirm against the
        ParquetReader documentation).
    memory_map : bool, default False
        If the source is a file path, use a memory map to read file, which can
        improve performance in some environments.
    buffer_size : int, default 0
        If positive, perform read buffering when deserializing individual
        column chunks. Otherwise IO calls are unbuffered.
    """
    def __init__(self, source, metadata=None, common_metadata=None,
                 read_dictionary=None, memory_map=False, buffer_size=0):
        self.reader = ParquetReader()
        self.reader.open(source, use_memory_map=memory_map,
                         buffer_size=buffer_size,
                         read_dictionary=read_dictionary, metadata=metadata)
        self.common_metadata = common_metadata
        # Maps every dotted column-path prefix to the flat column indices it
        # covers, so nested fields can be selected by prefix.
        self._nested_paths_by_prefix = self._build_nested_paths()
    def _build_nested_paths(self):
        """Build the prefix -> column-index map used by column selection."""
        paths = self.reader.column_paths
        result = defaultdict(list)
        for i, path in enumerate(paths):
            key = path[0]
            rest = path[1:]
            while True:
                result[key].append(i)
                if not rest:
                    break
                # Extend the prefix one nesting level at a time, so 'a',
                # 'a.b' and 'a.b.c' all map to column i.
                key = '.'.join((key, rest[0]))
                rest = rest[1:]
        return result
    @property
    def metadata(self):
        # FileMetaData of the open file.
        return self.reader.metadata
    @property
    def schema(self):
        """
        Return the Parquet schema, unconverted to Arrow types
        """
        return self.metadata.schema
    @property
    def schema_arrow(self):
        """
        Return the inferred Arrow schema, converted from the whole Parquet
        file's schema
        """
        return self.reader.schema_arrow
    @property
    def num_row_groups(self):
        # Number of row groups in the file.
        return self.reader.num_row_groups
    def read_row_group(self, i, columns=None, use_threads=True,
                       use_pandas_metadata=False):
        """
        Read a single row group from a Parquet file.

        Parameters
        ----------
        i : int
            Index of the row group to read.
        columns: list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the row group as a table (of columns)
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_row_group(i, column_indices=column_indices,
                                          use_threads=use_threads)
    def read_row_groups(self, row_groups, columns=None, use_threads=True,
                        use_pandas_metadata=False):
        """
        Read multiple row groups from a Parquet file.

        Parameters
        ----------
        row_groups: list
            Only these row groups will be read from the file.
        columns: list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the row groups as a table (of columns).
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_row_groups(row_groups,
                                           column_indices=column_indices,
                                           use_threads=use_threads)
    def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
        """
        Read a Table from Parquet format.

        Parameters
        ----------
        columns: list
            If not None, only these columns will be read from the file. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the file as a table (of columns).
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_all(column_indices=column_indices,
                                    use_threads=use_threads)
    def scan_contents(self, columns=None, batch_size=65536):
        """
        Read contents of file for the given columns and batch size.

        Notes
        -----
        This function's primary purpose is benchmarking.
        The scan is executed on a single thread.

        Parameters
        ----------
        columns : list of integers, default None
            Select columns to read, if None scan all columns.
        batch_size : int, default 64K
            Number of rows to read at a time internally.

        Returns
        -------
        num_rows : number of rows in file
        """
        column_indices = self._get_column_indices(columns)
        return self.reader.scan_contents(column_indices,
                                         batch_size=batch_size)
    def _get_column_indices(self, column_names, use_pandas_metadata=False):
        """Translate column names (possibly nested-field prefixes) into flat
        column indices; None means all columns."""
        if column_names is None:
            return None
        indices = []
        for name in column_names:
            if name in self._nested_paths_by_prefix:
                indices.extend(self._nested_paths_by_prefix[name])
        if use_pandas_metadata:
            # Also include any pandas index columns recorded in the key-value
            # metadata: file-level metadata takes precedence over the shared
            # common metadata.
            file_keyvalues = self.metadata.metadata
            common_keyvalues = (self.common_metadata.metadata
                                if self.common_metadata is not None
                                else None)
            if file_keyvalues and b'pandas' in file_keyvalues:
                index_columns = _get_pandas_index_columns(file_keyvalues)
            elif common_keyvalues and b'pandas' in common_keyvalues:
                index_columns = _get_pandas_index_columns(common_keyvalues)
            else:
                index_columns = []
            if indices is not None and index_columns:
                # dict entries are skipped -- presumably descriptors without a
                # backing column (e.g. RangeIndex); confirm against the pandas
                # metadata spec.
                indices += [self.reader.column_name_idx(descr)
                            for descr in index_columns
                            if not isinstance(descr, dict)]
        return indices
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
def _sanitized_spark_field_name(name):
return _SPARK_DISALLOWED_CHARS.sub('_', name)
def _sanitize_schema(schema, flavor):
if 'spark' in flavor:
sanitized_fields = []
schema_changed = False
for field in schema:
name = field.name
sanitized_name = _sanitized_spark_field_name(name)
if sanitized_name != name:
schema_changed = True
sanitized_field = pa.field(sanitized_name, field.type,
field.nullable, field.metadata)
sanitized_fields.append(sanitized_field)
else:
sanitized_fields.append(field)
new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
return new_schema, schema_changed
else:
return schema, False
def _sanitize_table(table, new_schema, flavor):
# TODO: This will not handle prohibited characters in nested field names
if 'spark' in flavor:
column_data = [table[i] for i in range(table.num_columns)]
return pa.Table.from_arrays(column_data, schema=new_schema)
else:
return table
_parquet_writer_arg_docs = """version : {"1.0", "2.0"}, default "1.0"
Determine which Parquet logical types are available for use, whether the
reduced set from the Parquet 1.x.x format or the expanded logical types
added in format version 2.0.0 and after. Note that files written with
version='2.0' may not be readable in all Parquet implementations, so
version='1.0' is likely the choice that maximizes file compatibility. Some
features, such as lossless storage of nanosecond timestamps as INT64
physical storage, are only available with version='2.0'. The Parquet 2.0.0
format version also introduced a new serialized data page format; this can
be enabled separately using the data_page_version option.
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
use_deprecated_int96_timestamps : bool, default None
Write timestamps to INT96 Parquet format. Defaults to False unless enabled
by flavor argument. This take priority over the coerce_timestamps option.
coerce_timestamps : str, default None
Cast timestamps a particular resolution. The defaults depends on `version`.
For ``version='1.0'`` (the default), nanoseconds will be cast to
microseconds ('us'), and seconds to milliseconds ('ms') by default. For
``version='2.0'``, the original resolution is preserved and no casting
is done by default. The casting might result in loss of data, in which
case ``allow_truncated_timestamps=True`` can be used to suppress the
raised exception.
Valid values: {None, 'ms', 'us'}
data_page_size : int, default None
Set a target threshold for the approximate encoded size of data
pages within a column chunk (in bytes). If None, use the default data page
size of 1MByte.
allow_truncated_timestamps : bool, default False
Allow loss of data when coercing timestamps to a particular
resolution. E.g. if microsecond or nanosecond data is lost when coercing to
'ms', do not raise an exception.
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list
Specify if we should write statistics in general (default is True) or only
for some columns.
flavor : {'spark'}, default None
Sanitize schema or set other compatibility options to work with
various target systems.
filesystem : FileSystem, default None
If nothing passed, will be inferred from `where` if path-like, else
`where` is already a file-like object so no filesystem is needed.
compression_level: int or dict, default None
Specify the compression level for a codec, either on a general basis or
per-column. If None is passed, arrow selects the compression level for
the compression codec in use. The compression level has a different
meaning for each codec, so you have to read the documentation of the
codec you are using.
An exception is thrown if the compression codec does not allow specifying
a compression level.
use_byte_stream_split: bool or list, default False
Specify if the byte_stream_split encoding should be used in general or
only for some columns. If both dictionary and byte_stream_stream are
enabled, then dictionary is preferred.
The byte_stream_split encoding is valid only for floating-point data types
and should be combined with a compression codec.
data_page_version : {"1.0", "2.0"}, default "1.0"
The serialized Parquet data page format version to write, defaults to
1.0. This does not impact the file schema logical types and Arrow to
Parquet type casting behavior; for that use the "version" option.
"""
class ParquetWriter:
    __doc__ = """
Class for incrementally building a Parquet file for Arrow tables.

Parameters
----------
where : path or file-like object
schema : arrow Schema
{}
**options : dict
    If options contains a key `metadata_collector` then the
    corresponding value is assumed to be a list (or any object with
    `.append` method) that will be filled with the file metadata instance
    of the written file.
""".format(_parquet_writer_arg_docs)
    def __init__(self, where, schema, filesystem=None,
                 flavor=None,
                 version='1.0',
                 use_dictionary=True,
                 compression='snappy',
                 write_statistics=True,
                 use_deprecated_int96_timestamps=None,
                 compression_level=None,
                 use_byte_stream_split=False,
                 writer_engine_version=None,
                 data_page_version='1.0',
                 **options):
        if use_deprecated_int96_timestamps is None:
            # Use int96 timestamps for Spark
            if flavor is not None and 'spark' in flavor:
                use_deprecated_int96_timestamps = True
            else:
                use_deprecated_int96_timestamps = False
        self.flavor = flavor
        if flavor is not None:
            # Flavors may rename fields (e.g. Spark-illegal characters);
            # remember whether tables must be rewritten in write_table().
            schema, self.schema_changed = _sanitize_schema(schema, flavor)
        else:
            self.schema_changed = False
        self.schema = schema
        self.where = where
        # If we open a file using a filesystem, store file handle so we can be
        # sure to close it when `self.close` is called.
        self.file_handle = None
        filesystem, path = _resolve_filesystem_and_path(
            where, filesystem, allow_legacy_filesystem=True
        )
        if filesystem is not None:
            if isinstance(filesystem, legacyfs.FileSystem):
                # legacy filesystem (eg custom subclass)
                # TODO deprecate
                sink = self.file_handle = filesystem.open(path, 'wb')
            else:
                # ARROW-10480: do not auto-detect compression. While
                # a filename like foo.parquet.gz is nonconforming, it
                # shouldn't implicitly apply compression.
                sink = self.file_handle = filesystem.open_output_stream(
                    path, compression=None)
        else:
            sink = where
        self._metadata_collector = options.pop('metadata_collector', None)
        # NOTE(review): the writer_engine_version parameter is accepted but
        # not consulted; 'V2' is always passed to the underlying writer --
        # confirm this is intentional.
        engine_version = 'V2'
        self.writer = _parquet.ParquetWriter(
            sink, schema,
            version=version,
            compression=compression,
            use_dictionary=use_dictionary,
            write_statistics=write_statistics,
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            compression_level=compression_level,
            use_byte_stream_split=use_byte_stream_split,
            writer_engine_version=engine_version,
            data_page_version=data_page_version,
            **options)
        self.is_open = True
    def __del__(self):
        # Best-effort close if the user forgot; guarded because __init__ may
        # have failed before is_open was set.
        if getattr(self, 'is_open', False):
            self.close()
    def __enter__(self):
        return self
    def __exit__(self, *args, **kwargs):
        self.close()
        # return false since we want to propagate exceptions
        return False
    def write_table(self, table, row_group_size=None):
        """Write an Arrow table as one (or more) row groups; the table schema
        must match the schema the writer was created with (metadata ignored)."""
        if self.schema_changed:
            table = _sanitize_table(table, self.schema, self.flavor)
        assert self.is_open
        if not table.schema.equals(self.schema, check_metadata=False):
            msg = ('Table schema does not match schema used to create file: '
                   '\ntable:\n{!s} vs. \nfile:\n{!s}'
                   .format(table.schema, self.schema))
            raise ValueError(msg)
        self.writer.write_table(table, row_group_size=row_group_size)
    def close(self):
        """Finalize the file; idempotent. Also closes any file handle this
        writer opened itself and reports metadata to the collector."""
        if self.is_open:
            self.writer.close()
            self.is_open = False
            if self._metadata_collector is not None:
                self._metadata_collector.append(self.writer.metadata)
        if self.file_handle is not None:
            self.file_handle.close()
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece:
    """
    A single chunk of a potentially larger Parquet dataset to read.

    The arguments will indicate to read either a single row group or all row
    groups, and whether to add partition keys to the resulting pyarrow.Table.

    Parameters
    ----------
    path : str or pathlib.Path
        Path to file in the file system where this piece is located.
    open_file_func : callable
        Function to use for obtaining file handle to dataset piece.
    partition_keys : list of tuples
        Two-element tuples of ``(column name, ordinal index)``.
    row_group : int, default None
        Row group to load. By default, reads all row groups.
    """
    def __init__(self, path, open_file_func=partial(open, mode='rb'),
                 file_options=None, row_group=None, partition_keys=None):
        self.path = _stringify_path(path)
        self.open_file_func = open_file_func
        self.row_group = row_group
        self.partition_keys = partition_keys or []
        # Keyword arguments forwarded to ParquetFile when opening this piece.
        self.file_options = file_options or {}
    def __eq__(self, other):
        # Equality ignores open_file_func/file_options on purpose: two pieces
        # are "the same" if they address the same data.
        if not isinstance(other, ParquetDatasetPiece):
            return False
        return (self.path == other.path and
                self.row_group == other.row_group and
                self.partition_keys == other.partition_keys)
    def __repr__(self):
        return ('{}({!r}, row_group={!r}, partition_keys={!r})'
                .format(type(self).__name__, self.path,
                        self.row_group,
                        self.partition_keys))
    def __str__(self):
        result = ''
        if len(self.partition_keys) > 0:
            partition_str = ', '.join('{}={}'.format(name, index)
                                      for name, index in self.partition_keys)
            result += 'partition[{}] '.format(partition_str)
        result += self.path
        if self.row_group is not None:
            result += ' | row_group={}'.format(self.row_group)
        return result
    def get_metadata(self):
        """
        Return the file's metadata.

        Returns
        -------
        metadata : FileMetaData
        """
        # NOTE(review): the ParquetFile opened here is not explicitly closed;
        # presumably its reader is finalized on garbage collection -- confirm.
        f = self.open()
        return f.metadata
    def open(self):
        """
        Return instance of ParquetFile.
        """
        reader = self.open_file_func(self.path)
        # open_file_func may already return a ParquetFile; only wrap raw
        # handles.
        if not isinstance(reader, ParquetFile):
            reader = ParquetFile(reader, **self.file_options)
        return reader
    def read(self, columns=None, use_threads=True, partitions=None,
             file=None, use_pandas_metadata=False):
        """
        Read this piece as a pyarrow.Table.

        Parameters
        ----------
        columns : list of column names, default None
        use_threads : bool, default True
            Perform multi-threaded column reads.
        partitions : ParquetPartitions, default None
        file : file-like object
            Passed to ParquetFile.

        Returns
        -------
        table : pyarrow.Table
        """
        if self.open_file_func is not None:
            reader = self.open()
        elif file is not None:
            reader = ParquetFile(file, **self.file_options)
        else:
            # try to read the local path
            reader = ParquetFile(self.path, **self.file_options)
        options = dict(columns=columns,
                       use_threads=use_threads,
                       use_pandas_metadata=use_pandas_metadata)
        if self.row_group is not None:
            table = reader.read_row_group(self.row_group, **options)
        else:
            table = reader.read(**options)
        if len(self.partition_keys) > 0:
            if partitions is None:
                raise ValueError('Must pass partition sets')
            # Here, the index is the categorical code of the partition where
            # this piece is located. Suppose we had
            #
            # /foo=a/0.parq
            # /foo=b/0.parq
            # /foo=c/0.parq
            #
            # Then we assign a=0, b=1, c=2. And the resulting Table pieces will
            # have a DictionaryArray column named foo having the constant index
            # value as indicated. The distinct categories of the partition have
            # been computed in the ParquetManifest
            for i, (name, index) in enumerate(self.partition_keys):
                # The partition code is the same for all values in this piece
                indices = np.full(len(table), index, dtype='i4')
                # This is set of all partition values, computed as part of the
                # manifest, so ['a', 'b', 'c'] as in our example above.
                dictionary = partitions.levels[i].dictionary
                arr = pa.DictionaryArray.from_arrays(indices, dictionary)
                table = table.append_column(name, arr)
        return table
class PartitionSet:
    """
    A data structure for cataloguing the observed Parquet partitions at a
    particular level. So if we have

    /foo=a/bar=0
    /foo=a/bar=1
    /foo=a/bar=2
    /foo=b/bar=0
    /foo=b/bar=1
    /foo=b/bar=2

    Then we have two partition sets, one for foo, another for bar. As we visit
    levels of the partition hierarchy, a PartitionSet tracks the distinct
    values and assigns categorical codes to use when reading the pieces
    """
    def __init__(self, name, keys=None):
        self.name = name
        self.keys = keys or []
        # Reverse lookup: key -> categorical code.
        self.key_indices = {key: code for code, key in enumerate(self.keys)}
        self._dictionary = None
    def get_index(self, key):
        """
        Get the index of the partition value if it is known, otherwise assign
        one
        """
        try:
            return self.key_indices[key]
        except KeyError:
            code = len(self.key_indices)
            self.keys.append(key)
            self.key_indices[key] = code
            return code
    @property
    def dictionary(self):
        # Arrow array of the distinct keys, built lazily and cached.
        if self._dictionary is not None:
            return self._dictionary
        if not self.keys:
            raise ValueError('No known partition keys')
        # Only integer and string partition types are supported right now
        try:
            as_integers = [int(x) for x in self.keys]
            dictionary = lib.array(as_integers)
        except ValueError:
            dictionary = lib.array(self.keys)
        self._dictionary = dictionary
        return dictionary
    @property
    def is_sorted(self):
        return sorted(self.keys) == list(self.keys)
class ParquetPartitions:
    """Ordered collection of PartitionSets, one per level of the partition
    directory hierarchy."""
    def __init__(self):
        self.levels = []
        self.partition_names = set()
    def __len__(self):
        return len(self.levels)
    def __getitem__(self, i):
        return self.levels[i]
    def equals(self, other):
        if not isinstance(other, ParquetPartitions):
            raise TypeError('`other` must be an instance of ParquetPartitions')
        return (self.levels == other.levels and
                self.partition_names == other.partition_names)
    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented
    def get_index(self, level, name, key):
        """
        Record a partition value at a particular level, returning the distinct
        code for that value at that level.

        Example:

        partitions.get_index(1, 'foo', 'a') returns 0
        partitions.get_index(1, 'foo', 'b') returns 1
        partitions.get_index(1, 'foo', 'c') returns 2
        partitions.get_index(1, 'foo', 'a') returns 0

        Parameters
        ----------
        level : int
            The nesting level of the partition we are observing
        name : str
            The partition name
        key : str or int
            The partition value
        """
        if level == len(self.levels):
            if name in self.partition_names:
                raise ValueError('{} was the name of the partition in '
                                 'another level'.format(name))
            part_set = PartitionSet(name)
            self.levels.append(part_set)
            self.partition_names.add(name)
        return self.levels[level].get_index(key)
    def filter_accepts_partition(self, part_key, filter, level):
        """Return True if the partition value named by *part_key* satisfies
        the single (column, op, value) *filter* predicate; predicates on other
        columns always accept."""
        p_column, p_value_index = part_key
        f_column, op, f_value = filter
        if p_column != f_column:
            return True
        f_type = type(f_value)
        if isinstance(f_value, set):
            if not f_value:
                raise ValueError("Cannot use empty set as filter value")
            if op not in {'in', 'not in'}:
                # BUG FIX: these messages previously passed %-style arguments
                # to ValueError without formatting, so the placeholder was
                # never substituted; format explicitly instead.
                raise ValueError("Op '{}' not supported with set value"
                                 .format(op))
            if len({type(item) for item in f_value}) != 1:
                raise ValueError("All elements of set '{}' must be of"
                                 " same type".format(f_value))
            f_type = type(next(iter(f_value)))
        # Coerce the stored (string) partition value to the filter value's
        # type before comparing.
        p_value = f_type(self.levels[level]
                         .dictionary[p_value_index].as_py())
        if op == "=" or op == "==":
            return p_value == f_value
        elif op == "!=":
            return p_value != f_value
        elif op == '<':
            return p_value < f_value
        elif op == '>':
            return p_value > f_value
        elif op == '<=':
            return p_value <= f_value
        elif op == '>=':
            return p_value >= f_value
        elif op == 'in':
            return p_value in f_value
        elif op == 'not in':
            return p_value not in f_value
        else:
            raise ValueError("'{}' is not a valid operator in predicates."
                             .format(filter[1]))
class ParquetManifest:
    """Walks a (possibly hive-partitioned) dataset directory, collecting the
    data-file pieces, partition values and any _metadata/_common_metadata
    paths found along the way."""
    def __init__(self, dirpath, open_file_func=None, filesystem=None,
                 pathsep='/', partition_scheme='hive', metadata_nthreads=1):
        filesystem, dirpath = _get_filesystem_and_path(filesystem, dirpath)
        self.filesystem = filesystem
        self.open_file_func = open_file_func
        self.pathsep = pathsep
        self.dirpath = _stringify_path(dirpath)
        self.partition_scheme = partition_scheme
        self.partitions = ParquetPartitions()
        self.pieces = []
        self._metadata_nthreads = metadata_nthreads
        # Directory levels may be scanned concurrently (see _visit_directories).
        self._thread_pool = futures.ThreadPoolExecutor(
            max_workers=metadata_nthreads)
        self.common_metadata_path = None
        self.metadata_path = None
        self._visit_level(0, self.dirpath, [])
        # Due to concurrency, pieces will potentially by out of order if the
        # dataset is partitioned so we sort them to yield stable results
        self.pieces.sort(key=lambda piece: piece.path)
        if self.common_metadata_path is None:
            # _common_metadata is a subset of _metadata
            self.common_metadata_path = self.metadata_path
        self._thread_pool.shutdown()
    def _visit_level(self, level, base_path, part_keys):
        # Scan one directory: record metadata sidecar files, collect data
        # files, and recurse into partition subdirectories.
        fs = self.filesystem
        _, directories, files = next(fs.walk(base_path))
        filtered_files = []
        for path in files:
            full_path = self.pathsep.join((base_path, path))
            if path.endswith('_common_metadata'):
                self.common_metadata_path = full_path
            elif path.endswith('_metadata'):
                self.metadata_path = full_path
            elif self._should_silently_exclude(path):
                continue
            else:
                filtered_files.append(full_path)
        # ARROW-1079: Filter out "private" directories starting with underscore
        filtered_directories = [self.pathsep.join((base_path, x))
                                for x in directories
                                if not _is_private_directory(x)]
        filtered_files.sort()
        filtered_directories.sort()
        if len(filtered_files) > 0 and len(filtered_directories) > 0:
            # A directory must hold either data files or partition
            # subdirectories, never both.
            raise ValueError('Found files in an intermediate '
                             'directory: {}'.format(base_path))
        elif len(filtered_directories) > 0:
            self._visit_directories(level, filtered_directories, part_keys)
        else:
            self._push_pieces(filtered_files, part_keys)
    def _should_silently_exclude(self, file_name):
        return (file_name.endswith('.crc') or  # Checksums
                file_name.endswith('_$folder$') or  # HDFS directories in S3
                file_name.startswith('.') or  # Hidden files starting with .
                file_name.startswith('_') or  # Hidden files starting with _
                file_name in EXCLUDED_PARQUET_PATHS)
    def _visit_directories(self, level, directories, part_keys):
        futures_list = []
        for path in directories:
            head, tail = _path_split(path, self.pathsep)
            name, key = _parse_hive_partition(tail)
            index = self.partitions.get_index(level, name, key)
            dir_part_keys = part_keys + [(name, index)]
            # If you have less threads than levels, the wait call will block
            # indefinitely due to multiple waits within a thread.
            if level < self._metadata_nthreads:
                future = self._thread_pool.submit(self._visit_level,
                                                  level + 1,
                                                  path,
                                                  dir_part_keys)
                futures_list.append(future)
            else:
                self._visit_level(level + 1, path, dir_part_keys)
        if futures_list:
            futures.wait(futures_list)
    def _parse_partition(self, dirname):
        if self.partition_scheme == 'hive':
            return _parse_hive_partition(dirname)
        else:
            raise NotImplementedError('partition schema: {}'
                                      .format(self.partition_scheme))
    def _push_pieces(self, files, part_keys):
        # All files in one leaf directory share the same partition keys.
        self.pieces.extend([
            ParquetDatasetPiece(path, partition_keys=part_keys,
                                open_file_func=self.open_file_func)
            for path in files
        ])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {}'.format(value))
return value.split('=', 1)
def _is_private_directory(x):
_, tail = os.path.split(x)
return (tail.startswith('_') or tail.startswith('.')) and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
# File basenames that are never parquet data files (Spark success markers).
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
class _ParquetDatasetMetadata:
    """Mutable bag of reader options shared between a ParquetDataset and the
    per-piece file opener (_open_dataset_file)."""
    __slots__ = ('fs', 'memory_map', 'read_dictionary', 'common_metadata',
                 'buffer_size')
def _open_dataset_file(dataset, path, meta=None):
    """Open *path* as a ParquetFile using the reader options carried on the
    _ParquetDatasetMetadata object *dataset*."""
    fs = dataset.fs
    # Local files can be opened by path directly; other filesystems hand
    # ParquetFile an already-opened binary stream.
    if fs is not None and not isinstance(fs, legacyfs.LocalFileSystem):
        path = fs.open(path, mode='rb')
    return ParquetFile(path,
                       metadata=meta,
                       memory_map=dataset.memory_map,
                       read_dictionary=dataset.read_dictionary,
                       common_metadata=dataset.common_metadata,
                       buffer_size=dataset.buffer_size)
_read_docstring_common = """\
read_dictionary : list, default None
List of names or column paths (for nested types) to read directly
as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
a flat column as dictionary-encoded pass the column name. For
nested types, you must pass the full column "path", which could be
something like level1.level2.list.item. Refer to the Parquet
file's schema to obtain the paths.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
partitioning : Partitioning or str or list of str, default "hive"
The partitioning scheme for a partitioned dataset. The default of "hive"
assumes directory names with key=value pairs like "/year=2009/month=11".
In addition, a scheme like "/2009/11" is also supported, in which case
you need to specify the field names or a full schema. See the
``pyarrow.dataset.partitioning()`` function for more details."""
class ParquetDataset:
    # The class docstring is assembled at import time so the argument
    # documentation shared with read_table()/read_pandas() stays in sync.
    __doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.
Parameters
----------
path_or_paths : str or List[str]
    A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
    If nothing passed, paths assumed to be found in the local on-disk
    filesystem.
metadata : pyarrow.parquet.FileMetaData
    Use metadata obtained elsewhere to validate file schemas.
schema : pyarrow.parquet.Schema
    Use schema obtained elsewhere to validate file schemas. Alternative to
    metadata parameter.
split_row_groups : bool, default False
    Divide files into pieces for each row group in the file.
validate_schema : bool, default True
    Check that individual file schemas are all the same / compatible.
filters : List[Tuple] or List[List[Tuple]] or None (default)
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    If `use_legacy_dataset` is True, filters can only reference partition
    keys and only a hive-style directory structure is supported. When
    setting `use_legacy_dataset` to False, also within-file level filtering
    and different partitioning schemes are supported.
{1}
metadata_nthreads: int, default 1
    How many threads to allow the thread pool which is used to read the
    dataset metadata. Increasing this is helpful to read partitioned
    datasets.
{0}
use_legacy_dataset : bool, default True
    Set to False to enable the new code path (experimental, using the
    new Arrow Dataset API). Among other things, this allows to pass
    `filters` for all columns and not only the partition keys, enables
    different partitioning schemes, etc.
""".format(_read_docstring_common, _DNF_filter_doc)
    def __new__(cls, path_or_paths=None, filesystem=None, schema=None,
                metadata=None, split_row_groups=False, validate_schema=True,
                filters=None, metadata_nthreads=1, read_dictionary=None,
                memory_map=False, buffer_size=0, partitioning="hive",
                use_legacy_dataset=None):
        # Dispatch to the datasets-based implementation (_ParquetDatasetV2)
        # unless the caller asked for the legacy code path; __init__ below
        # only runs for the legacy instance created at the end.
        if use_legacy_dataset is None:
            # if a new filesystem is passed -> default to new implementation
            if isinstance(filesystem, FileSystem):
                use_legacy_dataset = False
            # otherwise the default is still True
            else:
                use_legacy_dataset = True
        if not use_legacy_dataset:
            return _ParquetDatasetV2(path_or_paths, filesystem=filesystem,
                                     filters=filters,
                                     partitioning=partitioning,
                                     read_dictionary=read_dictionary,
                                     memory_map=memory_map,
                                     buffer_size=buffer_size,
                                     # unsupported keywords
                                     schema=schema, metadata=metadata,
                                     split_row_groups=split_row_groups,
                                     validate_schema=validate_schema,
                                     metadata_nthreads=metadata_nthreads)
        self = object.__new__(cls)
        return self
    def __init__(self, path_or_paths, filesystem=None, schema=None,
                 metadata=None, split_row_groups=False, validate_schema=True,
                 filters=None, metadata_nthreads=1, read_dictionary=None,
                 memory_map=False, buffer_size=0, partitioning="hive",
                 use_legacy_dataset=True):
        """Discover pieces, partitions and metadata sidecars (legacy path).
        Parameters are documented on the class docstring above.
        """
        if partitioning != "hive":
            raise ValueError(
                'Only "hive" for hive-like partitioning is supported when '
                'using use_legacy_dataset=True')
        # Reader options live on a separate _ParquetDatasetMetadata object so
        # they can be shared with the per-piece file opener.
        self._metadata = _ParquetDatasetMetadata()
        a_path = path_or_paths
        if isinstance(a_path, list):
            a_path = a_path[0]
        self._metadata.fs, _ = _get_filesystem_and_path(filesystem, a_path)
        if isinstance(path_or_paths, list):
            self.paths = [_parse_uri(path) for path in path_or_paths]
        else:
            self.paths = _parse_uri(path_or_paths)
        self._metadata.read_dictionary = read_dictionary
        self._metadata.memory_map = memory_map
        self._metadata.buffer_size = buffer_size
        (self.pieces,
         self.partitions,
         self.common_metadata_path,
         self.metadata_path) = _make_manifest(
            path_or_paths, self.fs, metadata_nthreads=metadata_nthreads,
            open_file_func=partial(_open_dataset_file, self._metadata)
        )
        if self.common_metadata_path is not None:
            with self.fs.open(self.common_metadata_path) as f:
                self._metadata.common_metadata = read_metadata(
                    f,
                    memory_map=memory_map
                )
        else:
            self._metadata.common_metadata = None
        # An explicit `metadata` argument takes precedence over a discovered
        # _metadata sidecar file.
        if metadata is None and self.metadata_path is not None:
            with self.fs.open(self.metadata_path) as f:
                self.metadata = read_metadata(f, memory_map=memory_map)
        else:
            self.metadata = metadata
        self.schema = schema
        self.split_row_groups = split_row_groups
        if split_row_groups:
            raise NotImplementedError("split_row_groups not yet implemented")
        if filters is not None:
            filters = _check_filters(filters)
            self._filter(filters)
        if validate_schema:
            self.validate_schemas()
    def equals(self, other):
        """Return True when *other* has the same filesystem class and
        identical discovery/reader state; raise TypeError when *other* is
        not a ParquetDataset."""
        if not isinstance(other, ParquetDataset):
            raise TypeError('`other` must be an instance of ParquetDataset')
        if self.fs.__class__ != other.fs.__class__:
            return False
        for prop in ('paths', 'memory_map', 'pieces', 'partitions',
                     'common_metadata_path', 'metadata_path',
                     'common_metadata', 'metadata', 'schema',
                     'buffer_size', 'split_row_groups'):
            if getattr(self, prop) != getattr(other, prop):
                return False
        return True
    def __eq__(self, other):
        # Delegate to equals(); non-ParquetDataset operands compare as
        # NotImplemented rather than raising.
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented
    def validate_schemas(self):
        """Resolve self.schema if unset, then check every piece's file
        schema against it (partition columns excluded)."""
        if self.metadata is None and self.schema is None:
            if self.common_metadata is not None:
                self.schema = self.common_metadata.schema
            else:
                self.schema = self.pieces[0].get_metadata().schema
        elif self.schema is None:
            self.schema = self.metadata.schema
        # Verify schemas are all compatible
        dataset_schema = self.schema.to_arrow_schema()
        # Exclude the partition columns from the schema, they are provided
        # by the path, not the DatasetPiece
        if self.partitions is not None:
            for partition_name in self.partitions.partition_names:
                if dataset_schema.get_field_index(partition_name) != -1:
                    field_idx = dataset_schema.get_field_index(partition_name)
                    dataset_schema = dataset_schema.remove(field_idx)
        for piece in self.pieces:
            file_metadata = piece.get_metadata()
            file_schema = file_metadata.schema.to_arrow_schema()
            if not dataset_schema.equals(file_schema, check_metadata=False):
                raise ValueError('Schema in {!s} was different. \n'
                                 '{!s}\n\nvs\n\n{!s}'
                                 .format(piece, file_schema,
                                         dataset_schema))
    def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
        """
        Read multiple Parquet files as a single pyarrow.Table.
        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file.
        use_threads : bool, default True
            Perform multi-threaded column reads
        use_pandas_metadata : bool, default False
            Passed through to each dataset piece.
        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).
        """
        tables = []
        for piece in self.pieces:
            table = piece.read(columns=columns, use_threads=use_threads,
                               partitions=self.partitions,
                               use_pandas_metadata=use_pandas_metadata)
            tables.append(table)
        all_data = lib.concat_tables(tables)
        if use_pandas_metadata:
            # We need to ensure that this metadata is set in the Table's schema
            # so that Table.to_pandas will construct pandas.DataFrame with the
            # right index
            common_metadata = self._get_common_pandas_metadata()
            current_metadata = all_data.schema.metadata or {}
            if common_metadata and b'pandas' not in current_metadata:
                all_data = all_data.replace_schema_metadata({
                    b'pandas': common_metadata})
        return all_data
    def read_pandas(self, **kwargs):
        """
        Read dataset including pandas metadata, if any. Other arguments passed
        through to ParquetDataset.read, see docstring for further details.
        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).
        """
        return self.read(use_pandas_metadata=True, **kwargs)
    def _get_common_pandas_metadata(self):
        """Return the raw b'pandas' metadata blob from _common_metadata, or
        None when no common metadata was discovered."""
        if self.common_metadata is None:
            return None
        keyvalues = self.common_metadata.metadata
        return keyvalues.get(b'pandas', None)
    def _filter(self, filters):
        """Drop pieces whose partition keys fail *filters*; filters are in
        DNF, so a piece is kept when any conjunction accepts all its keys."""
        accepts_filter = self.partitions.filter_accepts_partition
        def one_filter_accepts(piece, filter):
            return all(accepts_filter(part_key, filter, level)
                       for level, part_key in enumerate(piece.partition_keys))
        def all_filters_accept(piece):
            return any(all(one_filter_accepts(piece, f) for f in conjunction)
                       for conjunction in filters)
        self.pieces = [p for p in self.pieces if all_filters_accept(p)]
    # Reader options are stored on self._metadata; expose them read-only.
    fs = property(operator.attrgetter('_metadata.fs'))
    memory_map = property(operator.attrgetter('_metadata.memory_map'))
    read_dictionary = property(
        operator.attrgetter('_metadata.read_dictionary')
    )
    common_metadata = property(
        operator.attrgetter('_metadata.common_metadata')
    )
    buffer_size = property(operator.attrgetter('_metadata.buffer_size'))
def _make_manifest(path_or_paths, fs, pathsep='/', metadata_nthreads=1,
                   open_file_func=None):
    """Build (pieces, partitions, common_metadata_path, metadata_path) for
    either a dataset directory or an explicit list of files."""
    partitions = None
    common_metadata_path = None
    metadata_path = None
    # Dask passes a directory as a list of length 1
    if isinstance(path_or_paths, list) and len(path_or_paths) == 1:
        path_or_paths = path_or_paths[0]
    if _is_path_like(path_or_paths) and fs.isdir(path_or_paths):
        # Directory: walk it and pick up partitions and metadata sidecars.
        manifest = ParquetManifest(path_or_paths, filesystem=fs,
                                   open_file_func=open_file_func,
                                   pathsep=getattr(fs, "pathsep", "/"),
                                   metadata_nthreads=metadata_nthreads)
        return (manifest.pieces, manifest.partitions,
                manifest.common_metadata_path, manifest.metadata_path)
    # Explicit file list: one piece per file, no partitions or sidecars.
    if not isinstance(path_or_paths, list):
        path_or_paths = [path_or_paths]
    if len(path_or_paths) == 0:
        raise ValueError('Must pass at least one file path')
    pieces = []
    for path in path_or_paths:
        if not fs.isfile(path):
            raise OSError('Passed non-file path: {}'
                          .format(path))
        pieces.append(ParquetDatasetPiece(path, open_file_func=open_file_func))
    return pieces, partitions, common_metadata_path, metadata_path
class _ParquetDatasetV2:
    """
    ParquetDataset shim using the Dataset API under the hood.
    """
    def __init__(self, path_or_paths, filesystem=None, filters=None,
                 partitioning="hive", read_dictionary=None, buffer_size=None,
                 memory_map=False, ignore_prefixes=None, **kwargs):
        # Imported lazily: this raises ImportError when the pyarrow.dataset
        # extension is unavailable, which read_table() catches to fall back.
        import pyarrow.dataset as ds
        # Raise error for not supported keywords
        for keyword, default in [
                ("schema", None), ("metadata", None),
                ("split_row_groups", False), ("validate_schema", True),
                ("metadata_nthreads", 1)]:
            if keyword in kwargs and kwargs[keyword] is not default:
                raise ValueError(
                    "Keyword '{0}' is not yet supported with the new "
                    "Dataset API".format(keyword))
        # map format arguments
        read_options = {}
        if buffer_size:
            read_options.update(use_buffered_stream=True,
                                buffer_size=buffer_size)
        if read_dictionary is not None:
            read_options.update(dictionary_columns=read_dictionary)
        # map filters to Expressions
        self._filters = filters
        self._filter_expression = filters and _filters_to_expression(filters)
        # map old filesystems to new one
        if filesystem is not None:
            filesystem = _ensure_filesystem(
                filesystem, use_mmap=memory_map)
        elif filesystem is None and memory_map:
            # if memory_map is specified, assume local file system (string
            # path can in principle be URI for any filesystem)
            filesystem = LocalFileSystem(use_mmap=memory_map)
        # check for single fragment dataset
        single_file = None
        if isinstance(path_or_paths, list):
            if len(path_or_paths) == 1:
                single_file = path_or_paths[0]
        else:
            if _is_path_like(path_or_paths):
                path = str(path_or_paths)
                if filesystem is None:
                    # path might be a URI describing the FileSystem as well
                    try:
                        filesystem, path = FileSystem.from_uri(path)
                    except ValueError:
                        filesystem = LocalFileSystem(use_mmap=memory_map)
                if filesystem.get_file_info(path).is_file:
                    single_file = path
            else:
                single_file = path_or_paths
        if single_file is not None:
            # Single-file fast path: skip discovery and allow per-column
            # parallel conversion when reading.
            self._enable_parallel_column_conversion = True
            read_options.update(enable_parallel_column_conversion=True)
            parquet_format = ds.ParquetFileFormat(read_options=read_options)
            fragment = parquet_format.make_fragment(single_file, filesystem)
            self._dataset = ds.FileSystemDataset(
                [fragment], schema=fragment.physical_schema,
                format=parquet_format,
                filesystem=fragment.filesystem
            )
            return
        else:
            self._enable_parallel_column_conversion = False
        parquet_format = ds.ParquetFileFormat(read_options=read_options)
        # check partitioning to enable dictionary encoding
        if partitioning == "hive":
            partitioning = ds.HivePartitioning.discover(
                infer_dictionary=True)
        self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
                                   format=parquet_format,
                                   partitioning=partitioning,
                                   ignore_prefixes=ignore_prefixes)
    @property
    def schema(self):
        """The unified pyarrow schema of the underlying dataset."""
        return self._dataset.schema
    def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
        """
        Read (multiple) Parquet files as a single pyarrow.Table.
        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the dataset. The partition fields
            are not automatically included (in contrast to when setting
            ``use_legacy_dataset=True``).
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.
        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).
        """
        # if use_pandas_metadata, we need to include index columns in the
        # column selection, to be able to restore those in the pandas DataFrame
        metadata = self.schema.metadata
        if columns is not None and use_pandas_metadata:
            if metadata and b'pandas' in metadata:
                # RangeIndex can be represented as dict instead of column name
                index_columns = [
                    col for col in _get_pandas_index_columns(metadata)
                    if not isinstance(col, dict)
                ]
                columns = columns + list(set(index_columns) - set(columns))
        if self._enable_parallel_column_conversion:
            if use_threads:
                # Allow per-column parallelism; would otherwise cause
                # contention in the presence of per-file parallelism.
                use_threads = False
        table = self._dataset.to_table(
            columns=columns, filter=self._filter_expression,
            use_threads=use_threads
        )
        # if use_pandas_metadata, restore the pandas metadata (which gets
        # lost if doing a specific `columns` selection in to_table)
        if use_pandas_metadata:
            if metadata and b"pandas" in metadata:
                new_metadata = table.schema.metadata or {}
                new_metadata.update({b"pandas": metadata[b"pandas"]})
                table = table.replace_schema_metadata(new_metadata)
        return table
    def read_pandas(self, **kwargs):
        """
        Read dataset including pandas metadata, if any. Other arguments passed
        through to ParquetDataset.read, see docstring for further details.
        """
        return self.read(use_pandas_metadata=True, **kwargs)
    @property
    def pieces(self):
        """Dataset fragments, for compatibility with the legacy API."""
        # TODO raise deprecation warning
        return list(self._dataset.get_fragments())
_read_table_docstring = """
{0}
Parameters
----------
source: str, pyarrow.NativeFile, or file-like object
If a string passed, can be a single file name or directory name. For
file-like objects, only read a single file. Use pyarrow.BufferReader to
read a file contained in a bytes or buffer-like object.
columns: list
If not None, only these columns will be read from the file. A column
name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
metadata : FileMetaData
If separately computed
{1}
use_legacy_dataset : bool, default False
By default, `read_table` uses the new Arrow Datasets API since
pyarrow 1.0.0. Among other things, this allows to pass `filters`
for all columns and not only the partition keys, enables
different partitioning schemes, etc.
Set to True to use the legacy behaviour.
ignore_prefixes : list, optional
Files matching any of these prefixes will be ignored by the
discovery process if use_legacy_dataset=False.
This is matched to the basename of a path.
By default this is ['.', '_'].
Note that discovery happens only if a directory is passed as source.
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
setting `use_legacy_dataset` to False, also within-file level filtering
and different partitioning schemes are supported.
{3}
Returns
-------
{2}
"""
def read_table(source, columns=None, use_threads=True, metadata=None,
               use_pandas_metadata=False, memory_map=False,
               read_dictionary=None, filesystem=None, filters=None,
               buffer_size=0, partitioning="hive", use_legacy_dataset=False,
               ignore_prefixes=None):
    # NOTE: the public docstring is attached below via read_table.__doc__,
    # assembled from the shared _read_table_docstring template.
    if not use_legacy_dataset:
        if metadata is not None:
            raise ValueError(
                "The 'metadata' keyword is no longer supported with the new "
                "datasets-based implementation. Specify "
                "'use_legacy_dataset=True' to temporarily recover the old "
                "behaviour."
            )
        try:
            # Preferred path: datasets-based implementation.
            dataset = _ParquetDatasetV2(
                source,
                filesystem=filesystem,
                partitioning=partitioning,
                memory_map=memory_map,
                read_dictionary=read_dictionary,
                buffer_size=buffer_size,
                filters=filters,
                ignore_prefixes=ignore_prefixes,
            )
        except ImportError:
            # fall back on ParquetFile for simple cases when pyarrow.dataset
            # module is not available
            if filters is not None:
                raise ValueError(
                    "the 'filters' keyword is not supported when the "
                    "pyarrow.dataset module is not available"
                )
            if partitioning != "hive":
                raise ValueError(
                    "the 'partitioning' keyword is not supported when the "
                    "pyarrow.dataset module is not available"
                )
            filesystem, path = _resolve_filesystem_and_path(source, filesystem)
            if filesystem is not None:
                source = filesystem.open_input_file(path)
            # TODO test that source is not a directory or a list
            dataset = ParquetFile(
                source, metadata=metadata, read_dictionary=read_dictionary,
                memory_map=memory_map, buffer_size=buffer_size)
        return dataset.read(columns=columns, use_threads=use_threads,
                            use_pandas_metadata=use_pandas_metadata)
    if ignore_prefixes is not None:
        raise ValueError(
            "The 'ignore_prefixes' keyword is only supported when "
            "use_legacy_dataset=False")
    # Legacy path: ParquetDataset for paths/directories, ParquetFile for
    # file-like objects.
    if _is_path_like(source):
        pf = ParquetDataset(source, metadata=metadata, memory_map=memory_map,
                            read_dictionary=read_dictionary,
                            buffer_size=buffer_size,
                            filesystem=filesystem, filters=filters,
                            partitioning=partitioning)
    else:
        pf = ParquetFile(source, metadata=metadata,
                         read_dictionary=read_dictionary,
                         memory_map=memory_map,
                         buffer_size=buffer_size)
    return pf.read(columns=columns, use_threads=use_threads,
                   use_pandas_metadata=use_pandas_metadata)
read_table.__doc__ = _read_table_docstring.format(
"""Read a Table from Parquet format
Note: starting with pyarrow 1.0, the default for `use_legacy_dataset` is
switched to False.""",
"\n".join((_read_docstring_common,
"""use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded""")),
"""pyarrow.Table
Content of the file as a table (of columns)""",
_DNF_filter_doc)
def read_pandas(source, columns=None, use_threads=True, memory_map=False,
                metadata=None, filters=None, buffer_size=0,
                use_legacy_dataset=True, ignore_prefixes=None):
    # read_pandas is read_table with pandas index metadata forced on; the
    # public docstring is attached below via read_pandas.__doc__.
    kwargs = dict(columns=columns,
                  use_threads=use_threads,
                  metadata=metadata,
                  filters=filters,
                  memory_map=memory_map,
                  buffer_size=buffer_size,
                  use_pandas_metadata=True,
                  use_legacy_dataset=use_legacy_dataset,
                  ignore_prefixes=ignore_prefixes)
    return read_table(source, **kwargs)
read_pandas.__doc__ = _read_table_docstring.format(
'Read a Table from Parquet format, also reading DataFrame\n'
'index values if known in the file metadata',
_read_docstring_common,
"""pyarrow.Table
Content of the file as a Table of Columns, including DataFrame
indexes as columns""",
_DNF_filter_doc)
def write_table(table, where, row_group_size=None, version='1.0',
                use_dictionary=True, compression='snappy',
                write_statistics=True,
                use_deprecated_int96_timestamps=None,
                coerce_timestamps=None,
                allow_truncated_timestamps=False,
                data_page_size=None, flavor=None,
                filesystem=None,
                compression_level=None,
                use_byte_stream_split=False,
                data_page_version='1.0',
                **kwargs):
    # NOTE: the public docstring is attached below via write_table.__doc__.
    # 'chunk_size' is accepted as a backwards-compatible alias for
    # 'row_group_size'.
    row_group_size = kwargs.pop('chunk_size', row_group_size)
    use_int96 = use_deprecated_int96_timestamps
    try:
        with ParquetWriter(
                where, table.schema,
                filesystem=filesystem,
                version=version,
                flavor=flavor,
                use_dictionary=use_dictionary,
                write_statistics=write_statistics,
                coerce_timestamps=coerce_timestamps,
                data_page_size=data_page_size,
                allow_truncated_timestamps=allow_truncated_timestamps,
                compression=compression,
                use_deprecated_int96_timestamps=use_int96,
                compression_level=compression_level,
                use_byte_stream_split=use_byte_stream_split,
                data_page_version=data_page_version,
                **kwargs) as writer:
            writer.write_table(table, row_group_size=row_group_size)
    except Exception:
        # Best-effort cleanup: remove a partially written file so a failed
        # write does not leave a corrupt parquet file behind, then re-raise.
        if _is_path_like(where):
            try:
                os.remove(_stringify_path(where))
            except OSError:
                # OSError is the canonical name; os.error is a deprecated
                # alias for it.
                pass
        raise
write_table.__doc__ = """
Write a Table to Parquet format.
Parameters
----------
table : pyarrow.Table
where: string or pyarrow.NativeFile
row_group_size: int
The number of rows per rowgroup
{}
""".format(_parquet_writer_arg_docs)
def _mkdir_if_not_exists(fs, path):
if fs._isfilestore() and not fs.exists(path):
try:
fs.mkdir(path)
except OSError:
assert fs.exists(path)
def write_to_dataset(table, root_path, partition_cols=None,
                     partition_filename_cb=None, filesystem=None,
                     use_legacy_dataset=True, **kwargs):
    """Wrapper around parquet.write_table for writing a Table to
    Parquet format by partitions.
    For each combination of partition columns and values,
    a subdirectories are created in the following
    manner:
    root_dir/
      group1=value1
        group2=value1
          <uuid>.parquet
        group2=value2
          <uuid>.parquet
      group1=valueN
        group2=value1
          <uuid>.parquet
        group2=valueN
          <uuid>.parquet
    Parameters
    ----------
    table : pyarrow.Table
    root_path : str, pathlib.Path
        The root directory of the dataset
    filesystem : FileSystem, default None
        If nothing passed, paths assumed to be found in the local on-disk
        filesystem
    partition_cols : list,
        Column names by which to partition the dataset
        Columns are partitioned in the order they are given
    partition_filename_cb : callable,
        A callback function that takes the partition key(s) as an argument
        and allow you to override the partition filename. If nothing is
        passed, the filename will consist of a uuid.
    use_legacy_dataset : bool, default True
        Set to False to enable the new code path (experimental, using the
        new Arrow Dataset API). This is more efficient when using partition
        columns, but does not (yet) support `partition_filename_cb` and
        `metadata_collector` keywords.
    **kwargs : dict,
        Additional kwargs for write_table function. See docstring for
        `write_table` or `ParquetWriter` for more information.
        Using `metadata_collector` in kwargs allows one to collect the
        file metadata instances of dataset pieces. The file paths in the
        ColumnChunkMetaData will be set relative to `root_path`.
    """
    if not use_legacy_dataset:
        # New code path: delegate the whole write to pyarrow.dataset.
        import pyarrow.dataset as ds
        # extract non-file format options
        schema = kwargs.pop("schema", None)
        use_threads = kwargs.pop("use_threads", True)
        # raise for unsupported keywords
        msg = (
            "The '{}' argument is not supported with the new dataset "
            "implementation."
        )
        metadata_collector = kwargs.pop('metadata_collector', None)
        if metadata_collector is not None:
            raise ValueError(msg.format("metadata_collector"))
        if partition_filename_cb is not None:
            raise ValueError(msg.format("partition_filename_cb"))
        # map format arguments
        parquet_format = ds.ParquetFileFormat()
        write_options = parquet_format.make_write_options(**kwargs)
        # map old filesystems to new one
        if filesystem is not None:
            filesystem = _ensure_filesystem(filesystem)
        partitioning = None
        if partition_cols:
            part_schema = table.select(partition_cols).schema
            partitioning = ds.partitioning(part_schema, flavor="hive")
        ds.write_dataset(
            table, root_path, filesystem=filesystem,
            format=parquet_format, file_options=write_options, schema=schema,
            partitioning=partitioning, use_threads=use_threads)
        return
    fs, root_path = legacyfs.resolve_filesystem_and_path(root_path, filesystem)
    _mkdir_if_not_exists(fs, root_path)
    metadata_collector = kwargs.pop('metadata_collector', None)
    if partition_cols is not None and len(partition_cols) > 0:
        # Partitioned write: go through pandas to group rows by their
        # partition values and write one file per unique combination.
        df = table.to_pandas()
        partition_keys = [df[col] for col in partition_cols]
        data_df = df.drop(partition_cols, axis='columns')
        data_cols = df.columns.drop(partition_cols)
        if len(data_cols) == 0:
            raise ValueError('No data left to save outside partition columns')
        subschema = table.schema
        # ARROW-2891: Ensure the output_schema is preserved when writing a
        # partitioned dataset
        for col in table.schema.names:
            if col in partition_cols:
                subschema = subschema.remove(subschema.get_field_index(col))
        # NOTE(review): pandas groupby silently drops rows whose partition
        # key is null — confirm this is the intended behaviour here.
        for keys, subgroup in data_df.groupby(partition_keys):
            if not isinstance(keys, tuple):
                keys = (keys,)
            subdir = '/'.join(
                ['{colname}={value}'.format(colname=name, value=val)
                 for name, val in zip(partition_cols, keys)])
            subtable = pa.Table.from_pandas(subgroup, schema=subschema,
                                            safe=False)
            _mkdir_if_not_exists(fs, '/'.join([root_path, subdir]))
            if partition_filename_cb:
                outfile = partition_filename_cb(keys)
            else:
                outfile = guid() + '.parquet'
            relative_path = '/'.join([subdir, outfile])
            full_path = '/'.join([root_path, relative_path])
            with fs.open(full_path, 'wb') as f:
                write_table(subtable, f, metadata_collector=metadata_collector,
                            **kwargs)
            if metadata_collector is not None:
                # Record the just-written file's path (relative to root_path)
                # in its collected metadata.
                metadata_collector[-1].set_file_path(relative_path)
    else:
        if partition_filename_cb:
            outfile = partition_filename_cb(None)
        else:
            outfile = guid() + '.parquet'
        full_path = '/'.join([root_path, outfile])
        with fs.open(full_path, 'wb') as f:
            write_table(table, f, metadata_collector=metadata_collector,
                        **kwargs)
        if metadata_collector is not None:
            metadata_collector[-1].set_file_path(outfile)
def write_metadata(schema, where, metadata_collector=None, **kwargs):
    """
    Write a metadata-only Parquet file from a schema.
    Used together with `write_to_dataset` to produce the `_common_metadata`
    and `_metadata` sidecar files.
    Parameters
    ----------
    schema : pyarrow.Schema
    where : string or pyarrow.NativeFile
    metadata_collector : list, optional
        Per-file metadata objects (as collected via the
        `metadata_collector` kwarg of `write_to_dataset`) whose row groups
        are appended to the written file.
    **kwargs : dict,
        Additional kwargs for ParquetWriter class. See docstring for
        `ParquetWriter` for more information.
    """
    writer = ParquetWriter(where, schema, **kwargs)
    writer.close()
    if metadata_collector is None:
        return
    # ParquetWriter doesn't expose the metadata until it's written. Read the
    # freshly written footer back, merge in the collected row groups, and
    # rewrite the file.
    merged = read_metadata(where)
    for file_metadata in metadata_collector:
        merged.append_row_groups(file_metadata)
    merged.write_metadata_file(where)
def read_metadata(where, memory_map=False):
    """
    Read FileMetadata from footer of a single Parquet file.
    Parameters
    ----------
    where : str (filepath) or file-like object
    memory_map : bool, default False
        Create memory map when the source is a file path.
    Returns
    -------
    metadata : FileMetadata
    """
    parquet_file = ParquetFile(where, memory_map=memory_map)
    return parquet_file.metadata
def read_schema(where, memory_map=False):
    """
    Read effective Arrow schema from Parquet file metadata.
    Parameters
    ----------
    where : str (filepath) or file-like object
    memory_map : bool, default False
        Create memory map when the source is a file path.
    Returns
    -------
    schema : pyarrow.Schema
    """
    parquet_file = ParquetFile(where, memory_map=memory_map)
    return parquet_file.schema.to_arrow_schema()
| {
"content_hash": "26f5b20c679d85bcd08e0b47fb546434",
"timestamp": "",
"source": "github",
"line_count": 1969,
"max_line_length": 84,
"avg_line_length": 37.6124936515998,
"alnum_prop": 0.5954576756369921,
"repo_name": "xhochy/arrow",
"id": "1cfd210fb7616b5f42ebd6d286debd0aa0e96ec4",
"size": "74846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyarrow/parquet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "73655"
},
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "24676"
},
{
"name": "C",
"bytes": "881567"
},
{
"name": "C#",
"bytes": "699719"
},
{
"name": "C++",
"bytes": "14541996"
},
{
"name": "CMake",
"bytes": "560347"
},
{
"name": "Dockerfile",
"bytes": "100165"
},
{
"name": "Emacs Lisp",
"bytes": "1916"
},
{
"name": "FreeMarker",
"bytes": "2244"
},
{
"name": "Go",
"bytes": "848212"
},
{
"name": "HTML",
"bytes": "6152"
},
{
"name": "Java",
"bytes": "4713332"
},
{
"name": "JavaScript",
"bytes": "102300"
},
{
"name": "Julia",
"bytes": "235105"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "M4",
"bytes": "11095"
},
{
"name": "MATLAB",
"bytes": "36600"
},
{
"name": "Makefile",
"bytes": "57687"
},
{
"name": "Meson",
"bytes": "48356"
},
{
"name": "Objective-C",
"bytes": "17680"
},
{
"name": "Objective-C++",
"bytes": "12128"
},
{
"name": "PLpgSQL",
"bytes": "56995"
},
{
"name": "Perl",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "3135304"
},
{
"name": "R",
"bytes": "533584"
},
{
"name": "Ruby",
"bytes": "1084485"
},
{
"name": "Rust",
"bytes": "3969176"
},
{
"name": "Shell",
"bytes": "380070"
},
{
"name": "Thrift",
"bytes": "142033"
},
{
"name": "TypeScript",
"bytes": "1157087"
}
],
"symlink_target": ""
} |
# Compatibility shim: re-export scikit-learn's private k-means++ initializer
# so the rest of the package has a single import location for it (presumably
# to insulate callers from sklearn moving the symbol — confirm with callers).
from sklearn.cluster._kmeans import _kmeans_plusplus  # noqa
__all__ = ["_kmeans_plusplus"]
| {
"content_hash": "b663a98a72f9fa387ef06fbb5091e53d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 60,
"avg_line_length": 31,
"alnum_prop": 0.6989247311827957,
"repo_name": "dask/dask-ml",
"id": "bce639c624991a0298e81eb97b82d918de84f816",
"size": "93",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dask_ml/cluster/_compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "798280"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
"""Common definitions.
"""
import logging
from collections import namedtuple
LOGGER = logging.getLogger(__name__)
# A unit of currency: an (ISO currency code, numeric amount) pair.
CurrencyAmount = namedtuple(
    'CurrencyAmount',
    'currency amount')
# Cost-free acquisition of a property. Similar to a BUY, but the cost is at
# $0 and the event is fully taxable as salary.
TRANS_ACQUIRE = 'ACQUIRE'
# Market purchase of a property.
TRANS_BUY = 'BUY'
# Market sell of a property.
TRANS_SELL = 'SELL'
# Return of capital.
TRANS_CAPITAL_RETURN = 'CAPITAL_RETURN'
# Dividends.
TRANS_DIVIDEND = 'DIVIDEND'
# Management fees. These can be deducted as carrying charges.
TRANS_FEE = 'FEE'
# Transactions are a named tuple with fields as follows:
#   date: A datetime object (trade date).
#   settlement_date: A datetime object; date the trade settles. This is the
#       field TransactionComparator orders by.
#   symbol: The name of the property (usually ticker symbol).
#   type: The type of the transaction, from the TRANS_ enum above.
#   units: The number of items of property involved in the transaction.
#   value: Per property item value, as a CurrencyAmount.
#   fees: Any fees associated with the transaction, as a CurrencyAmount.
Transaction = namedtuple(
    'Transaction',
    'date settlement_date symbol type units value fees')
def TransactionComparator(tx1, tx2):
    """Old-style cmp function for transactions.

    Orders two transactions by increasing settlement date, returning
    -1, 0 or 1, so it can be fed to cmp-based sorting (a stable sort
    keeps equal-date transactions in their original order).
    """
    lhs = tx1.settlement_date
    rhs = tx2.settlement_date
    # (a > b) - (a < b) is the classic branch-free three-way compare.
    return (lhs > rhs) - (lhs < rhs)
| {
"content_hash": "8be415a2c347261eca672d02d87d133a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 25.98148148148148,
"alnum_prop": 0.7320028510334996,
"repo_name": "chhamilton/acb",
"id": "12d4ea510f2fe2926a20c0fd092fd592b00c52cb",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acb/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36848"
}
],
"symlink_target": ""
} |
""" Module light
Defines a light source to light polygonal surfaces. Each axes has up
to eight lights associated with it.
"""
import OpenGL.GL as gl
from visvis.core import misc
from visvis.core.misc import PropWithDraw, DrawAfter, basestring
def _testColor(value, canBeScalar=True):
    """ _testColor(value, canBeScalar=True)
    Validate a color specification. Returns a 4-element (r, g, b, a)
    tuple, or — when scalars are allowed — a scalar clamped to [0, 1]
    meaning "this fraction of the reference color". Raises ValueError
    if the supplied data cannot represent a color.
    """
    # Resolve named colors ("r", "g", "w", ...) first.
    if isinstance(value, basestring):
        value = misc.getColor(value)
    # Scalars are clamped into the [0, 1] range.
    if canBeScalar and isinstance(value, (int, float)):
        if value <= 0:
            return 0.0
        if value >= 1:
            return 1.0
        return value
    # Anything else must be a 3 (rgb) or 4 (rgba) element sequence.
    if not hasattr(value, '__len__'):
        raise ValueError("Given value can not represent a color.")
    if len(value) == 4:
        return (value[0], value[1], value[2], value[3])
    if len(value) == 3:
        # Missing alpha defaults to fully opaque.
        return (value[0], value[1], value[2], 1.0)
    raise ValueError("Given value can not represent a color.")
def _getColor(color, ref):
""" _getColor(color, reference)
Get the real color as a 4 element tuple, using the reference
color if the given color is a scalar.
"""
if isinstance(color, float):
return (color*ref[0], color*ref[1], color*ref[2], ref[3])
else:
return color
# todo: implement spot light and attenuation
class Light(object):
    """ Light(axes, index)
    A Light object represents a light source in the scene. It
    determines how lit objects (such as Mesh objects) are visualized.
    Each axes has 8 light sources, of which the 0th is turned on
    by default. The 0th light source provides the ambient light in the
    scene (the ambient component is 0 by default for the other light
    sources). Obtain the lights using the axes.light0 and axes.lights
    properties.
    The 0th light source is a directional camera light by default; it
    shines in the direction in which you look. The other lights are
    oriented at the origin by default.
    """
    def __init__(self, axes, index):
        # Store axes and index of the light (OpenGl can handle up to 8 lights)
        # A weak reference avoids a reference cycle between axes and light.
        self._axes = axes.GetWeakref()
        self._index = index
        self._on = False
        # The three light properties
        self._color = (1, 1, 1, 1)
        self._ambient = 0.0
        self._diffuse = 1.0
        self._specular = 1.0
        # The main light has an ambient component by default
        if index == 0:
            self._ambient = 0.2
        # Position or direction. The 4th element follows the OpenGL
        # GL_POSITION convention: 0 means directional, 1 means positional.
        if index == 0:
            self._position = (0,0,1,0)
            self._camLight = True
        else:
            self._position = (0,0,0,1)
            self._camLight = False
    def Draw(self):
        # Redraw the owning axes, if it has not been garbage-collected.
        axes = self._axes()
        if axes:
            axes.Draw()
    @PropWithDraw
    def color():
        """ Get/Set the reference color of the light. If the ambient,
        diffuse or specular properties specify a scalar, that scalar
        represents the fraction of *this* color.
        """
        def fget(self):
            return self._color
        def fset(self, value):
            # Scalars are allowed here (fraction of white).
            self._color = _testColor(value, True)
        return locals()
    @PropWithDraw
    def ambient():
        """ Get/Set the ambient color of the light. This is the color
        that is everywhere, coming from all directions, independent of
        the light position.
        The value can be a 3- or 4-element tuple, a character in
        "rgbycmkw", or a scalar between 0 and 1 that indicates the
        fraction of the reference color.
        """
        def fget(self):
            return self._ambient
        def fset(self, value):
            self._ambient = _testColor(value)
        return locals()
    @PropWithDraw
    def diffuse():
        """ Get/Set the diffuse color of the light. This component is the
        light that comes from one direction, so it's brighter if it comes
        squarely down on a surface than if it barely glances off the
        surface. It depends on the light position how a material is lit.
        """
        def fget(self):
            return self._diffuse
        def fset(self, value):
            self._diffuse = _testColor(value)
        return locals()
    @PropWithDraw
    def specular():
        """ Get/Set the specular color of the light. This component
        represents the light that comes from the light source and bounces
        off a surface in a particular direction. This is what makes
        materials appear shiny.
        The value can be a 3- or 4-element tuple, a character in
        "rgbycmkw", or a scalar between 0 and 1 that indicates the
        fraction of the reference color.
        """
        def fget(self):
            return self._specular
        def fset(self, value):
            self._specular = _testColor(value)
        return locals()
    @PropWithDraw
    def position():
        """ Get/Set the position of the light. Can be represented as a
        3 or 4 element tuple. If the fourth element is a 1, the light
        has a position, if it is a 0, it represents a direction (i.o.w. the
        light is a directional light, like the sun).
        """
        def fget(self):
            return self._position
        def fset(self, value):
            if len(value) == 3:
                # 3-element input defaults to a positional light (w == 1).
                self._position = value[0], value[1], value[2], 1
            elif len(value) == 4:
                self._position = value[0], value[1], value[2], value[3]
            else:
                tmp = "Light position should be a 3 or 4 element sequence."
                raise ValueError(tmp)
        return locals()
    @PropWithDraw
    def isDirectional():
        """ Get/Set whether the light is a directional light. A directional
        light has no real position (it can be thought of as infinitely far
        away), but shines in a particular direction. The sun is a good
        example of a directional light.
        """
        def fget(self):
            return self._position[3] == 0
        def fset(self, value):
            # Get fourth element: 0 = directional, 1 = positional.
            if value:
                fourth = 0
            else:
                fourth = 1
            # Set position, keeping the xyz part unchanged.
            tmp = self._position
            self._position = tmp[0], tmp[1], tmp[2], fourth
        return locals()
    @PropWithDraw
    def isCamLight():
        """ Get/Set whether the light is a camera light. A camera light
        moves along with the camera, like the lamp on a miner's hat.
        """
        def fget(self):
            return self._camLight
        def fset(self, value):
            self._camLight = bool(value)
        return locals()
    @DrawAfter
    def On(self, on=True):
        """ On(on=True)
        Turn the light on.
        """
        self._on = bool(on)
    @DrawAfter
    def Off(self):
        """ Off()
        Turn the light off.
        """
        self._on = False
    @property
    def isOn(self):
        """ Get whether the light is on.
        """
        return self._on
    def _Apply(self):
        """ _Apply()
        Apply the light position and other properties to the OpenGL state.
        """
        thisLight = gl.GL_LIGHT0 + self._index
        if self._on:
            # Enable and set position
            gl.glEnable(thisLight)
            gl.glLightfv(thisLight, gl.GL_POSITION, self._position)
            # Set colors; scalar components are resolved against _color.
            amb, dif, spe = gl.GL_AMBIENT, gl.GL_DIFFUSE, gl.GL_SPECULAR
            gl.glLightfv(thisLight, amb, _getColor(self._ambient, self._color))
            gl.glLightfv(thisLight, dif, _getColor(self._diffuse, self._color))
            gl.glLightfv(thisLight, spe, _getColor(self._specular, self._color))
        else:
            # A null position means that the light is off
            gl.glLightfv(thisLight, gl.GL_POSITION, (0.0, 0.0, 0.0, 0.0))
            gl.glDisable(thisLight)
| {
"content_hash": "b97b87c994517f5c20aa4638c6cb0774",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 80,
"avg_line_length": 31.044609665427508,
"alnum_prop": 0.5662794874865286,
"repo_name": "Alwnikrotikz/visvis",
"id": "4cb7bae5677af9554771788a5ed30147f0ef9474",
"size": "8528",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "core/light.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "158972"
},
{
"name": "C++",
"bytes": "44817"
},
{
"name": "Python",
"bytes": "1424569"
}
],
"symlink_target": ""
} |
''' dbrev.tables_table.TablesTable implements the
Flyweight pattern which handles caching.
'''
import logging
LOG = logging.getLogger(__name__)
# LOG.setLevel(logging.INFO)
# Long lines expected.
# pylint: disable=C0301
# Cyclic imports protected by functions
# pylint: disable=R0401
from freevolv.models.dbrev.table import Table
from freevolv.dal import table
class TablesTable(table.Table):
    ''' TablesTable holds exactly one copy of each row queried from the
    TABLES table (Flyweight: rows are cached and shared by key).'''
    table_name = "[DBREV].[TABLES]"
    # DB column names, positionally aligned with _attributes below.
    columns = ["DATABASE_NAME", "SCHEMA_NAME", "NAME", "PY_SINGULAR", "CAP_WORDS_SINGULAR", "PY_PLURAL", "CAP_WORDS_PLURAL", "SUPERTYPE_SCHEMA", "SUPERTYPE_NAME", "PRIMARY_KEY_NAME"]
    _attributes = ['database_name', 'schema_name', 'name', 'py_singular', 'cap_words_singular', 'py_plural', 'cap_words_plural', 'supertype_schema', 'supertype_name', 'primary_key_name']
    _template_class = Table
    _template_keys = [('database_name', 'schema_name', 'name',)]
    # Lazily created singleton; use get_instance() rather than constructing.
    instance = None
    @staticmethod
    def get_instance():
        ''' Instantiates (if it hasn't already) a singleton TablesTable
        and returns it.
        '''
        # `is None` (identity) rather than `== None`: PEP 8 idiom, and it
        # cannot be fooled by a custom __eq__.
        if TablesTable.instance is None:
            TablesTable.instance = TablesTable()
        return TablesTable.instance
    @staticmethod
    def get_fields(table):
        '''Get a dict of column name -> value populated from *table*.
        NOTE: the parameter shadows the imported `table` module; the module
        is not used inside this method, so behavior is unaffected.
        '''
        fields = {}
        fields["DATABASE_NAME"] = table.database_name
        fields["SCHEMA_NAME"] = table.schema_name
        fields["NAME"] = table.name
        fields["PY_SINGULAR"] = table.py_singular
        fields["CAP_WORDS_SINGULAR"] = table.cap_words_singular
        fields["PY_PLURAL"] = table.py_plural
        fields["CAP_WORDS_PLURAL"] = table.cap_words_plural
        fields["SUPERTYPE_SCHEMA"] = table.supertype_schema
        fields["SUPERTYPE_NAME"] = table.supertype_name
        fields["PRIMARY_KEY_NAME"] = table.primary_key_name
        return fields
    @staticmethod
    def set_fields(table, fields):
        '''Re-initialize *table* in place from the row dict *fields*.'''
        table.__init__(
            database_name=fields['DATABASE_NAME'],
            schema_name=fields['SCHEMA_NAME'],
            name=fields['NAME'],
            py_singular=fields['PY_SINGULAR'],
            cap_words_singular=fields['CAP_WORDS_SINGULAR'],
            py_plural=fields['PY_PLURAL'],
            cap_words_plural=fields['CAP_WORDS_PLURAL'],
            supertype_schema=fields['SUPERTYPE_SCHEMA'],
            supertype_name=fields['SUPERTYPE_NAME'],
            primary_key_name=fields['PRIMARY_KEY_NAME'])
    def __init__(self):
        super(TablesTable, self).__init__()
        self.attributes = TablesTable._attributes
        self.template_class = TablesTable._template_class
        self.template_keys = TablesTable._template_keys
        # Flyweight cache keyed by (database_name, schema_name, name).
        self._by_database_name_schema_name_name = {}
        self.key_dict[('database_name', 'schema_name', 'name',)] = self.get_by_database_name_schema_name_name
        self.by_key = self._by_database_name_schema_name_name
    def add_dict_entry(self, table):
        ''' Add an entry to the table and all indexes.'''
        idx = table.database_name, table.schema_name, table.name
        self._by_database_name_schema_name_name[idx] = table
    def del_dict_entry(self, table):
        ''' Delete an entry from the table and all indexes.'''
        idx = table.database_name, table.schema_name, table.name
        del self._by_database_name_schema_name_name[idx]
    def get_by_database_name_schema_name_name(self, database_name, schema_name, name):
        ''' Look up the record with the unique key defined by
        database_name, schema_name, name; on a cache miss the row is
        loaded from the database via load_one().
        '''
        rtn = None
        idx = database_name, schema_name, name
        if idx in self._by_database_name_schema_name_name:
            rtn = self._by_database_name_schema_name_name[idx]
        else:
            rtn = self.load_one(
                database_name=database_name,
                schema_name=schema_name,
                name=name)
        return rtn
    by_database_name_schema_name_name = property(get_by_database_name_schema_name_name)
| {
"content_hash": "0f6d1114af465ff8d7913347b9900eb8",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 186,
"avg_line_length": 40.91588785046729,
"alnum_prop": 0.6233439926907264,
"repo_name": "genevolv/dbrev",
"id": "648c4f230abb5a921a52f10ca84f25a1cc659610",
"size": "4378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/freevolv/models/dbrev/tables_table.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "306037"
}
],
"symlink_target": ""
} |
import unittest
# Package
from plan.schedule import Schedule
from plan.time_range import TimeRange
class TestSchedule(unittest.TestCase):
    """Placeholder suite for plan.schedule.Schedule.

    Every case below is an unimplemented stub; together they only sketch
    the surface area that still needs coverage.
    """
    def test_times(self):
        # TODO: cover the times accessor.
        pass
    def test_time_zone(self):
        # TODO: cover time-zone handling.
        pass
    def test_operators(self):
        # TODO: cover operator overloads.
        pass
    def test_add_time(self):
        # TODO: cover adding a TimeRange.
        pass
    def test_remove_time(self):
        # TODO: cover removing a TimeRange.
        pass
    def test_now(self):
        # TODO: cover the "now" query.
        pass
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e496516da75a805498cf721979f40181",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 38,
"avg_line_length": 14.833333333333334,
"alnum_prop": 0.6067415730337079,
"repo_name": "CodePeasants/pyplan",
"id": "5893629308fa2463d19d9a7e8b76e6a82e712e4d",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_schedule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72442"
}
],
"symlink_target": ""
} |
import sys
# Make the sibling pyv8 checkout importable ahead of everything else.
sys.path.insert(0, '../../pyv8')
import pyv8run
# pyv8run reads its target from argv; default to the LibTest module.
if "LibTest" not in sys.argv:
    sys.argv.append("LibTest")
# Build the script output (compile-only entry point — presumably no
# execution happens here; confirm against pyv8run.build_script).
pyv8run.build_script()
| {
"content_hash": "a3ec6bf3ee0d268e2d66e962659e3ccd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.6923076923076923,
"repo_name": "gpitel/pyjs",
"id": "671de68049c2128ae43aea4b1a3fce2b153f316d",
"size": "161",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "examples/libtest/compile_only.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5517085"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add Network.search_string and an optional Network.ville FK."""
    dependencies = [
        ('villes', '0002_load_data'),
        ('floreal', '0005_constrain_admin_messages'),
    ]
    operations = [
        migrations.AddField(
            model_name='network',
            name='search_string',
            field=models.TextField(default='', max_length=256),
        ),
        migrations.AddField(
            model_name='network',
            name='ville',
            # SET_NULL keeps a network alive if its city row is deleted.
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='villes.ville'),
        ),
    ]
| {
"content_hash": "809494da30ae6c8fa217e8f2da472fd1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 138,
"avg_line_length": 29.217391304347824,
"alnum_prop": 0.59375,
"repo_name": "fab13n/caracole",
"id": "a7c3638220cdad71fa77453c919bd862c660eba7",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "floreal/migrations/0006_add_villes_and_search_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59189"
},
{
"name": "Dockerfile",
"bytes": "823"
},
{
"name": "HTML",
"bytes": "179835"
},
{
"name": "JavaScript",
"bytes": "736880"
},
{
"name": "Python",
"bytes": "224237"
},
{
"name": "Shell",
"bytes": "4080"
},
{
"name": "TeX",
"bytes": "7787"
}
],
"symlink_target": ""
} |
"""App factory."""
import os
from flask import Flask, Blueprint
from server.config import configs
from server.data import data
from server.data.module import load_modules
from server.data.research import load_research
from server.extensions import db, mail, ma, migrate, passlib
# Blueprints, created at module import time so view modules (presumably the
# `server.api` import at the bottom of this file) can attach routes to them
# before create_app() registers them on the application.
user_bp = Blueprint('user', __name__)
admin_bp = Blueprint('admin', __name__)
def create_app(config='develop'):
    """App factory.

    Builds a configured Flask application: static file setup, the config
    object selected by *config* (a key into ``configs``), all extensions,
    error handlers, blueprints, and the module/research data caches.
    Returns the ready-to-use app.
    """
    app = Flask(
        __name__,
        static_folder='../static',
        static_url_path='/static-native',
    )
    # Resolve the configuration *class* by name and apply it.
    config = configs[config]
    app.config.from_object(config)
    # Wire up the shared extensions against this app instance.
    db.init_app(app)
    ma.init_app(app)
    migrate.init_app(app, db)
    mail.init_app(app)
    passlib.init_app(app)
    # Local import: handlers presumably import from this module, so a
    # top-level import would create a cycle — confirm before moving it.
    from server.handlers import register_handlers
    register_handlers(app)
    register_blueprints(app)
    # NOTE(review): the config class is instantiated just to read the two
    # file paths; if they are plain class attributes, getattr on the class
    # itself would suffice — confirm.
    data.module_data = load_modules(getattr(config(), "MODULE_FILE_PATH"))
    data.research_data = load_research(getattr(config(), "RESEARCH_FILE_PATH"))
    return app
def register_blueprints(app):
    """Attach every application blueprint to *app*."""
    # (blueprint, url_prefix) pairs; None means no prefix (Flask default).
    blueprint_specs = (
        (user_bp, None),
        (admin_bp, '/admin'),
    )
    for blueprint, prefix in blueprint_specs:
        app.register_blueprint(blueprint, url_prefix=prefix)
def run():
    """Run the app on all IPs."""
    # NOTE(review): the factory's config *name* is read from the
    # SQLALCHEMY_DATABASE_URI environment variable, which by its name holds
    # a database URI rather than a config key — confirm this is intentional.
    config = os.getenv('SQLALCHEMY_DATABASE_URI', 'develop')
    app = create_app(config)
    # Bind on 0.0.0.0 so the dev server is reachable from outside the host.
    app.run(host="0.0.0.0")
import server.api # noqa
import server.models # noqa
| {
"content_hash": "cc3d5a629223ac24b02a94a33bd321e2",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 24.49122807017544,
"alnum_prop": 0.6726361031518625,
"repo_name": "Nukesor/spacesurvival",
"id": "ba439b22dd9a1aae7f959df7e51ede2fa2aa978e",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1318"
},
{
"name": "Elm",
"bytes": "58816"
},
{
"name": "HTML",
"bytes": "434"
},
{
"name": "JavaScript",
"bytes": "2053"
},
{
"name": "Python",
"bytes": "70795"
},
{
"name": "Shell",
"bytes": "548"
}
],
"symlink_target": ""
} |
import ShareYourSystem as SYS
import scipy.stats
# Define a Maker that derives the distance of an (x, y) point from the origin.
@SYS.ClasserClass()
class MakerClass(SYS.ExplorerClass):
	def default_init(
				self,
				_MakingXFloat = 0.,
				_MakingYFloat = 0.,
				_MadeDistanceFloat = 0.,
				**_KwargVariablesDict
			):
		#Call the parent __init__ method
		SYS.ExplorerClass.__init__(self,**_KwargVariablesDict)
	def do_make(self):
		#compute the Euclidean distance from the origin
		self.MadeDistanceFloat = SYS.numpy.sqrt(self.MakingXFloat**2 + self.MakingYFloat**2)
		#debug
		'''
		self.debug(
			[
				('self.',self,['MadeDistanceFloat'])
			]
		)
		'''
#Definition: rejection-sample 1000 points from [-1, 1]^2, keeping only
#those strictly inside the unit disk (MadeDistanceFloat < 1).
DiskMaker=SYS.MakerClass(
	).make(
		1.,2.
	).explore(
		_MethodStr = 'make',
		_SuccessesInt = 1000,
		_RangeVariable = {
			'MakingXFloat':lambda self:-1.+2.*scipy.stats.uniform.rvs(),
			'MakingYFloat':lambda self:-1.+2.*scipy.stats.uniform.rvs()
		},
		_ConditionVariable = {
			'checkDistanceFloat':lambda self:self.MadeDistanceFloat<1.
		}
	)
#print
print('DiskMaker is ')
SYS._print(DiskMaker)
#show: scatter-plot each accepted (x, y) sample
map(
	lambda __TuplesList:
	SYS.plot(*list(dict(__TuplesList).values()),marker='o'),
	DiskMaker.ExploredStoreTuplesListsList
)
SYS.show()
"content_hash": "d6179a0cf10d10b2d49c00bca51ae93b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 19.016666666666666,
"alnum_prop": 0.6573181419807187,
"repo_name": "Ledoux/ShareYourSystem",
"id": "f0728bb1f8c93b8a9021b65c3e2ecee161558ad1",
"size": "1156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Itemizers/Explorer/01_ExampleDoc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
import imp
import json
import copy
rules = imp.load_source('chess_basic_rules','common/rules.py')
# Load the piece-priority table; a context manager closes the file handle
# promptly instead of leaking it until garbage collection.
with open("common/chess_piece_priority.json") as _priority_file:
    piece_value = json.load(_priority_file)
helper = imp.load_source('helper_functions','common/helper_functions.py')
opposite = { "white" : "black" , "black" : "white" }
def emperical_comparision(board,color):
    """Material balance: total piece value of *color* minus the opponent's."""
    def _material(side):
        # Sum the priority value of every piece the side still has.
        return sum(float(piece_value[name]) for name in board[side].keys())
    return _material(color) - _material(opposite[color])
def risk_comparision(board,color):
    """Attack balance: value of opponent pieces under attack minus ours."""
    def _threatened(side):
        # Total value of this side's pieces currently under attack.
        return sum(float(piece_value[name])
                   for name in board[side].keys()
                   if helper.if_piece_under_attack(board, side, name))
    return _threatened(opposite[color]) - _threatened(color)
def defence_comparision(board,color):
    """Shielding balance: our total piece shielding minus the opponent's."""
    def _shielding(side):
        return sum(float(helper.shielding(board, side, name))
                   for name in board[side].keys())
    return _shielding(color) - _shielding(opposite[color])
def evaluate_board(board,color):
    """Static evaluation of *board* from *color*'s point of view.

    Check/checkmate against *color* is an immediate -inf (loss); against
    the opponent, +inf (win). Otherwise only the attack/risk balance is
    returned.
    """
    if helper.in_checkmate(board,color) or helper.in_check(board,color): return float('-inf')
    if helper.in_checkmate(board,opposite[color]) or helper.in_check(board,opposite[color]): return float('inf')
    # TODO: only the two extreme cases (check / checkmate) are handled
    # above; intermediate positional factors still need to be folded in.
    # Material and shielding scores are computed but currently unused —
    # only risk_eval drives the returned evaluation.
    emperical_eval = emperical_comparision(board,color)
    risk_eval = risk_comparision(board,color)
    defence_eval = defence_comparision(board,color)
    #print emperical_eval , risk_eval , defence_eval
    return risk_eval
''' this part of code is related to minimax and not being used due to presence of better alternative over it'''
def minimax(board,color,depth):
    """Root of the plain minimax search: return the best move for *color*.

    Returns a move from helper.get_moves, or None when no move exists.
    (With depth == 0 it instead returns a score; the root does not
    decrement depth — the child plies do.)
    """
    if depth == 0 : return evaluate_board(board,color)
    moves_list = helper.get_moves(board,color)
    if len(moves_list) == 0: return None
    best_move = moves_list[0]
    best_score = float('-inf')
    for move in moves_list:
        clone_board = helper.generate_board(board,move)
        # The opponent moves next, so score this move at a minimizing ply.
        score = min_play(clone_board,opposite[color],depth)
        if score > best_score:
            best_move= move
            best_score = score
    return best_move
def min_play(board,color,depth):
    """Minimizing ply: *color* picks the move with the lowest score."""
    if helper.game_over(board,color) or depth <= 0:
        return evaluate_board(board,color)
    moves_list = helper.get_moves(board,color)
    best_score = float('inf')
    for move in moves_list:
        clone_board = helper.generate_board(board,move)
        # The other side moves next, one ply deeper.
        score =max_play(clone_board,opposite[color],depth-1)
        #print "evaluating move : ", move, score
        if score < best_score:
            best_move = move
            best_score = score
    return best_score
def max_play(board,color,depth):
    """Maximizing ply: *color* picks the move with the highest score.

    Bug fix: the recursive call now passes opposite[color] so the side to
    move alternates each ply, mirroring min_play; previously the same
    color was searched twice in a row.
    """
    if helper.game_over(board,color) or depth <= 0 :
        return evaluate_board(board,color)
    moves_list = helper.get_moves(board,color)
    best_score = float('-inf')
    for move in moves_list:
        clone_board = helper.generate_board(board,move)
        score = min_play(clone_board,opposite[color],depth-1)
        #print "evaluating move : ", move,score
        if score > best_score:
            best_move = move
            best_score = score
    return best_score
''' this is the better alternative over minimax '''
# this alpha_beta_pruning is called from external c++ program
import subprocess
def alpha_beta_pruning(board,color,depth):
    """Delegate move search to the external "ai/minimax" C++ binary.

    Protocol: 32 lines of "x y" are written to the child's stdin — the 16
    white pieces in the fixed temp_str order, then the 16 black ones;
    absent pieces are encoded as "-1 -1". The child replies on stdout
    with "piece_index x y" (0-based coordinates).
    NOTE(review): the returned move hard-codes color "black" even when
    called for white — confirm intended.
    """
    # Fixed serialization order shared with the C++ side.
    temp_str = "king queen bishop_1 bishop_2 knight_1 knight_2 rook_1 rook_2 pawn_1 pawn_2 pawn_3 pawn_4 pawn_5 pawn_6 pawn_7 pawn_8".split(" ")
    data= ""
    # mm maps serialized piece index (0-31) back to a piece name.
    mm = {}
    for i in xrange(len(temp_str)):
        if temp_str[i] in board['white'].keys():
            xy=board['white'][temp_str[i]]
        else:
            xy=[-1,-1]
        data = data + str(xy[0]) +" "+ str(xy[1]) + "\n"
        #print xy[0],xy[1]
        mm[i] = temp_str[i]
    for i in xrange(len(temp_str)):
        if temp_str[i] in board['black'].keys():
            xy=board['black'][temp_str[i]]
        else:
            xy=[-1,-1]
        data = data + str(xy[0]) + " " + str(xy[1]) + "\n"
        #print xy[0],xy[1]
        mm[16+i] = temp_str[i]
    #print "\n\n"
    #print data
    # Side to move is passed as a base piece index: white 0, black 16.
    if color == "white":
        player=0
    else:
        player=16
    #print "player :"+str(player),"depth : "+str(depth)
    proc = subprocess.Popen(["ai/minimax", str(player), str(depth) ],stdout=subprocess.PIPE,stdin=subprocess.PIPE)
    out = proc.communicate(data)
    #print out
    piece, x, y = out[0].split(" ")
    piece=mm[int(piece)]
    # Convert the engine's 0-based coordinates to this module's 1-based.
    x = int(x) +1
    y = int(y) +1
    print piece , x, y
    # Note the coordinate swap: new_position is [y, x].
    return { 'color':"black", 'piece' : piece , 'new_position' : [y,x] }
def alpha_beta_pruning_python_native(board,color,depth):
    """Pure-Python alpha-beta root: return the best move for *color*.

    Same shape as minimax() above, but threads an (alpha, beta) window
    through the recursion. Returns None when there is no legal move and
    a score (not a move) when depth == 0.
    NOTE(review): alpha is never tightened at the root, so no root-level
    pruning actually occurs.
    """
    if depth == 0 : return evaluate_board(board,color)
    moves_list = helper.get_moves(board,color)
    if len(moves_list) == 0: return None
    best_move = moves_list[0]
    best_score = float('-inf')
    alpha = float('-inf')
    beta = float('inf')
    for move in moves_list:
        clone_board = helper.generate_board(board,move)
        # Children start one ply down with the current window.
        score = alpha_beta_min(clone_board, opposite[color], alpha, beta, depth)
        if score > best_score:
            best_move= move
            best_score = score
    return best_move
def alpha_beta_min(board,color,alpha,beta, depth):
    """Minimizing node of a fail-hard alpha-beta search.

    Bug fix: the original tested `score <= beta: return alpha`, which
    prunes almost immediately (beta starts at +inf) and returns the wrong
    bound. A min node must prune when score <= alpha and otherwise
    tighten beta — the exact mirror of alpha_beta_max below.
    """
    if depth == 0 or helper.game_over(board, color) :
        return evaluate_board(board,color)
    moves_list = helper.get_moves(board,color)
    for move in moves_list:
        clone_board = helper.generate_board(board,move)
        score = alpha_beta_max(clone_board,opposite[color],alpha,beta,depth-1)
        if score <= alpha:
            # The maximizer above already has a better option: prune.
            return alpha
        if score < beta:
            beta = score
    return beta
def alpha_beta_max(board,color,alpha,beta, depth):
    """Maximizing node of a fail-hard alpha-beta search over *color*'s moves."""
    if depth == 0 or helper.game_over(board, color):
        return evaluate_board(board, color)
    for candidate in helper.get_moves(board, color):
        child = helper.generate_board(board, candidate)
        score = alpha_beta_min(child, opposite[color], alpha, beta, depth - 1)
        if score >= beta:
            # The minimizer above will never allow this line: prune.
            return beta
        if score > alpha:
            alpha = score
    return alpha
| {
"content_hash": "f4620b1f691c600231cadd1c7da80cc5",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 141,
"avg_line_length": 26.91304347826087,
"alnum_prop": 0.6337641357027464,
"repo_name": "OpenC-IIIT/prosfair",
"id": "1d4fb484ccb5a12ebc5a8f7076f412f665d9b916",
"size": "6190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ai/cpu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "18701"
},
{
"name": "Python",
"bytes": "50598"
}
],
"symlink_target": ""
} |
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree # noqa: E402
from astropy import units as u # noqa: E402
from astropy.time import Time, TimeDelta # noqa: E402
@pytest.mark.parametrize("fmt", TimeDelta.FORMATS.keys())
def test_timedelta(fmt, tmpdir):
    """Round-trip a TimeDelta of every supported format through ASDF."""
    start = Time(Time.now())
    stop = Time(Time.now())
    delta = TimeDelta(stop - start, format=fmt)
    assert_roundtrip_tree({"timedelta": delta}, tmpdir)
@pytest.mark.parametrize("scale", list(TimeDelta.SCALES) + [None])
def test_timedelta_scales(scale, tmpdir):
    """Round-trip a TimeDelta for every time scale (and no scale)."""
    delta = TimeDelta(0.125, scale=scale, format="jd")
    assert_roundtrip_tree(dict(timedelta=delta), tmpdir)
def test_timedelta_vector(tmpdir):
    """Round-trip a vector-valued TimeDelta."""
    delta = TimeDelta([1, 2] * u.day)
    assert_roundtrip_tree(dict(timedelta=delta), tmpdir)
| {
"content_hash": "d350dd6ee0e887b17fa5aa68bed3bdd8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 29.464285714285715,
"alnum_prop": 0.7042424242424242,
"repo_name": "astropy/astropy",
"id": "bdde8acfda56f50258c17cf219a12f71d6b1383f",
"size": "890",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "astropy/io/misc/asdf/tags/time/tests/test_timedelta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11039709"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "79917"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12402561"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
"""
WSGI config for locationUpdator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "locationUpdator.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| {
"content_hash": "adc3ba29b0e9eada6316bd00c47189d5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7971887550200804,
"repo_name": "valarpirai/team-locator",
"id": "b82b7218ed42c2dc5e1d95489b8e75b28a594b4e",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locationUpdator/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9506"
},
{
"name": "JavaScript",
"bytes": "11926"
},
{
"name": "Python",
"bytes": "20957"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine the four Document money columns as DecimalField(10, 3)."""
    dependencies = [
        ('core', '0002_add_indexes'),
    ]
    operations = [
        migrations.AlterField(
            model_name='document',
            name='document_value',
            field=models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Document value'),
        ),
        migrations.AlterField(
            model_name='document',
            name='net_value',
            field=models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Net value'),
        ),
        migrations.AlterField(
            model_name='document',
            name='reimbursement_value',
            # NOTE(review): 'Reimbusrsement' is misspelled, but this is a
            # historical migration — the runtime string must stay as-is.
            field=models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Reimbusrsement value'),
        ),
        migrations.AlterField(
            model_name='document',
            name='remark_value',
            field=models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Remark value'),
        ),
    ]
| {
"content_hash": "784efa1ee390cf30364b02a4c85c6b62",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 108,
"avg_line_length": 32.484848484848484,
"alnum_prop": 0.5960820895522388,
"repo_name": "marcusrehm/serenata-de-amor",
"id": "6728cba68b93c3dc372a41ff297ad14abefdaeee",
"size": "1145",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "jarbas/core/migrations/0003_remove_some_indexes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "301"
},
{
"name": "Elm",
"bytes": "131019"
},
{
"name": "HTML",
"bytes": "4527"
},
{
"name": "JavaScript",
"bytes": "1468"
},
{
"name": "Python",
"bytes": "425718"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
from typing import Any, cast
import urwid
ELLIPSIS = "..."
class EllipsisTextLayout(urwid.TextLayout):
    """Single-line text layout that clips overlong text with "..."."""

    def supports_align_mode(self, align: Any) -> bool:
        # Only left/right alignment is implemented in layout() below.
        return align in (urwid.LEFT, urwid.RIGHT)

    def supports_wrap_mode(self, wrap: Any) -> bool:
        # This layout never wraps; it only clips.
        return cast(bool, wrap == urwid.CLIP)

    def layout(self, text: str, width: int, align: Any, wrap: Any) -> Any:
        """Return the urwid layout structure for *text* clipped to *width*.

        Text that fits is emitted unchanged; otherwise it is truncated on
        the right (LEFT align) or the left (RIGHT align) and an ellipsis
        is inserted on the clipped side.
        """
        # Fast path: the whole text fits on screen.
        if urwid.util.calc_width(text, 0, len(text)) <= width:
            return [[(len(text), 0, len(text))]]
        # Degenerate case: no room even for "..." — fill with dots.
        if width <= len(ELLIPSIS):
            return [[(width, 1, b"." * width)]]
        # Insert-segment form (cols, offset, encoded text) — presumably
        # urwid's (screen_cols, ...) segment convention; see TextLayout docs.
        ellipsis_segment = (len(ELLIPSIS), 1, ELLIPSIS.encode("utf-8"))
        # NOTE(review): offset starts at 1, so at least one character beyond
        # the ellipsis is always dropped; with wide (CJK) characters,
        # dropping no extra character might already fit — confirm intended.
        offset = 1
        if align == urwid.LEFT:
            # Drop trailing characters until "text..." fits in width columns.
            while True:
                part = text[0 : len(text) - len(ELLIPSIS) - offset] + ELLIPSIS
                if urwid.util.calc_width(part, 0, len(part)) <= width:
                    break
                offset += 1
            text_segment = (
                len(text) - offset - len(ELLIPSIS),
                0,
                len(text) - offset - len(ELLIPSIS),
            )
            return [[text_segment, ellipsis_segment]]
        elif align == urwid.RIGHT:
            # Drop leading characters until "...text" fits in width columns.
            while True:
                part = ELLIPSIS + text[len(ELLIPSIS) + offset :]
                if urwid.util.calc_width(part, 0, len(part)) <= width:
                    break
                offset += 1
            text_segment = (
                len(text) - offset - len(ELLIPSIS),
                offset + len(ELLIPSIS),
                len(text),
            )
            return [[ellipsis_segment, text_segment]]
        else:
            # supports_align_mode() should have rejected anything else.
            assert False
| {
"content_hash": "846fbce84b85f30f99df31e01bbce021",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 33.87755102040816,
"alnum_prop": 0.4963855421686747,
"repo_name": "rr-/dotfiles",
"id": "419fb516f3a31ef1ab53a709519df91f7ab7c4fe",
"size": "1660",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "opt/booru-toolkit/booru_toolkit/upload/ui/ellipsis_text_layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "94637"
},
{
"name": "Perl",
"bytes": "13652"
},
{
"name": "Python",
"bytes": "246436"
},
{
"name": "Shell",
"bytes": "19612"
},
{
"name": "Vim Script",
"bytes": "18664"
}
],
"symlink_target": ""
} |
from bs4 import BeautifulSoup
from weibo.common import log as logging
LOG = logging.getLogger(__name__)
class HBeautifulSoup(BeautifulSoup):
    # Project-local alias of BeautifulSoup; currently adds no behaviour and
    # exists as a hook for future parsing customisation.
    pass
class Soup(object):
    """Lazy wrapper around :class:`HBeautifulSoup`.

    The markup is parsed on call (``soup_obj(markup)``); attribute and item
    access are delegated to the parsed soup once it exists.
    """

    def __init__(self, *args, **kwargs):
        # Parser backend handed to BeautifulSoup.
        self.parser_type = "html.parser"
        # Parsed document; populated by __call__().
        self.soup = None

    def __call__(self, wb, **kwargs):
        """Parse *wb* (markup text) into the wrapped soup.

        Raises:
            ValueError: if *wb* is empty or None.
        """
        if not wb:
            # Bug fix: the original bare ``raise`` outside an ``except``
            # block raised ``RuntimeError: No active exception to re-raise``;
            # raise a meaningful exception instead.
            raise ValueError("no content to parse")
        self.soup = HBeautifulSoup(wb, self.parser_type)

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails.  Delegate to the
        # parsed soup when one exists, otherwise raise AttributeError.
        # Bug fix: the original fell through to ``getattr(self, key)``,
        # which re-entered __getattr__ and recursed until RecursionError.
        # ``is not None`` is used instead of truthiness because soup/tag
        # objects may be falsy when empty.
        soup = self.__dict__.get('soup')
        if soup is not None:
            return getattr(soup, key)
        raise AttributeError(key)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        # Mirrors dict.get(); works because __getattr__ now raises
        # AttributeError, which getattr() converts into *default*.
        return getattr(self, key, default)
| {
"content_hash": "87be76951f27e967c609789fa3e87b3f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 56,
"avg_line_length": 21.47222222222222,
"alnum_prop": 0.5937904269081501,
"repo_name": "windskyer/weibo",
"id": "ad26c9c39da0235d8917451506bdf2419531732f",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weibo/jiexi/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "324965"
},
{
"name": "Shell",
"bytes": "2077"
}
],
"symlink_target": ""
} |
"""
author: Chris Fernandez, chris2fernandez@gmail.com
last updated: 05/26/2015
A script that can be used to create and launch all example NEXT experiments from the examples/ directory.
Usage from Command line:
python run_examples.py
"""
import os
# Example experiment directories, paired index-for-index with the
# launcher scripts in curr_experiments below.
curr_examples = ['cartoon_tuple/',
                 'cartoon_dueling/',
                 'strange_fruit_triplet/',
                 'cartoon_cardinal/']
# Launcher script living inside each corresponding directory above.
curr_experiments = ['experiment_tuple.py',
                    'experiment_dueling.py',
                    'experiment_triplet.py',
                    'experiment_cardinal.py']
# Absolute path of the directory containing this script.
curr_dir = os.path.dirname(os.path.abspath(__file__))
for example_dir, launcher in zip(curr_examples, curr_experiments):
    example_path = os.path.join(curr_dir, example_dir)
    # Run each launcher from inside its own example directory, since the
    # experiment scripts expect their working directory to be local.
    os.chdir(example_path)
    os.system('python ' + example_path + launcher)
    # Restore the original working directory for the next iteration.
    os.chdir(curr_dir)
| {
"content_hash": "ce5c57473f656aa26397dc33844d03fc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 102,
"avg_line_length": 35,
"alnum_prop": 0.6899159663865546,
"repo_name": "sumeetsk/NEXT",
"id": "008b75dfe07d4472b8be7646a3ac2e1e572c8c21",
"size": "1208",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/run_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1732"
},
{
"name": "HTML",
"bytes": "99474"
},
{
"name": "JavaScript",
"bytes": "35965"
},
{
"name": "Python",
"bytes": "612823"
},
{
"name": "Shell",
"bytes": "7910"
}
],
"symlink_target": ""
} |
"""
MoinMoin - PDF filter
Depends on: pdftotext command from either xpdf-utils or poppler-utils
or any other package that provides a pdftotext command that
is callable with: pdftotext -enc UTF-8 filename.pdf -
@copyright: 2006 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.filter import execfilter
def execute(indexobj, filename):
    """Extract text from *filename* (a PDF) via the external pdftotext tool.

    The -q flag keeps pdftotext quiet: poppler-utils' pdftotext on
    Debian/Etch can otherwise flood stderr (e.g. 10MB of messages),
    which the current execfilter implementation cannot cope with.
    """
    command = "pdftotext -q -enc UTF-8 %s -"
    return execfilter(command, filename)
| {
"content_hash": "d7718670bb02981a41759ec8e56b43c1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 37.9,
"alnum_prop": 0.7058047493403694,
"repo_name": "RealTimeWeb/wikisite",
"id": "839ebce32ef0297ad71f8eb092f917f3c7e76197",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/filter/application_pdf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import json
from .interface import ContentTypeSerializer
from ..exceptions import SerializationException
from ..compat import string_type
class JsonSerializer(ContentTypeSerializer):
    """Serializes and deserializes bodies using the JSON format."""

    content_type = ["application/json"]

    @staticmethod
    def dump(data):
        """Serialize *data* to UTF-8 encoded bytes (str on Python 2),
        suitable for e.g. response bodies.
        """
        return json.dumps(data).encode("UTF-8")

    @property
    def main_type(self):
        # The first (canonical) content type this serializer advertises.
        return self.content_type[0]

    @staticmethod
    def load(raw_bytes):
        """Deserialize *raw_bytes* into a base Python data structure.

        Raises:
            SerializationException: if decoding or JSON parsing fails.
        """
        try:
            # UnicodeDecodeError is a ValueError subclass, so decode
            # failures are converted to SerializationException too.
            text = raw_bytes if isinstance(raw_bytes, string_type) else raw_bytes.decode()
            return json.loads(text)
        except ValueError as e:
            raise SerializationException(str(e))

    @staticmethod
    def can_handle(content_type_name):
        """Return True if bodies of *content_type_name* can be converted."""
        return "json" in content_type_name
| {
"content_hash": "709f6ba7435daf600b89c80f3c3d18f2",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 27.976744186046513,
"alnum_prop": 0.6201163757273483,
"repo_name": "toumorokoshi/web-transmute",
"id": "a0821b4a6a0143e3f4daf3f22b30a04c147efffc",
"size": "1203",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transmute_core/contenttype_serializers/json_serializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21259"
}
],
"symlink_target": ""
} |
import pygame
import Game
from Menu.HeadMenu import HeadMenu
from Menu.StartMenu.StartMenuItems.ExitGame import ExitGame
from Menu.InGameMenu.StartMenuButton import StartMenuButton
from Vector2 import Vector2
class FinalScreen(HeadMenu):
    """End-of-game screen announcing the winner, with exit and
    back-to-start-menu buttons."""

    def __init__(self, resolution: Vector2, background=None, logo=None, winner=''):
        super().__init__(resolution, background, logo)
        self.ExitButton = ExitGame(Vector2(0, 210))
        self.StartMenuButton = StartMenuButton(Vector2(0, 100))
        self.Winner = winner

    def Update(self, game: Game):
        if self.ExitButton.IsClickedByMouse(game):
            self.ExitButton.GetNewState()
        if self.StartMenuButton.IsClickedByMouse(game):
            # Imported here to avoid a circular import at module load time.
            from Menu.StartMenu.StartMenu import StartMenu
            return StartMenu(game.Settings.Resolution)
        return super().Update(game)

    def Draw(self, game: Game):
        super().Draw(game)
        # NOTE(review): X is divided by 2.4 (not 2), so the caption sits
        # slightly left of true centre — presumably intentional.
        caption_x = game.Settings.Resolution.X // 2.4
        caption_y = game.Settings.Resolution.Y // 2
        caption = pygame.font.Font(None, 30).render(
            str(self.Winner) + " is the winner!", 0, (255, 255, 255))
        game.Settings.GetScreen().blit(caption, (caption_x, caption_y))
        self.ExitButton.Draw(game)
        self.StartMenuButton.Draw(game)
| {
"content_hash": "f14849a409e1d38aba3dbde41c25d58b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 36.027027027027025,
"alnum_prop": 0.6804201050262566,
"repo_name": "HRODEV/Frequency",
"id": "b54ee82000e91e3f9000043d6a8d63166a5c6356",
"size": "1335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Frequency/FinalScreen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112720"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from os import PathLike
from typing import IO, Any, Dict, List, Optional, Tuple, Union
from .lang_types import (
Asset,
Association,
AttackStep,
AttackStepType,
Category,
Field,
Multiplicity,
Risk,
Steps,
Variable,
)
from .reader import LangReader
from .step_types import (
StepAttackStep,
StepCollect,
StepDifference,
StepExpression,
StepField,
StepIntersection,
StepSubType,
StepTransitive,
StepUnion,
StepVariable,
)
from .ttc_types import (
TtcAddition,
TtcDistribution,
TtcDivision,
TtcExponentiation,
TtcExpression,
TtcFunction,
TtcMultiplication,
TtcNumber,
TtcSubtraction,
)
class Lang:
    """Object model of a language specification archive.

    Reads the raw ``langspec`` dictionary via :class:`LangReader` and links
    it into ``Category``/``Asset``/``Association`` objects with resolved
    attack steps, variables, TTC expressions and step expressions.
    """

    def __init__(self, file: Union[str, PathLike[Any], IO[bytes]]) -> None:
        self._reader = LangReader(file)
        self.defines: Dict[str, str] = self._reader.langspec["defines"]
        self.categories: Dict[str, Category] = {}
        self.assets: Dict[str, Asset] = {}
        self.associations: List[Association] = []
        self.license: Optional[str] = self._reader.license
        self.notice: Optional[str] = self._reader.notice
        self._create_lang()
        # The reader is only needed while building; drop it afterwards so
        # the finished Lang object carries no reference to the archive.
        del self._reader

    def _create_lang(self) -> None:
        """Build categories, assets and associations from the raw langspec."""
        for category in self._reader.langspec["categories"]:
            category_ = self._create_category(category)
            self.categories[category_.name] = category_
        for asset in self._reader.langspec["assets"]:
            asset_ = self._create_asset(asset)
            self.assets[asset_.name] = asset_
        # Second pass: every asset exists now, so super-asset links can be
        # resolved regardless of declaration order.
        for asset in self._reader.langspec["assets"]:
            if asset["superAsset"] is None:
                continue
            asset_ = self.assets[asset["name"]]
            super_asset_ = self.assets[asset["superAsset"]]
            asset_.super_asset = super_asset_
        for association in self._reader.langspec["associations"]:
            association_ = self._create_association(association)
            self.associations.append(association_)
        self._create_step_expressions()

    def _create_category(self, category: Dict[str, Any]) -> Category:
        """Build a Category from its raw dictionary."""
        return Category(name=category["name"], meta=category["meta"])

    def _create_asset(self, asset: Dict[str, Any]) -> Asset:
        """Build an Asset (with its variables and attack steps) and register
        it under its category."""
        category_ = self.categories[asset["category"]]
        asset_ = Asset(
            name=asset["name"],
            meta=asset["meta"],
            category=category_,
            is_abstract=asset["isAbstract"],
            _svg_icon=self._reader.svg_icons.get(asset["name"]),
            _png_icon=self._reader.png_icons.get(asset["name"]),
        )
        category_.assets[asset_.name] = asset_
        for variable in asset["variables"]:
            variable_ = self._create_variable(asset_, variable)
            asset_._variables[variable_.name] = variable_
        for attack_step in asset["attackSteps"]:
            attack_step_ = self._create_attack_step(asset_, attack_step)
            asset_._attack_steps[attack_step_.name] = attack_step_
        return asset_

    def _create_variable(self, asset_: Asset, variable: Dict[str, Any]) -> Variable:
        """Build a Variable shell; its step expression is resolved later in
        _create_step_expressions()."""
        return Variable(name=variable["name"], asset=asset_)

    def _create_attack_step(
        self, asset_: Asset, attack_step: Dict[str, Any]
    ) -> AttackStep:
        """Build an AttackStep; requires/reaches step expressions are filled
        in later by _create_step_expressions()."""

        def create_risk(risk: Optional[Dict[str, bool]]) -> Optional[Risk]:
            # CIA flags; absent risk stays None.
            if risk is None:
                return None
            return Risk(
                is_confidentiality=risk["isConfidentiality"],
                is_integrity=risk["isIntegrity"],
                is_availability=risk["isAvailability"],
            )

        def create_ttc(
            ttc_expression: Optional[Dict[str, Any]]
        ) -> Optional[TtcExpression]:
            if ttc_expression is None:
                return None
            return self._create_ttc_expression(ttc_expression)

        def create_steps(steps: Optional[Dict[str, Any]]) -> Optional[Steps]:
            # Only the 'overrides' flag is stored here; the actual step
            # expressions are appended in _create_step_expressions().
            if not steps:
                return None
            return Steps(overrides=steps["overrides"])

        return AttackStep(
            name=attack_step["name"],
            meta=attack_step["meta"],
            asset=asset_,
            type=AttackStepType(attack_step["type"]),
            _tags=set(attack_step["tags"]),
            _risk=create_risk(attack_step["risk"]),
            _ttc=create_ttc(attack_step["ttc"]),
            _requires=create_steps(attack_step["requires"]),
            _reaches=create_steps(attack_step["reaches"]),
        )

    def _create_ttc_expression(self, ttc_expression: Dict[str, Any]) -> TtcExpression:
        """Recursively build a TTC (time-to-compromise) expression tree."""
        if ttc_expression["type"] in {
            "addition",
            "subtraction",
            "multiplication",
            "division",
            "exponentiation",
        }:
            lhs = self._create_ttc_expression(ttc_expression["lhs"])
            rhs = self._create_ttc_expression(ttc_expression["rhs"])
            if ttc_expression["type"] == "addition":
                return TtcAddition(lhs=lhs, rhs=rhs)
            if ttc_expression["type"] == "subtraction":
                return TtcSubtraction(lhs=lhs, rhs=rhs)
            if ttc_expression["type"] == "multiplication":
                return TtcMultiplication(lhs=lhs, rhs=rhs)
            if ttc_expression["type"] == "division":
                return TtcDivision(lhs=lhs, rhs=rhs)
            if ttc_expression["type"] == "exponentiation":
                return TtcExponentiation(lhs=lhs, rhs=rhs)
            # Unreachable: the membership test above covers all five types.
            raise RuntimeError(
                f"Failed to create TTC expression with type '{ttc_expression['type']}'"
            )
        if ttc_expression["type"] == "function":
            return TtcFunction(
                distribution=TtcDistribution(ttc_expression["name"]),
                arguments=ttc_expression["arguments"],
            )
        if ttc_expression["type"] == "number":
            return TtcNumber(value=ttc_expression["value"])
        raise RuntimeError(
            f"Failed to create TTC expression with type '{ttc_expression['type']}'"
        )

    def _create_association(self, association: Dict[str, Any]) -> Association:
        """Build an Association and cross-link its two fields.

        Note the deliberate crossing: the *left* field lives on the *right*
        asset and vice versa, and each field's ``target`` is its opposite.
        """
        left_asset_ = self.assets[association["leftAsset"]]
        right_asset_ = self.assets[association["rightAsset"]]
        left_field_ = Field(
            name=association["leftField"],
            asset=right_asset_,
            multiplicity=self._create_multiplicity(association["leftMultiplicity"]),
        )
        right_field_ = Field(
            name=association["rightField"],
            asset=left_asset_,
            multiplicity=self._create_multiplicity(association["rightMultiplicity"]),
        )
        association_ = Association(
            name=association["name"],
            meta=association["meta"],
            left_field=left_field_,
            right_field=right_field_,
        )
        right_asset_._fields[left_field_.name] = left_field_
        left_asset_._fields[right_field_.name] = right_field_
        left_field_.target = right_field_
        right_field_.target = left_field_
        left_field_.association = association_
        right_field_.association = association_
        return association_

    def _create_multiplicity(self, multiplicity: Dict[str, Any]) -> Multiplicity:
        """Map a raw {min, max} dict onto the Multiplicity enum."""
        # pylint: disable=no-value-for-parameter
        return Multiplicity((multiplicity["min"], multiplicity["max"]))  # type: ignore

    def _create_step_expressions(self) -> None:
        """Resolve all step expressions (variables, requires, reaches).

        Runs in two passes: first the target asset of every variable is
        computed (so variable references can be resolved regardless of
        declaration order), then the actual expression trees are built.
        """
        variable_targets_: Dict[Tuple[str, str], Asset] = {}
        for asset in self._reader.langspec["assets"]:
            asset_ = self.assets[asset["name"]]
            for variable in asset["variables"]:
                variable_ = asset_._variables[variable["name"]]
                target_ = self._get_step_target(asset_, variable["stepExpression"])
                variable_targets_[(variable_.asset.name, variable_.name)] = target_
        for asset in self._reader.langspec["assets"]:
            asset_ = self.assets[asset["name"]]
            for variable in asset["variables"]:
                variable_ = asset_._variables[variable["name"]]
                variable_.step_expression = self._create_step_expression(
                    asset_, variable["stepExpression"], variable_targets_
                )
            for attack_step in asset["attackSteps"]:
                attack_step_ = asset_._attack_steps[attack_step["name"]]
                if attack_step_._requires:
                    for step_expression in attack_step["requires"]["stepExpressions"]:
                        attack_step_._requires.step_expressions.append(
                            self._create_step_expression(
                                asset_, step_expression, variable_targets_
                            )
                        )
                if attack_step_._reaches:
                    for step_expression in attack_step["reaches"]["stepExpressions"]:
                        attack_step_._reaches.step_expressions.append(
                            self._create_step_expression(
                                asset_, step_expression, variable_targets_
                            )
                        )

    def _get_step_target(
        self, source_: Asset, step_expression: Dict[str, Any]
    ) -> Asset:
        """Return the asset a step expression ends at, starting from *source_*.

        Works on the raw langspec dictionaries (not the built objects), so it
        can run before step expressions are constructed.
        """

        def get_asset(asset_name: str) -> Dict[str, Any]:
            # Linear scan of the raw asset list by name.
            for asset in self._reader.langspec["assets"]:
                if asset["name"] == asset_name:
                    return asset
            raise RuntimeError(f"Failed to get asset '{asset_name}'")

        def get_variable(asset_name: str, variable_name: str) -> Dict[str, Any]:
            # Look up a variable, walking up the superAsset chain if it is
            # inherited rather than declared on this asset.
            asset = get_asset(asset_name)
            for variable in asset["variables"]:
                if variable["name"] == variable_name:
                    return variable
            if asset["superAsset"] is None:
                raise RuntimeError(
                    f"Failed to get variable '{asset_name}.{variable_name}'"
                )
            return get_variable(asset["superAsset"], variable_name)

        if step_expression["type"] in {"union", "intersection", "difference"}:
            lhs_target_ = self._get_step_target(source_, step_expression["lhs"])
            rhs_target_ = self._get_step_target(source_, step_expression["rhs"])
            # Asset.__or__ combines the two sides' targets; presumably the
            # closest common super-asset — None means incompatible operands.
            target_ = lhs_target_ | rhs_target_
            assert target_ is not None
            return target_
        if step_expression["type"] == "collect":
            # a.b: evaluate rhs starting from lhs's target.
            return self._get_step_target(
                self._get_step_target(source_, step_expression["lhs"]),
                step_expression["rhs"],
            )
        if step_expression["type"] == "transitive":
            # x* stays on the same asset type as its source.
            return source_
        if step_expression["type"] == "subType":
            return self.assets[step_expression["subType"]]
        if step_expression["type"] == "field":
            return source_.fields[step_expression["name"]].target.asset
        if step_expression["type"] == "attackStep":
            return source_.attack_steps[step_expression["name"]].asset
        if step_expression["type"] == "variable":
            return self._get_step_target(
                source_,
                get_variable(source_.name, step_expression["name"])["stepExpression"],
            )
        raise RuntimeError(
            f"Failed to get target of step expression with type '{step_expression['type']}'"
        )

    def _create_step_expression(
        self,
        source_: Asset,
        step_expression: Dict[str, Any],
        variable_targets_: Dict[Tuple[str, str], Asset],
    ) -> StepExpression:
        """Recursively build a StepExpression tree rooted at *source_*.

        *variable_targets_* maps (asset name, variable name) to the target
        asset precomputed in _create_step_expressions().
        """
        if step_expression["type"] in {"union", "intersection", "difference"}:
            lhs_ = self._create_step_expression(
                source_, step_expression["lhs"], variable_targets_
            )
            rhs_ = self._create_step_expression(
                source_, step_expression["rhs"], variable_targets_
            )
            target_ = lhs_.target_asset | rhs_.target_asset
            assert target_ is not None
            if step_expression["type"] == "union":
                return StepUnion(
                    source_asset=source_, target_asset=target_, lhs=lhs_, rhs=rhs_
                )
            if step_expression["type"] == "intersection":
                return StepIntersection(
                    source_asset=source_, target_asset=target_, lhs=lhs_, rhs=rhs_
                )
            if step_expression["type"] == "difference":
                return StepDifference(
                    source_asset=source_, target_asset=target_, lhs=lhs_, rhs=rhs_
                )
            # Unreachable: the membership test above covers all three types.
            raise RuntimeError(
                f"Failed to create step expression with type '{step_expression['type']}'"
            )
        if step_expression["type"] == "collect":
            # a.b: rhs is evaluated relative to lhs's target asset.
            lhs_ = self._create_step_expression(
                source_, step_expression["lhs"], variable_targets_
            )
            rhs_ = self._create_step_expression(
                lhs_.target_asset, step_expression["rhs"], variable_targets_
            )
            return StepCollect(
                source_asset=source_, target_asset=rhs_.target_asset, lhs=lhs_, rhs=rhs_
            )
        if step_expression["type"] == "transitive":
            step_expression_ = self._create_step_expression(
                source_, step_expression["stepExpression"], variable_targets_
            )
            return StepTransitive(
                source_asset=source_,
                target_asset=source_,
                step_expression=step_expression_,
            )
        if step_expression["type"] == "subType":
            sub_type_ = self.assets[step_expression["subType"]]
            step_expression_ = self._create_step_expression(
                source_, step_expression["stepExpression"], variable_targets_
            )
            return StepSubType(
                source_asset=source_,
                target_asset=sub_type_,
                sub_type=sub_type_,
                step_expression=step_expression_,
            )
        if step_expression["type"] == "field":
            field_ = source_.fields[step_expression["name"]]
            return StepField(
                source_asset=source_, target_asset=field_.target.asset, field=field_
            )
        if step_expression["type"] == "attackStep":
            attack_step_ = source_.attack_steps[step_expression["name"]]
            return StepAttackStep(
                source_asset=source_,
                target_asset=attack_step_.asset,
                attack_step=attack_step_,
            )
        if step_expression["type"] == "variable":
            variable_ = source_.variables[step_expression["name"]]
            target_ = variable_targets_[(variable_.asset.name, variable_.name)]
            return StepVariable(
                source_asset=source_, target_asset=target_, variable=variable_
            )
        raise RuntimeError(
            f"Failed to create step expression with type '{step_expression['type']}'"
        )
| {
"content_hash": "91eb1a61a59f29238e3f1bb02bc92d3e",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 92,
"avg_line_length": 41.37158469945355,
"alnum_prop": 0.5563333773609827,
"repo_name": "foreseeti/securicad-model-sdk",
"id": "7a6e4c90d979081f26ebea41d2804b0a11c6fb9b",
"size": "15749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "securicad/langspec/lang.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "268173"
},
{
"name": "Shell",
"bytes": "5757"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
import six
from cinder import context
from cinder.volume.drivers.cloudbyte import cloudbyte
from cinder.i18n import _, _LE, _LI
from cinder import exception
LOG = logging.getLogger(__name__)
class CloudByteISCSIDriver130(cloudbyte.CloudByteISCSIDriver):
    """CloudByte iSCSI driver variant for a newer Elastistor API.

    NOTE(review): the '130' suffix presumably refers to API version 1.3.0 —
    confirm.  Overrides volume creation to use the 'addVolume2' and
    'updateController' calls and gigabyte-suffixed quota sizes.
    """

    def __init__(self, *args, **kwargs):
        super(CloudByteISCSIDriver130, self).__init__(*args, **kwargs)

    def _get_volume_size(self, volume):
        """Return the volume size as a gigabyte-suffixed string, e.g. '10G'."""
        size = volume.get('size')
        return six.text_type(size) + "G"

    def _add_qos_group_request(self,
                               volume,
                               tsmid,
                               datasetid,
                               volume_name,
                               qos_group_params):
        """Create a QoS group for *volume* via the 'addQosGroup' API call.

        Defaults from configuration are overridden by *qos_group_params*
        (derived from volume-type QoS specs); throughput is forced to
        4 * iops.
        """
        # Prepare the user input params
        params = {
            "name": "QoS_" + volume_name,
            "tsmid": tsmid,
            "datasetid": datasetid,
            "datasetname": volume_name,
            "quotasize": self._get_volume_size(volume)
        }
        # Get qos related params from configuration
        params.update(self.configuration.cb_add_qosgroup)
        # Override the default configuration by qos specs
        if qos_group_params:
            params.update(qos_group_params)
        params.update({'throughput': 4 * int(params['iops'])})
        data = self._api_request_for_cloudbyte("addQosGroup", params)
        return data

    def _create_volume_request(self, volume, datasetid, qosgroupid,
                               tsmid, poolid, volume_name, file_system_params):
        """Create the backend volume via the 'addVolume2' API call.

        Configuration defaults are overridden by *file_system_params*
        (derived from volume-type specs).
        """
        # Prepare the user input params
        params = {
            "datasetid": datasetid,
            "name": volume_name,
            "qosgroupid": qosgroupid,
            "tsmid": tsmid,
            "quotasize": self._get_volume_size(volume),
            "poolid": poolid
        }
        # Get the additional params from configuration
        params.update(self.configuration.cb_create_volume)
        # Override the default configuration by qos specs
        if file_system_params:
            params.update(file_system_params)
        data = self._api_request_for_cloudbyte("addVolume2", params)
        return data

    def _get_tsm_details(self, data, tsm_name, account_name):
        """Extract the TSM named *tsm_name* from a 'listTsm' response.

        Raises:
            VolumeBackendAPIException: if no TSMs exist for the account or
                none matches *tsm_name*.
        """
        # Filter required tsm's details
        tsms = data['listTsmResponse'].get('listTsm')
        if tsms is None:
            msg = (_("No TSM was found in CloudByte storage "
                     "for account [%(account)s].") %
                   {'account': account_name})
            raise exception.VolumeBackendAPIException(data=msg)
        tsmdetails = {}
        for tsm in tsms:
            if tsm['name'] == tsm_name:
                tsmdetails = tsm
                break
        if not tsmdetails:
            msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
                     "for account [%(account)s].") %
                   {'tsm': tsm_name, 'account': account_name})
            raise exception.VolumeBackendAPIException(data=msg)
        return tsmdetails

    def _get_volume_from_response(self, cb_volumes, volume_name):
        """Search the volume in CloudByte storage."""
        vol_res = cb_volumes.get('listFilesystemResponse')
        if vol_res is None:
            msg = _("Null response received from CloudByte's "
                    "list filesystem.")
            raise exception.VolumeBackendAPIException(data=msg)
        volumes = vol_res.get('filesystem')
        if volumes is None:
            msg = _('No volumes found in CloudByte storage.')
            raise exception.VolumeBackendAPIException(data=msg)
        cb_volume = None
        for vol in volumes:
            if vol['name'] == volume_name:
                cb_volume = vol
                break
        if cb_volume is None:
            msg = _("Volume [%s] not found in CloudByte "
                    "storage.") % volume_name
            raise exception.VolumeBackendAPIException(data=msg)
        return cb_volume

    def _build_provider_details_from_volume(self, volume, chap):
        """Build the Cinder model update (location/auth/id) from a backend
        volume dict; *chap* carries CHAP credentials or is falsy."""
        model_update = {}
        model_update['provider_location'] = (
            '%s %s %s' % (volume['ipaddress'] + ':3260', volume['path'], 0)
        )
        # CHAP Authentication related
        model_update['provider_auth'] = None
        if chap:
            model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
                                             % chap)
        model_update['provider_id'] = volume['id']
        LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
                  {'iqn': volume['path'], 'proid': volume['id']})
        return model_update

    def _add_volume_iscsi_service(self, volumeid):
        """Enable the iSCSI service on the backend volume *volumeid*."""
        params = {
            "volumeid": volumeid,
            "status": 'true'
        }
        self._api_request_for_cloudbyte(
            'addVolumeiSCSIService', params)

    def _update_qos_group_on_controller(self, qosid, tsmid):
        """Attach QoS group *qosid* to the controller for TSM *tsmid*."""
        params = {
            "type": "qosgroup",
            "qosid": qosid,
            "tsmid": tsmid
        }
        self._api_request_for_cloudbyte(
            'updateController', params)

    def _update_storage_on_controller(self, storageid, tsmid):
        """Attach storage *storageid* to the controller for TSM *tsmid*."""
        params = {
            "type": "storage",
            "storageid": storageid,
            "tsmid": tsmid
        }
        self._api_request_for_cloudbyte(
            'updateController', params)

    def _configure_volume_iscsi_on_controller(self, viscsiid):
        """Configure the volume iSCSI service *viscsiid* on the controller."""
        params = {
            "type": 'configurevolumeiscsi',
            "viscsiid": viscsiid
        }
        self._api_request_for_cloudbyte(
            'updateController', params)

    def create_volume(self, volume):
        """Create a volume on CloudByte storage and return its provider model.

        Sequence: resolve account/TSM, create a QoS group, create the volume
        ('addVolume2'), enable and configure its iSCSI service on the
        controller, bind the initiator/auth groups, then build the Cinder
        provider details.
        """
        qos_group_params = {}
        file_system_params = {}
        tsm_name = self.configuration.cb_tsm_name
        account_name = self.configuration.cb_account_name
        # Get account id of this account
        account_id = self._get_account_id_from_name(account_name)
        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id')
        if type_id is not None:
            # extract all the extra specs from volume type
            extra_specs = self._get_extra_specs_by_volume_type(ctxt, type_id)
            # check if the creation is meant for OpenStack migration purposes
            if self._check_create_for_migration(volume, extra_specs):
                provider = self._create_volume_for_migration(volume, extra_specs)
                if provider is not None:
                    return provider
                else:
                    msg = _("create cb volume - failed "
                            "- error during create for migration "
                            "- volume: '%s'") % volume['id']
                    raise exception.VolumeBackendAPIException(data=msg)
            qos_group_params, file_system_params = (
                self._get_qos_by_volume_type(ctxt, type_id, extra_specs))
        # Set backend storage volume name using OpenStack volume id
        cb_volume_name = volume['id'].replace("-", "")
        LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
                  "at CloudByte storage w.r.t "
                  "OpenStack volume [%(stack_vol)s].",
                  {'cb_vol': cb_volume_name,
                   'stack_vol': volume.get('id'),
                   'tsm': tsm_name})
        tsm_data = self._request_tsm_details(account_id)
        tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
        # Send request to create a qos group before creating a volume
        LOG.debug("Creating qos group for CloudByte volume [%s].",
                  cb_volume_name)
        qos_data = self._add_qos_group_request(
            volume, tsm_details.get('id'), tsm_details.get('datasetid'), cb_volume_name, qos_group_params)
        # Extract the qos group id from response
        qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
        LOG.debug("Successfully created qos group for CloudByte volume [%s].",
                  cb_volume_name)
        # Send a create volume request to CloudByte API
        vol_data = self._create_volume_request(
            volume, tsm_details.get('datasetid'), qosgroupid,
            tsm_details.get('id'), tsm_details.get('poolid'), cb_volume_name, file_system_params)
        # Since create volume is an async call;
        # need to confirm the creation before proceeding further
        # NOTE(review): the confirmation below is disabled — the code
        # proceeds without waiting for the async create to finish; confirm
        # whether addVolume2 is synchronous on this API version.
        #self._wait_for_volume_creation(vol_data, cb_volume_name)
        # Fetch iscsi id
        cb_volumes = self._api_request_for_cloudbyte(
            'listFileSystem', params={})
        cb_volume = self._get_volume_from_response(cb_volumes,
                                                   cb_volume_name)
        self._add_volume_iscsi_service(cb_volume['id'])
        self._update_qos_group_on_controller(cb_volume['groupid'], tsm_details.get('id'))
        self._update_storage_on_controller(cb_volume['id'], tsm_details.get('id'))
        params = {"storageid": cb_volume['id']}
        iscsi_service_data = self._api_request_for_cloudbyte(
            'listVolumeiSCSIService', params)
        iscsi_id = self._get_iscsi_service_id_from_response(
            cb_volume['id'], iscsi_service_data)
        self._configure_volume_iscsi_on_controller(iscsi_id)
        # Fetch the initiator group ID
        params = {"accountid": account_id}
        iscsi_initiator_data = self._api_request_for_cloudbyte(
            'listiSCSIInitiator', params)
        ig_name = self.configuration.cb_initiator_group_name
        ig_id = self._get_initiator_group_id_from_response(
            iscsi_initiator_data, ig_name)
        LOG.debug("Updating iscsi service for CloudByte volume [%s].",
                  cb_volume_name)
        ag_id = None
        chap_info = {}
        if self.cb_use_chap is True:
            chap_info = self._get_chap_info(account_id)
            ag_id = chap_info['ag_id']
        # Update the iscsi service with above fetched iscsi_id & ig_id
        self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
        LOG.debug("CloudByte volume [%(vol)s] updated with "
                  "iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
                  "authentication group [%(ag)s].",
                  {'vol': cb_volume_name, 'iscsi': iscsi_id,
                   'ig': ig_id, 'ag': ag_id})
        # Provide the model after successful completion of above steps
        provider = self._build_provider_details_from_response(
            cb_volumes, cb_volume_name, chap_info)
        LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
                     "w.r.t OpenStack volume [%(stack_vol)s]."),
                 {'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
        return provider
"content_hash": "1e95691fbec8eccdf807073f86f5e8a6",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 106,
"avg_line_length": 36.00651465798045,
"alnum_prop": 0.5521982992581871,
"repo_name": "CloudByteStorages/openstack",
"id": "2f013c1d29f4f773c26582f0433b4797244bd630",
"size": "11683",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "liberty/cloudbyte/cloudbyte130.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "277992"
},
{
"name": "Shell",
"bytes": "641"
}
],
"symlink_target": ""
} |
__author__ = 'DSOWASP'
# This is the Twisted Get Poetry Now! client, version 3.0.
# NOTE: This should not be used as the basis for production code.
import optparse
from twisted.internet.protocol import Protocol, ClientFactory
def parse_args():
    """Parse command-line ``[hostname]:port`` arguments.

    Returns an iterator of ``(host, port)`` tuples; a bare port defaults
    the host to 127.0.0.1.  Exits with usage help when no address is given.
    """
    usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, Twisted version 3.0
Run it like this:
python get-poetry-1.py port1 port2 port3 ...
"""
    parser = optparse.OptionParser(usage)
    _, addresses = parser.parse_args()
    print("type:addresses",type(addresses),len(addresses),addresses)
    if not addresses:
        print(parser.format_help())
        parser.exit()

    def parse_address(addr):
        # "host:port" or a bare port (split only on the first colon).
        if ':' in addr:
            host, port = addr.split(':', 1)
        else:
            host, port = '127.0.0.1', addr
        if not port.isdigit():
            parser.error('Ports must be integers.')
        return host, int(port)

    return map(parse_address, addresses)
class PoetryProtocol(Protocol):
    """Accumulates the bytes of one poem from a single connection."""

    poem = b''

    def dataReceived(self, data):  # called by Twisted whenever data arrives
        print("来数据了!")
        self.poem += data  # accumulate the entire poem from this connection

    def connectionLost(self, reason):  # called when the connection closes;
        # the server calls loseConnection() itself once it has sent the
        # whole poem, so a closed connection means the poem is complete.
        self.poemReceived(self.poem)

    def poemReceived(self, poem):
        self.factory.poem_finished(poem)  # hand the finished poem back
class PoetryClientFactory(ClientFactory):
    """Builds PoetryProtocol instances and forwards finished poems."""

    protocol = PoetryProtocol  # protocol class used to handle incoming data

    def __init__(self, callback):
        self.callback = callback  # invoked with the complete poem

    def poem_finished(self, poem):  # called by the protocol on completion
        self.callback(poem)  # deliver the poem to the caller
def get_poetry(host, port, callback):
    """Download a poem from *host*:*port* and invoke ``callback(poem)``
    once the poem is complete."""
    from twisted.internet import reactor
    client_factory = PoetryClientFactory(callback)
    reactor.connectTCP(host, port, client_factory)
def poetry_main():
    """Fetch a poem from every address on the command line, then print them."""
    addresses = parse_args()  # py3: a map object — consumed once below
    from twisted.internet import reactor
    poems = []     # completed poems, in arrival order
    expected = 0   # how many servers were contacted

    def got_poem(poem):
        print("type:poem",type(poem))
        poems.append(poem)
        # Stop the reactor once every contacted server has answered.
        if len(poems) == expected:
            reactor.stop()

    for host, port in addresses:
        expected += 1
        print(host,port)
        get_poetry(host, port, got_poem)
    reactor.run()
    for poem in poems:
        print(poem.decode("utf-8"))  # poems arrive as bytes
if __name__ == '__main__':
poetry_main() | {
"content_hash": "3ffe1b26fcb91fb214e13590e8d04df7",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 93,
"avg_line_length": 26.892857142857142,
"alnum_prop": 0.5627490039840638,
"repo_name": "dianshen/python_day",
"id": "702e8276ce0b807aa24d076812ae7fe34ab63e07",
"size": "3419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day10/Twisted1/fileclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2505"
},
{
"name": "HTML",
"bytes": "74003"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "317154"
}
],
"symlink_target": ""
} |
"""Support for Iperf3 sensors."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from . import ATTR_VERSION, DATA_UPDATED, DOMAIN as IPERF3_DOMAIN, SENSOR_TYPES
ATTRIBUTION = 'Data retrieved using Iperf3'
ICON = 'mdi:speedometer'
ATTR_PROTOCOL = 'Protocol'
ATTR_REMOTE_HOST = 'Remote Server'
ATTR_REMOTE_PORT = 'Remote Port'
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info):
    """Set up the Iperf3 sensor.

    Creates one Iperf3Sensor per (configured host, discovered sensor
    type) combination and registers them with Home Assistant.
    """
    sensors = []
    for iperf3_host in hass.data[IPERF3_DOMAIN].values():
        sensors.extend(
            [Iperf3Sensor(iperf3_host, sensor) for sensor in discovery_info]
        )
    # True: request an immediate first update for each entity.
    async_add_entities(sensors, True)
class Iperf3Sensor(RestoreEntity):
    """A Iperf3 sensor implementation.

    Exposes one measurement (e.g. download/upload speed) of an iperf3
    test host, restoring its last state across restarts and refreshing
    when the DATA_UPDATED dispatcher signal fires for its host.
    """
    def __init__(self, iperf3_data, sensor_type):
        """Initialize the sensor."""
        self._name = \
            "{} {}".format(SENSOR_TYPES[sensor_type][0], iperf3_data.host)
        self._state = None
        self._sensor_type = sensor_type
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
        self._iperf3_data = iperf3_data
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement
    @property
    def icon(self):
        """Return icon."""
        return ICON
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            ATTR_PROTOCOL: self._iperf3_data.protocol,
            ATTR_REMOTE_HOST: self._iperf3_data.host,
            ATTR_REMOTE_PORT: self._iperf3_data.port,
            # NOTE(review): assumes a version entry is always present in
            # the host's data -- raises KeyError otherwise; confirm.
            ATTR_VERSION: self._iperf3_data.data[ATTR_VERSION]
        }
    @property
    def should_poll(self):
        """Return the polling requirement for this sensor."""
        return False
    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        # Bug fix: subscribe to DATA_UPDATED *before* the restore check.
        # Previously the subscription was only reached when a previous
        # state existed, so freshly added entities never received
        # update signals.
        async_dispatcher_connect(
            self.hass, DATA_UPDATED, self._schedule_immediate_update
        )
        state = await self.async_get_last_state()
        if not state:
            return
        self._state = state.state
    def update(self):
        """Get the latest data and update the states."""
        data = self._iperf3_data.data.get(self._sensor_type)
        if data is not None:
            self._state = round(data, 2)
    @callback
    def _schedule_immediate_update(self, host):
        # Only refresh when the signal targets this sensor's host.
        if host == self._iperf3_data.host:
            self.async_schedule_update_ha_state(True)
| {
"content_hash": "33747df6f815ae7b3f8f0e1550ab274e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 30.744897959183675,
"alnum_prop": 0.6272817789578493,
"repo_name": "aequitas/home-assistant",
"id": "efc34d8bdef000ddc69cb23858750895d0fe46b4",
"size": "3013",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/iperf3/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
    """Counter holder for IPv6 NAT pool statistics.

    Does not support CRUD operations on its own; use the parent object.
    Counter fields (8-byte counters): Port_Usage (oid 1), Total_Used
    (oid 2), Total_Freed (oid 3), Failed (oid 4).  DeviceProxy is the
    REST/session proxy; see `common/device_proxy.py`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        # Every field defaults to the empty string until populated.
        for attr in ("DeviceProxy", "Failed", "Total_Used",
                     "Total_Freed", "Port_Usage"):
            setattr(self, attr, "")
        # Keyword arguments override the matching attributes.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Pool(A10BaseClass):
    """Statistics endpoint for an IPv6 NAT pool object.

    Supports CRUD operations (inherited from `common/A10BaseClass`) and
    is the "PARENT" class for this module.

    :param pool_name: pool name (string-rlx, 1-63 chars); required.
    :param DeviceProxy: REST/session proxy; see `common/device_proxy.py`.

    URL for this object:
    `https://<Hostname|Ip address>//axapi/v3/ipv6/nat/pool/{pool_name}/stats`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["pool_name"]
        self.b_key = "pool"
        self.a10_url = "/axapi/v3/ipv6/nat/pool/{pool_name}/stats"
        self.DeviceProxy = ""
        self.stats = {}
        self.pool_name = ""
        # Keyword arguments override the defaults above.
        for name, value in kwargs.items():
            setattr(self, name, value)
| {
"content_hash": "2066c27f6629fbd6df3199389f1d87ce",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 167,
"avg_line_length": 31.6,
"alnum_prop": 0.5871470301850049,
"repo_name": "amwelch/a10sdk-python",
"id": "b147c30d18e6e3b1a767ea201c5a3ad836f9817e",
"size": "2054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/ipv6/ipv6_nat_pool_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
} |
import time
import json
import urllib
import urllib2
import copy
from api import FoursquareRequestError, RateLimitExceededError
class VenueAPIGateway:
    """
    An object that interfaces with the foursquare API. All HTTP queries to the
    API should be carried out through a single gateway.
    Deals with unauthenticated queries to the VenueAPI, when only client_id and
    client_secret need to be provided rather than an access_token
    Provides local query rate limiting. If specified, the gateway will delay
    issuing API queries to prevent exceeding the hourly request quota.
    """
    def __init__( self, client_id, client_secret, token_hourly_query_quota=5000 ):
        """
        `token_hourly_query_quota` is the maximum number of queries per hour.
        Pass None to disable local rate limiting.
        """
        self.client_id = client_id
        self.client_secret = client_secret
        #
        # Query delaying...
        if token_hourly_query_quota is not None:
            # Seconds to wait between consecutive queries.
            query_interval = ( 60 * 60 ) / float( token_hourly_query_quota )
            self.earliest_query_time = time.time()  # as secs since unix epoch
            self.query_interval = query_interval
        else:
            self.query_interval = None
            self.earliest_query_time = None
        #
        # URL...
        scheme = 'https://'
        netloc = 'api.foursquare.com'
        path_prefix = '/v2'
        self.api_base_url = scheme + netloc + path_prefix

    def query( self, path_suffix, get_params ):
        """
        Issue a query to the foursquare web service.
        This method will handle inserting client_id and client_secret.
        `get_params` is the GET parameters; a dictionary.
        `path_suffix` is appended to the API's base path. The left-most
        '/' is inserted if absent.
        If query is successful the method returns JSON data encoded as
        python objects via `json.loads()`.
        This method interprets any errors returned by the query and raises
        errors accordingly (RateLimitExceededError / FoursquareRequestError);
        urllib2 errors propagate unchanged.
        Other than the conversion to python objects, the structure and values
        of the data are unaltered. All three foursquare top-level attributes
        are included; i.e., meta, notifications, response.
        """
        #
        # Do sleep for delay query if necessary...
        if self.query_interval is not None:
            while time.time() < self.earliest_query_time:
                sleep_dur = self.earliest_query_time - time.time()
                time.sleep( sleep_dur )
                #~ Potential for scheduler thrashing if time difference
                # is tiny? Near-zero millis rounded down => repeated looping?
        #
        # Build & issue request...
        params = copy.copy( get_params )
        params['client_id'] = self.client_id
        params['client_secret'] = self.client_secret
        path_suffix = path_suffix.lstrip( '/' )
        url = self.api_base_url + '/' + path_suffix + "?" + urllib.urlencode( params )
        # Bug fix: the previous try/except blocks only re-raised the
        # caught urllib2 errors verbatim, so they have been removed.
        response = urllib2.urlopen( url )
        raw_data = response.read()
        py_data = json.loads( raw_data )
        # Request error handling...
        response_code = int( py_data['meta']['code'] )
        if response_code != 200:
            # Bug fix: meta.errorType/errorDetail are strings; the old
            # `[0]` indexing took only their first character, so the
            # rate-limit comparison below could never match.
            error_type = py_data['meta']['errorType']
            error_detail = py_data['meta']['errorDetail']
            if error_type == 'rate_limit_exceeded':
                raise RateLimitExceededError( response_code, error_type,
                    error_detail )
            raise FoursquareRequestError( response_code, error_type,
                error_detail )
        if self.query_interval is not None:
            # Only push the rate-limit window forward after a success.
            self.earliest_query_time = time.time() + self.query_interval
        #
        # Fin
        return py_data
if __name__ == "__main__":
import _credentials
from api import APIWrapper
client_id = _credentials.client_id
client_secret = _credentials.client_secret
gateway = VenueAPIGateway( client_id=client_id, client_secret=client_secret )
api = APIWrapper( gateway )
print "Grab info about a given venue..."
reply = api.query_resource( "venues", "591313" )
data = reply['response']
print data
print
print "Search for venues near a given location..."
venues = api.find_venues_near( 51.4777, -3.1844 )
for v in venues:
print v['name']
| {
"content_hash": "03e091195917fd7661703fb286fcfbe7",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 91,
"avg_line_length": 35.916666666666664,
"alnum_prop": 0.5859523307319131,
"repo_name": "mattjw/cf4sq",
"id": "5e7ff538eedbf32c6f51f3cf4a132c1366546d33",
"size": "5386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prototype/venues_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "153335"
},
{
"name": "Shell",
"bytes": "898"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("..")
from util.validator import Validator, ValidationRule, ValidationException, NULL_DATE
# Libs.
import unittest
import datetime
class ValidatorTests(unittest.TestCase):
    """Unit tests for Validator.validate driven by ValidationRule."""

    def _assertOK(self, in_val, out_val, rule):
        # Validation succeeds and yields the expected converted value.
        self.assertEqual(Validator.validate(in_val, rule=rule), out_val)

    def _assertFail(self, in_val, exception_class, rule):
        # Validation rejects the value with the expected exception type.
        self.assertRaises(exception_class, Validator.validate, in_val, rule=rule)

    def testStrValidation(self):
        # Length bounds.
        length_rule = ValidationRule(type="str", min_length=1, max_length=5)
        self._assertOK("ololo", "ololo", length_rule)
        self._assertFail("", ValidationException, length_rule)
        self._assertFail("123456", ValidationException, length_rule)
        # Regex mask.
        mask_rule = ValidationRule(type="str", max_length=10, mask="\d\d\d\d-\d\d-\d\d")
        self._assertOK("2012-12-31", "2012-12-31", mask_rule)
        self._assertFail("2012-12-XX", ValidationException, rule=mask_rule)
        # Whitespace stripping is opt-in.
        strip_rule = ValidationRule(type="str", min_length=1, max_length=5)
        self._assertFail(" ololo ", ValidationException, strip_rule)
        strip_rule.strip_required = True
        self._assertOK(" ololo ", "ololo", strip_rule)

    def testIntValidation(self):
        bounds_rule = ValidationRule(type="int", min_value=1, max_value=5)
        self._assertOK("3", 3, bounds_rule)
        self._assertFail("0", ValidationException, bounds_rule)
        self._assertFail("6", ValidationException, bounds_rule)
        # Non-numeric input fails int() conversion itself.
        self._assertFail("z", ValueError, bounds_rule)

    def testDateValidation(self):
        date_rule = ValidationRule(type="date")
        self._assertOK("23.04.2013", datetime.date(2013, 4, 23), date_rule)
        self._assertFail("23-04.2013", ValidationException, date_rule)
        self._assertFail("", ValidationException, date_rule)
        date_rule.null_date_permitted = True
        self._assertOK("", NULL_DATE, date_rule)
        # 29.02.2013 is invalid: February 2013 has only 28 days.
        self._assertFail("29.02.2013", ValidationException, date_rule)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0924911335b78728ff8d37d93b68b821",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 84,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.6463068181818182,
"repo_name": "ololobster/cvidone",
"id": "8214e6300633de0dae50e53d668ef57631567fb7",
"size": "2220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/validator_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9702"
},
{
"name": "HTML",
"bytes": "13562"
},
{
"name": "JavaScript",
"bytes": "134331"
},
{
"name": "PLpgSQL",
"bytes": "20989"
},
{
"name": "Python",
"bytes": "130461"
},
{
"name": "Shell",
"bytes": "3410"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import itertools
class VisError(Exception):
    """Base error type raised by the visualisation pipeline."""
    pass
class Node(object):
    """A graph node wrapping an arbitrary object.

    `seq` disambiguates nodes wrapping equal objects; `cluster` is the
    Cluster the node belongs to (or None).  `content` maps content
    section names to rendered rows; the remaining attributes are
    presentation hints filled in later by annotators.
    """
    def __init__(self, seq, obj, cluster=None):
        self.obj = obj
        self.seq = seq
        self.cluster = cluster
        self.content = OrderedDict()
        # Presentation attributes, set by node annotators.
        self.style = None
        self.fillcolor = None
        self.color = None
        self.width = None
        self.url = None
        self.tooltip = None
    def set_cluster(self, cluster):
        """Move this node into `cluster`, leaving its current one.

        Bug fix: the original signature lacked `self` (and referenced an
        undefined name `node`), which made the method uncallable.
        """
        if self.cluster is not None:
            self.cluster.remove_node(self)
        cluster.add_node(self)
        self.cluster = cluster
    def __eq__(self, other):
        return self.obj.__eq__(other.obj) and self.seq == other.seq
    def __hash__(self):
        # Hash by the wrapped object only; consistent with __eq__
        # because equal nodes necessarily have equal objects.
        return self.obj.__hash__()
class Edge(object):
    """A directed edge from `src` to `dst` carrying presentation hints."""
    def __init__(self, src, dst, meta=None, color=None, label=None, style=None, width=None, weight=None):
        self.src = src
        self.dst = dst
        # Bug fix: the original default `meta={}` was a shared mutable,
        # so every edge created without `meta` aliased one dict.
        self.meta = {} if meta is None else meta
        self.color = color
        self.label = label
        self.style = style
        self.width = width
        self.weight = weight
    def __eq__(self, other):
        return self.src == other.src and self.dst == other.dst
    def __hash__(self):
        # Bug fix: hash() takes a single argument; hash the endpoint
        # pair so __hash__ is consistent with __eq__ (the original
        # `hash(self.src, self.dst)` raised TypeError).
        return hash((self.src, self.dst))
class Source(object):
    """Parses an input object into the pipeline's graph (abstract base)."""
    def __init__(self):
        pass
    def set_vis(self, vis):
        # Back-reference to the owning Vis instance.
        self.vis = vis
    def parse(self, obj):
        # Subclasses must implement the actual parsing.
        raise NotImplementedError('parse() is not implemented.')
class NodeAnnotator(object):
    """Decorates nodes with presentation attributes (abstract base)."""
    def __init__(self):
        # Set later via set_graph() before annotation begins.
        self.graph = None
    def set_graph(self, graph):
        self.graph = graph
    def annotate_node(self, node):
        # Subclasses must implement the per-node annotation.
        raise NotImplementedError('annotate_node() is not implemented.')
class EdgeAnnotator(object):
    """Decorates edges with presentation attributes (abstract base)."""
    def __init__(self):
        # Set later via set_graph() before annotation begins.
        self.graph = None
    def set_graph(self, graph):
        self.graph = graph
    def annotate_edge(self, edge):
        # Subclasses must implement the per-edge annotation.
        raise NotImplementedError('annotate_edge() is not implemented.')
class Transformer(object):
    """Rewrites the graph in place before rendering (abstract base)."""
    def __init__(self):
        pass
    def transform(self, graph):
        # Subclasses must implement the transformation.
        raise NotImplementedError('transform() is not implemented.')
class Clusterer(object):
    """Groups a graph's nodes into clusters (abstract base)."""
    def __init__(self):
        pass
    def cluster(self, graph):
        # Subclasses must implement the clustering strategy.
        raise NotImplementedError('cluster() is not implemented.')
class Content(object):
    """A named, columnar content section rendered into each node.

    Subclasses are expected to provide `gen_render(node)` filling
    `node.content[self.name]`; this base class only manages the column
    list and the attached content annotators.
    """
    def __init__(self, name, columns):
        self.name = name
        self.columns = columns
        self.annotators = []
    def get_columns(self):
        return self.columns
    def add_column_after(self, column):
        # Append only if not already present.
        if column not in self.columns:
            self.columns.append(column)
    def add_column_before(self, column):
        # Prepend only if not already present.
        if column not in self.columns:
            self.columns.insert(0, column)
    def add_annotator(self, obj):
        # NOTE(review): relies on the annotator exposing register();
        # the ContentAnnotator base class in this module does not
        # define it -- presumably subclasses do. Verify before reuse.
        obj.register(self)
        self.annotators.append(obj)
    def render(self, n):
        """Render this section into node `n`, then run the annotators."""
        self.gen_render(n)
        for an in self.annotators:
            # Annotate only if gen_render actually produced content.
            if self.name in n.content:
                an.annotate_content(n, n.content[self.name])
class ContentAnnotator(object):
    """Annotates one named content section of a node (abstract base)."""
    def __init__(self, cname):
        # Name of the content section this annotator targets.
        self.cname = cname
    def get_cname(self):
        return self.cname
    def annotate_content(self, content):
        # Subclasses must implement the annotation.
        raise NotImplementedError('annotate_content() is not implemented.')
class Cluster(object):
    """A named group of nodes, optionally nested under a parent cluster."""
    def __init__(self, key, parent=None, nodes=None, label=None, visible=True):
        self.key = key
        self.parent = parent
        # A falsy `nodes` argument (None or empty) gets a fresh set.
        self.nodes = nodes or set()
        self.visible = visible
        self.label = label
        # Presentation hints, filled in later.
        self.style = None
        self.fillcolor = None
    def add_node(self, node):
        """Add `node` to this cluster and point its back-reference here."""
        self.nodes.add(node)
        node.cluster = self
    def remove_node(self, node):
        """Remove `node` from this cluster and clear its back-reference."""
        self.nodes.remove(node)
        node.cluster = None
class Graph(object):
    """A mutable directed graph of Node/Edge objects with named clusters."""
    def __init__(self, nodes=None, edges=None):
        self.nodes = nodes if nodes else set()
        self.edges = edges if edges else []
        # Monotonic counter recording cluster creation order in seqmap.
        self.seqctr = itertools.count()
        self.seqmap = {}
        self.clusters = {}
    def create_cluster(self, key, parent=None, nodes=None, label=None, visible=True):
        """Create, register and return a new Cluster under `key`."""
        cluster = Cluster(key, parent, nodes, label, visible)
        self.clusters[key] = cluster
        self.seqmap[key] = next(self.seqctr)
        return cluster
    def get_cluster(self, key):
        """Return the cluster registered under `key`, or None."""
        if key in self.clusters:
            return self.clusters[key]
        else:
            return None
    def get_clusters(self, parent=None):
        """Return clusters whose parent is `parent` (top-level by default)."""
        return list(filter(lambda c:c.parent==parent, self.clusters.values()))
    def add_node(self, node):
        self.nodes.add(node)
        # Keep the node's pre-assigned cluster membership in sync.
        if node.cluster:
            node.cluster.add_node(node)
    def add_edge(self, edge):
        self.edges.append(edge)
    def remove_node(self, node):
        """Remove `node` and every edge that touches it."""
        self.nodes.remove(node)
        self.edges = list(filter(lambda edge: edge.src != node and edge.dst != node, self.edges))
    def remove_edge(self, edge):
        self.edges.remove(edge)
    # TODO/FIXME ("thats bad"): replaces this graph's node/edge
    # collections wholesale; cluster bookkeeping is NOT updated.
    def filter_nodes(self, node_filter):
        new_graph = self.filtered_view(node_filter)
        self.nodes = new_graph.nodes
        self.edges = new_graph.edges
    def filtered_view(self, node_filter):
        """Return a new Graph of nodes passing `node_filter` plus the
        edges whose both endpoints pass."""
        nodes = list(filter(lambda _: node_filter(_), self.nodes))
        edges = list(filter(lambda edge: node_filter(edge.src) and node_filter(edge.dst), self.edges))
        return Graph(nodes, edges)
class VisPipeLine(object):
    """Assembles a source, content sections, annotators, transformers and
    clusterers, and runs them over a Graph to produce the annotated result."""
    def __init__(self):
        self.content = OrderedDict()
        self.node_annotators = []
        self.edge_annotators = []
        self.transformers = []
        self.clusterers = []
        self.graph = Graph()
    def set_source(self, source):
        """Set the Source used to parse input objects into the graph."""
        if not isinstance(source, Source):
            # Bug fix: the message previously referenced an undefined
            # name `obj`, raising NameError instead of VisError.
            raise VisError("Incompatible source type '%s'" % type(source))
        self.source = source
    def add_content(self, obj):
        """Register a Content section under its name."""
        if not isinstance(obj, Content):
            raise VisError("Incompatible content type '%s'" % type(obj))
        self.content[obj.name] = obj
    def add_node_annotator(self, obj):
        if not isinstance(obj, NodeAnnotator):
            raise VisError("Incompatible node annotator type '%s'" % type(obj))
        self.node_annotators.append(obj)
        return self
    def add_edge_annotator(self, obj):
        if not isinstance(obj, EdgeAnnotator):
            raise VisError("Incompatible edge annotator type '%s'" % type(obj))
        self.edge_annotators.append(obj)
        return self
    def add_clusterer(self, obj):
        if not isinstance(obj, Clusterer):
            raise VisError("Incompatible clusterer type '%s'" % type(obj))
        self.clusterers.append(obj)
        return self
    def add_content_annotator(self, obj):
        """Attach a ContentAnnotator to its (already registered) content."""
        if not isinstance(obj, ContentAnnotator):
            raise VisError("Incompatible content annotator type '%s'" % type(obj))
        cname = obj.get_cname()
        if cname not in self.content:
            raise VisError("Content '%s' not found, required by annotator '%s'" % (cname, type(obj)))
        self.content[cname].add_annotator(obj)
        return self
    def add_transformer(self, obj):
        if not isinstance(obj, Transformer):
            raise VisError("Incompatible transformer type '%s'" % type(obj))
        self.transformers.append(obj)
        return self
    def set_input(self, obj):
        """Parse `obj` into self.graph via the configured source."""
        self.source.parse(obj, self.graph)
    def preprocess(self, obj, filter=None):
        # NOTE(review): `filter` is accepted but unused here; kept for
        # interface compatibility with callers.
        self.set_input(obj)
        for t in self.transformers:
            t.transform(self.graph)
    def process(self, filter=None):
        """Annotate and cluster the (optionally filtered) graph; return it."""
        if filter is None:
            graph = self.graph
        else:
            graph = self.graph.filtered_view(filter)
        for ea in self.edge_annotators:
            ea.set_graph(graph)
        for na in self.node_annotators:
            na.set_graph(graph)
        for n in graph.nodes:
            # Render every content section before node annotation.
            for c in self.content.values():
                c.render(n)
            for na in self.node_annotators:
                na.annotate_node(n)
        for e in graph.edges:
            for ea in self.edge_annotators:
                ea.annotate_edge(e)
        for c in self.clusterers:
            c.cluster(graph)
        return graph
class Vis(object):
    """Facade wiring a source, pipeline stages and an output together.

    The add_* / set_source methods return self so configuration chains.
    """
    def __init__(self):
        self.pipeline = VisPipeLine()
    def preprocess(self, obj):
        """Parse `obj` and run the transformers (no rendering yet)."""
        self.pipeline.preprocess(obj)
    def process(self, obj=None, filter=None):
        """Run the pipeline (preprocessing `obj` if given) and render
        the resulting graph with the configured output backend."""
        if obj:
            self.preprocess(obj)
        graph = self.pipeline.process(filter=filter)
        return self.output.generate(graph)
    def set_source(self, source):
        self.pipeline.set_source(source)
        return self
    def add_content(self, obj):
        self.pipeline.add_content(obj)
        return self
    def add_node_annotator(self, obj):
        self.pipeline.add_node_annotator(obj)
        return self
    def add_edge_annotator(self, obj):
        self.pipeline.add_edge_annotator(obj)
        return self
    def add_content_annotator(self, obj):
        self.pipeline.add_content_annotator(obj)
        return self
    def add_clusterer(self, obj):
        self.pipeline.add_clusterer(obj)
        return self
    def add_transformer(self, obj):
        self.pipeline.add_transformer(obj)
        return self
    def set_output(self, output):
        """Set the Output backend used by process().

        Bug fix: the message previously referenced an undefined name
        `obj`, raising NameError instead of VisError.
        """
        if not isinstance(output, Output):
            raise VisError("Incompatible output type '%s'" % type(output))
        self.output = output
class Output(object):
    """Renders a processed graph into a concrete format (abstract base)."""
    def __init__(self):
        pass
    def set_vis(self, vis):
        # Back-reference to the owning Vis instance.
        self.vis = vis
    def generate(self, graph):
        """Produce output for `graph`; subclasses must override.

        Bug fix: the stub lacked `self` (and the graph parameter), so
        calling it through an instance -- as Vis.process() does with
        `self.output.generate(graph)` -- raised TypeError instead of
        NotImplementedError; the old message also named parse().
        """
        raise NotImplementedError('generate() is not implemented.')
| {
"content_hash": "233a36dabb6303a0b4e73e2bc66fe4f3",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 105,
"avg_line_length": 28.353276353276353,
"alnum_prop": 0.5794815112540193,
"repo_name": "axt/bingraphvis",
"id": "760c484b477aa7f1c265ac68f1508b5069dd38fb",
"size": "9954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bingraphvis/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "90036"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2015 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
from Queue import Queue
import axis_ep
module = 'dsp_mult'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
def dut_dsp_mult(clk,
                 rst,
                 current_test,
                 input_a_tdata,
                 input_a_tvalid,
                 input_a_tready,
                 input_b_tdata,
                 input_b_tvalid,
                 input_b_tready,
                 output_tdata,
                 output_tvalid,
                 output_tready):
    """Build and return a MyHDL cosimulation of the dsp_mult Verilog DUT.

    Compiles the Verilog sources with Icarus Verilog (module-level
    `build_cmd`) and connects the given MyHDL signals to the compiled
    model run under vvp.

    Raises:
        Exception: if the iverilog build command fails.
    """
    if os.system(build_cmd):
        raise Exception("Error running build command")
    return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
                        clk=clk,
                        rst=rst,
                        current_test=current_test,
                        input_a_tdata=input_a_tdata,
                        input_a_tvalid=input_a_tvalid,
                        input_a_tready=input_a_tready,
                        input_b_tdata=input_b_tdata,
                        input_b_tvalid=input_b_tvalid,
                        input_b_tready=input_b_tready,
                        output_tdata=output_tdata,
                        output_tvalid=output_tvalid,
                        output_tready=output_tready)
def bench():
    """MyHDL testbench: drive the dsp_mult DUT through one multiply and
    check the product on the AXI-Stream output."""
    # Parameters
    WIDTH = 16
    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])
    input_a_tdata = Signal(intbv(0)[WIDTH:])
    input_a_tvalid = Signal(bool(0))
    input_b_tdata = Signal(intbv(0)[WIDTH:])
    input_b_tvalid = Signal(bool(0))
    output_tready = Signal(bool(0))
    # Outputs
    input_a_tready = Signal(bool(0))
    input_b_tready = Signal(bool(0))
    output_tdata = Signal(intbv(0)[WIDTH*2:])
    output_tvalid = Signal(bool(0))
    # sources and sinks
    input_a_source_queue = Queue()
    input_a_source_pause = Signal(bool(0))
    input_b_source_queue = Queue()
    input_b_source_pause = Signal(bool(0))
    output_sink_queue = Queue()
    output_sink_pause = Signal(bool(0))
    input_a_source = axis_ep.AXIStreamSource(clk,
                                             rst,
                                             tdata=input_a_tdata,
                                             tvalid=input_a_tvalid,
                                             tready=input_a_tready,
                                             fifo=input_a_source_queue,
                                             pause=input_a_source_pause,
                                             name='input_a_source')
    input_b_source = axis_ep.AXIStreamSource(clk,
                                             rst,
                                             tdata=input_b_tdata,
                                             tvalid=input_b_tvalid,
                                             tready=input_b_tready,
                                             fifo=input_b_source_queue,
                                             pause=input_b_source_pause,
                                             name='input_b_source')
    output_sink = axis_ep.AXIStreamSink(clk,
                                        rst,
                                        tdata=output_tdata,
                                        tvalid=output_tvalid,
                                        tready=output_tready,
                                        fifo=output_sink_queue,
                                        pause=output_sink_pause,
                                        name='output_sink')
    # DUT
    dut = dut_dsp_mult(clk,
                       rst,
                       current_test,
                       input_a_tdata,
                       input_a_tvalid,
                       input_a_tready,
                       input_b_tdata,
                       input_b_tvalid,
                       input_b_tready,
                       output_tdata,
                       output_tvalid,
                       output_tready)
    @always(delay(4))
    def clkgen():
        # 8 ns clock period.
        clk.next = not clk
    @instance
    def check():
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        # testbench stimulus
        yield clk.posedge
        print("test 1: test multiplier")
        current_test.next = 1
        # Operands padded with zeros to keep the pipeline flushed.
        test_frame1 = axis_ep.AXIStreamFrame()
        test_frame1.data = [123] + [0]*4
        test_frame2 = axis_ep.AXIStreamFrame()
        test_frame2.data = [456] + [0]*4
        input_a_source_queue.put(test_frame1)
        input_b_source_queue.put(test_frame2)
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        while output_sink_queue.empty():
            yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        for i in range(4):
            rx_frame = output_sink_queue.get(False)
        # NOTE(review): get(False) is called four times above and once
        # more here; verify this matches the number of frames the sink
        # actually produces, otherwise Queue.Empty is raised.
        rx_frame = output_sink_queue.get(False)
        assert rx_frame.data[0] == 123*456
        yield delay(100)
        raise StopSimulation
    return dut, input_a_source, input_b_source, output_sink, clkgen, check
def test_bench():
    """Run the testbench simulation to completion (test entry point)."""
    sim = Simulation(bench())
    sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| {
"content_hash": "41f9c0d34507b5d0489b01993320aed2",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 77,
"avg_line_length": 31.99,
"alnum_prop": 0.5175054704595186,
"repo_name": "alexforencich/hdg2000",
"id": "89aaeacbf5442cdc0f5c33403d0378d213291aa4",
"size": "6421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fpga/lib/dsp/tb/test_dsp_mult.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9054"
},
{
"name": "Python",
"bytes": "934476"
},
{
"name": "Shell",
"bytes": "8661"
},
{
"name": "Verilog",
"bytes": "687285"
}
],
"symlink_target": ""
} |
import contextlib
import sys
import six
import traceback
from snipping.utils import fileutil
from snipping.utils import strutil
@contextlib.contextmanager
def reopen_stdout_stderr():
    """Temporarily redirect both stdout and stderr into a temp file.

    Yields the file object from fileutil.temp_file(); the previous
    streams are restored on exit even if the body raises.
    """
    f = fileutil.temp_file()
    old_stdout, old_stderr = sys.stdout, sys.stderr
    sys.stderr = sys.stdout = f
    try:
        yield f
    finally:
        sys.stdout, sys.stderr = old_stdout, old_stderr
def _exception(top_frame):
t, v, tb = sys.exc_info()
tbs = traceback.extract_tb(tb)
for seq, tb_tuple in enumerate(tbs):
if tb_tuple[0] == top_frame:
tbs = tbs[seq:]
break
outputs = traceback.format_list(tbs)
if outputs:
outputs.insert(0, u"Traceback (most recent call last):\n")
outputs.extend(traceback.format_exception_only(t, v))
return ''.join(outputs)
def exec_globals():
    """Return a fresh globals mapping for executing user code as __main__."""
    env = {}
    env['__builtins__'] = six.moves.builtins
    env['__doc__'] = None
    env['__name__'] = '__main__'
    env['__package__'] = None
    return env
def exec_locals():
    """Return a fresh, empty locals mapping for executing user code."""
    return dict()
def compile_text(content, from_file=None):
    """Compile `content` in 'exec' mode; filename defaults to '<stdin>'."""
    filename = from_file if from_file else '<stdin>'
    return compile(content, filename, 'exec')
def execwrap(content, from_file=None):
    """Execute Python `content` and capture its combined stdout/stderr.

    `content` may be source text (compiled first) or an already-compiled
    code object.  Returns an `(output, globals)` tuple: `output` is the
    captured text (with a formatted traceback, trimmed at `from_file`,
    appended on failure) and `globals` is the namespace after execution
    ({} when execution raised).
    """
    from_file = from_file or '<stdin>'
    if isinstance(content, six.string_types):
        content = compile_text(content, from_file=from_file)
    def _inner():
        # Run with globals == locals, like a top-level module execution.
        global_env = exec_globals()
        local_env = global_env
        six.exec_(content, global_env, local_env)
        return global_env
    globals_ = {}
    output_handler = None
    try:
        with reopen_stdout_stderr() as output_handler:
            globals_ = _inner()
    except Exception:
        # NOTE(review): assumes the handle from reopen_stdout_stderr()
        # can be read back after writing -- confirm in fileutil.
        if output_handler is not None:
            output = "%s%s" % (output_handler.read(),
                               _exception(from_file))
        else:
            output = _exception(from_file)
    else:
        output = output_handler.read()
    output = strutil.ensure_text(output)
    return output, globals_
| {
"content_hash": "aeb4eb4f7ad959414bd6a840c36de81d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 66,
"avg_line_length": 24.036585365853657,
"alnum_prop": 0.595636732623034,
"repo_name": "yittg/Snipping",
"id": "b528f44939f00999a0f8f36d951d481df4356c85",
"size": "1971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snipping/utils/executil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28557"
}
],
"symlink_target": ""
} |
"""Impl. of default bucket storage class command for Google Cloud Storage."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
from gslib.utils.text_util import NormalizeStorageClass
_SET_SYNOPSIS = """
gsutil defstorageclass set <storage-class> bucket_url...
"""
_GET_SYNOPSIS = """
gsutil defstorageclass get bucket_url...
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')
_SET_DESCRIPTION = """
<B>SET</B>
The "defstorageclass set" command sets the default
`storage class <https://cloud.google.com/storage/docs/storage-classes>`_ for
the specified bucket(s). If you specify a default storage class for a certain
bucket, Google Cloud Storage applies the default storage class to all new
objects uploaded to that bucket, except when the storage class is overridden
by individual upload requests.
Setting a default storage class on a bucket provides a convenient way to
ensure newly uploaded objects have a specific storage class. If you don't set
the bucket's default storage class, it will default to Standard.
"""
_GET_DESCRIPTION = """
<B>GET</B>
Gets the default storage class for a bucket.
"""
_DESCRIPTION = """
The defstorageclass command has two sub-commands:
""" + '\n'.join([_SET_DESCRIPTION + _GET_DESCRIPTION])
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
class DefStorageClassCommand(Command):
  """Implementation of gsutil defstorageclass command.

  Supports two sub-commands:
    set <storage-class> gs://bucket...  - patch the bucket's default class
    get gs://bucket...                  - print each bucket's default class
  Only gs:// URLs are accepted; default storage class is a GCS concept.
  """

  # Command specification. See base class for documentation.
  command_spec = Command.CreateCommandSpec(
      'defstorageclass',
      usage_synopsis=_SYNOPSIS,
      min_args=2,
      max_args=NO_MAX,
      supported_sub_args='',
      file_url_ok=False,
      provider_url_ok=False,
      urls_start_arg=2,
      gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
      gs_default_api=ApiSelector.JSON,
      argparse_arguments={
          'set': [
              # FreeTextArgument allows for using storage class abbreviations.
              CommandArgument.MakeFreeTextArgument(),
              CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
          ],
          'get': [CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),],
      },
  )
  # Help specification. See help_provider.py for documentation.
  help_spec = Command.HelpSpec(
      help_name='defstorageclass',
      help_name_aliases=['defaultstorageclass'],
      help_type='command_help',
      help_one_line_summary='Get or set the default storage class on buckets',
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={
          'get': _get_help_text,
          'set': _set_help_text,
      },
  )

  def _CheckIsGsUrl(self, url_str):
    """Raises CommandException unless url_str is a gs:// URL."""
    if not url_str.startswith('gs://'):
      raise CommandException(
          '"%s" does not support the URL "%s". Did you mean to use a gs:// '
          'URL?' % (self.command_name, url_str))

  def _CalculateUrlsStartArg(self):
    """Returns the self.args index where bucket URLs begin.

    "set" takes a storage-class argument before the URLs; "get" does not.
    """
    if not self.args:
      self.RaiseWrongNumberOfArgumentsException()
    if self.args[0].lower() == 'set':
      return 2
    else:
      return 1

  def _SetDefStorageClass(self):
    """Sets the default storage class for a bucket."""
    # At this point, "set" has been popped off the front of self.args.
    normalized_storage_class = NormalizeStorageClass(self.args[0])
    url_args = self.args[1:]
    if not url_args:
      self.RaiseWrongNumberOfArgumentsException()
    # Track whether any wildcard/URL matched at least one bucket so we can
    # report "no URLs matched" as an error instead of silently succeeding.
    some_matched = False
    for url_str in url_args:
      self._CheckIsGsUrl(url_str)
      # Throws a CommandException if the argument is not a bucket.
      bucket_iter = self.GetBucketUrlIterFromArg(url_str, bucket_fields=['id'])
      for blr in bucket_iter:
        some_matched = True
        bucket_metadata = apitools_messages.Bucket()
        self.logger.info('Setting default storage class to "%s" for bucket %s' %
                         (normalized_storage_class, blr.url_string.rstrip('/')))
        bucket_metadata.storageClass = normalized_storage_class
        self.gsutil_api.PatchBucket(blr.storage_url.bucket_name,
                                    bucket_metadata,
                                    provider=blr.storage_url.scheme,
                                    fields=['id'])
    if not some_matched:
      raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))

  def _GetDefStorageClass(self):
    """Gets the default storage class for a bucket."""
    # At this point, "get" has been popped off the front of self.args.
    url_args = self.args
    some_matched = False
    for url_str in url_args:
      self._CheckIsGsUrl(url_str)
      bucket_iter = self.GetBucketUrlIterFromArg(url_str,
                                                 bucket_fields=['storageClass'])
      for blr in bucket_iter:
        some_matched = True
        print('%s: %s' %
              (blr.url_string.rstrip('/'), blr.root_object.storageClass))
    if not some_matched:
      raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))

  def RunCommand(self):
    """Command entry point for the defstorageclass command."""
    action_subcommand = self.args.pop(0)
    # Collect subcommand info (and, for "set", the normalized class) for
    # analytics logging before dispatching.
    subcommand_args = [action_subcommand]
    if action_subcommand == 'get':
      func = self._GetDefStorageClass
    elif action_subcommand == 'set':
      func = self._SetDefStorageClass
      normalized_storage_class = NormalizeStorageClass(self.args[0])
      subcommand_args.append(normalized_storage_class)
    else:
      raise CommandException(
          ('Invalid subcommand "%s" for the %s command.\n'
           'See "gsutil help %s".') %
          (action_subcommand, self.command_name, self.command_name))
    metrics.LogCommandParams(subcommands=subcommand_args)
    func()
    return 0
| {
"content_hash": "c03c5f280eef701c4774ff1aa0d7fca9",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 87,
"avg_line_length": 37.67857142857143,
"alnum_prop": 0.6725118483412322,
"repo_name": "endlessm/chromium-browser",
"id": "2d9c20ae21fe73c6cc58513f0a03a613721514ab",
"size": "6950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/third_party/gsutil/gslib/commands/defstorageclass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Discover and run std-library "unittest" style tests."""
import sys
import traceback
import types
from typing import Any
from typing import Callable
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
import _pytest._code
import pytest
from _pytest.compat import getimfunc
from _pytest.compat import is_async_function
from _pytest.config import hookimpl
from _pytest.fixtures import FixtureRequest
from _pytest.nodes import Collector
from _pytest.nodes import Item
from _pytest.outcomes import exit
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.python import Class
from _pytest.python import Function
from _pytest.python import Module
from _pytest.runner import CallInfo
from _pytest.scope import Scope
if TYPE_CHECKING:
import unittest
import twisted.trial.unittest
# The shape of a sys.exc_info() result: either a full
# (type, value, traceback) triple, or the all-None triple when no
# exception is being handled.
_SysExcInfoType = Union[
    Tuple[Type[BaseException], BaseException, types.TracebackType],
    Tuple[None, None, None],
]
def pytest_pycollect_makeitem(
    collector: Union[Module, Class], name: str, obj: object
) -> Optional["UnitTestCase"]:
    """Collect *obj* as a UnitTestCase when it subclasses unittest.TestCase."""
    # Only handle the object when unittest is already imported and obj is a
    # TestCase subclass. Any failure — unittest not in sys.modules, obj not
    # being a class, etc. — means this hook does not apply.
    try:
        testcase_base = sys.modules["unittest"].TestCase  # type: ignore
        is_unittest_case = issubclass(obj, testcase_base)  # type: ignore
    except Exception:
        return None
    if not is_unittest_case:
        return None
    return UnitTestCase.from_parent(collector, name=name, obj=obj)
class UnitTestCase(Class):
    """Collector for the test methods of a ``unittest.TestCase`` subclass."""

    # Marker for fixturemanger.getfixtureinfo()
    # to declare that our children do not support funcargs.
    nofuncargs = True

    def collect(self) -> Iterable[Union[Item, Collector]]:
        """Yield one TestCaseFunction per test method found by TestLoader."""
        from unittest import TestLoader

        cls = self.obj
        if not getattr(cls, "__test__", True):
            return

        # Skipped classes are still collected (so they can be reported as
        # skipped), but no setup/teardown fixtures are injected for them.
        skipped = _is_skipped(cls)
        if not skipped:
            self._inject_setup_teardown_fixtures(cls)
            self._inject_setup_class_fixture()

        self.session._fixturemanager.parsefactories(self, unittest=True)
        loader = TestLoader()
        foundsomething = False
        for name in loader.getTestCaseNames(self.obj):
            x = getattr(self.obj, name)
            if not getattr(x, "__test__", True):
                continue
            funcobj = getimfunc(x)
            yield TestCaseFunction.from_parent(self, name=name, callobj=funcobj)
            foundsomething = True

        if not foundsomething:
            # Fall back to the TestCase "runTest" protocol, unless runTest is
            # twisted trial's base implementation (not a real test).
            runtest = getattr(self.obj, "runTest", None)
            if runtest is not None:
                ut = sys.modules.get("twisted.trial.unittest", None)
                # Type ignored because `ut` is an opaque module.
                if ut is None or runtest != ut.TestCase.runTest:  # type: ignore
                    yield TestCaseFunction.from_parent(self, name="runTest")

    def _inject_setup_teardown_fixtures(self, cls: type) -> None:
        """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding
        teardown functions (#517)."""
        class_fixture = _make_xunit_fixture(
            cls,
            "setUpClass",
            "tearDownClass",
            "doClassCleanups",
            scope=Scope.Class,
            pass_self=False,
        )
        if class_fixture:
            cls.__pytest_class_setup = class_fixture  # type: ignore[attr-defined]

        method_fixture = _make_xunit_fixture(
            cls,
            "setup_method",
            "teardown_method",
            None,
            scope=Scope.Function,
            pass_self=True,
        )
        if method_fixture:
            cls.__pytest_method_setup = method_fixture  # type: ignore[attr-defined]
def _make_xunit_fixture(
    obj: type,
    setup_name: str,
    teardown_name: str,
    cleanup_name: Optional[str],
    scope: Scope,
    pass_self: bool,
):
    """Create an auto-use fixture wrapping the xunit-style setup/teardown
    methods of *obj* (e.g. setUpClass/tearDownClass), or return None when
    neither method exists.

    cleanup_name names an optional cleanup hook (e.g. doClassCleanups) that
    is invoked when setup raises and always after teardown. pass_self
    controls whether the methods are called with the instance and the
    current test function.
    """
    setup = getattr(obj, setup_name, None)
    teardown = getattr(obj, teardown_name, None)
    if setup is None and teardown is None:
        # Nothing to wrap -> no fixture needed.
        return None

    if cleanup_name:
        cleanup = getattr(obj, cleanup_name, lambda *args: None)
    else:

        def cleanup(*args):
            pass

    @pytest.fixture(
        scope=scope.value,
        autouse=True,
        # Use a unique name to speed up lookup.
        name=f"_unittest_{setup_name}_fixture_{obj.__qualname__}",
    )
    def fixture(self, request: FixtureRequest) -> Generator[None, None, None]:
        # Skip early (with the item's location) when the class/instance is
        # marked with @unittest.skip.
        if _is_skipped(self):
            reason = self.__unittest_skip_why__
            raise pytest.skip.Exception(reason, _use_item_location=True)
        if setup is not None:
            try:
                if pass_self:
                    setup(self, request.function)
                else:
                    setup()
            # unittest does not call the cleanup function for every BaseException, so we
            # follow this here.
            except Exception:
                if pass_self:
                    cleanup(self)
                else:
                    cleanup()

                raise
        yield
        try:
            if teardown is not None:
                if pass_self:
                    teardown(self, request.function)
                else:
                    teardown()
        finally:
            # Cleanup runs regardless of teardown's outcome.
            if pass_self:
                cleanup(self)
            else:
                cleanup()

    return fixture
class TestCaseFunction(Function):
    """Item for a single test method of a unittest.TestCase.

    Instances also act as the ``unittest.TestResult`` passed to the test
    case in runtest(), which is why the result-protocol callbacks
    (startTest, addError, addFailure, ...) are defined on this class.
    """

    nofuncargs = True
    # Exceptions reported through the TestResult protocol; converted by
    # _addexcinfo() and attached to the report in pytest_runtest_makereport().
    _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None
    _testcase: Optional["unittest.TestCase"] = None

    def _getobj(self):
        """Return the test function looked up on the parent class."""
        assert self.parent is not None
        # Unlike a regular Function in a Class, where `item.obj` returns
        # a *bound* method (attached to an instance), TestCaseFunction's
        # `obj` returns an *unbound* method (not attached to an instance).
        # This inconsistency is probably not desirable, but needs some
        # consideration before changing.
        return getattr(self.parent.obj, self.originalname)  # type: ignore[attr-defined]

    def setup(self) -> None:
        """Instantiate the TestCase and bind the test method for this run."""
        # A bound method to be called during teardown() if set (see 'runtest()').
        self._explicit_tearDown: Optional[Callable[[], None]] = None
        assert self.parent is not None
        self._testcase = self.parent.obj(self.name)  # type: ignore[attr-defined]
        self._obj = getattr(self._testcase, self.name)
        if hasattr(self, "_request"):
            self._request._fillfixtures()

    def teardown(self) -> None:
        """Run a postponed tearDown (if any) and drop per-run references."""
        if self._explicit_tearDown is not None:
            self._explicit_tearDown()
            self._explicit_tearDown = None
        # Drop references so the TestCase instance can be garbage collected.
        self._testcase = None
        self._obj = None

    def startTest(self, testcase: "unittest.TestCase") -> None:
        """TestResult protocol: nothing to do at test start."""
        pass

    def _addexcinfo(self, rawexcinfo: "_SysExcInfoType") -> None:
        """Convert a raw exc_info triple into an ExceptionInfo and store it."""
        # Unwrap potential exception info (see twisted trial support below).
        rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
        try:
            excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(rawexcinfo)  # type: ignore[arg-type]
            # Invoke the attributes to trigger storing the traceback
            # trial causes some issue there.
            excinfo.value
            excinfo.traceback
        except TypeError:
            # rawexcinfo was not a usable exc_info triple; fall back to a
            # native-format report of whatever we were given.
            try:
                try:
                    values = traceback.format_exception(*rawexcinfo)
                    values.insert(
                        0,
                        "NOTE: Incompatible Exception Representation, "
                        "displaying natively:\n\n",
                    )
                    fail("".join(values), pytrace=False)
                except (fail.Exception, KeyboardInterrupt):
                    raise
                except BaseException:
                    fail(
                        "ERROR: Unknown Incompatible Exception "
                        "representation:\n%r" % (rawexcinfo,),
                        pytrace=False,
                    )
            except KeyboardInterrupt:
                raise
            except fail.Exception:
                excinfo = _pytest._code.ExceptionInfo.from_current()
        self.__dict__.setdefault("_excinfo", []).append(excinfo)

    def addError(
        self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType"
    ) -> None:
        """TestResult protocol: record an unexpected error."""
        try:
            # Honor pytest's session-exit exception when raised inside a test.
            if isinstance(rawexcinfo[1], exit.Exception):
                exit(rawexcinfo[1].msg)
        except TypeError:
            pass
        self._addexcinfo(rawexcinfo)

    def addFailure(
        self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType"
    ) -> None:
        """TestResult protocol: record an assertion failure."""
        self._addexcinfo(rawexcinfo)

    def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None:
        """TestResult protocol: translate a unittest skip into a pytest skip."""
        try:
            raise pytest.skip.Exception(reason, _use_item_location=True)
        except skip.Exception:
            self._addexcinfo(sys.exc_info())

    def addExpectedFailure(
        self,
        testcase: "unittest.TestCase",
        rawexcinfo: "_SysExcInfoType",
        reason: str = "",
    ) -> None:
        """TestResult protocol: translate an expected failure into an xfail."""
        try:
            xfail(str(reason))
        except xfail.Exception:
            self._addexcinfo(sys.exc_info())

    def addUnexpectedSuccess(
        self,
        testcase: "unittest.TestCase",
        reason: Optional["twisted.trial.unittest.Todo"] = None,
    ) -> None:
        """TestResult protocol: an @expectedFailure test passed -> fail it."""
        msg = "Unexpected success"
        if reason:
            msg += f": {reason.reason}"
        # Preserve unittest behaviour - fail the test. Explicitly not an XPASS.
        try:
            fail(msg, pytrace=False)
        except fail.Exception:
            self._addexcinfo(sys.exc_info())

    def addSuccess(self, testcase: "unittest.TestCase") -> None:
        """TestResult protocol: nothing to record for a passing test."""
        pass

    def stopTest(self, testcase: "unittest.TestCase") -> None:
        """TestResult protocol: nothing to do at test end."""
        pass

    def runtest(self) -> None:
        """Run the wrapped TestCase, with this item acting as the TestResult."""
        from _pytest.debugging import maybe_wrap_pytest_function_for_tracing

        assert self._testcase is not None

        maybe_wrap_pytest_function_for_tracing(self)

        # Let the unittest framework handle async functions.
        if is_async_function(self.obj):
            # Type ignored because self acts as the TestResult, but is not actually one.
            self._testcase(result=self)  # type: ignore[arg-type]
        else:
            # When --pdb is given, we want to postpone calling tearDown() otherwise
            # when entering the pdb prompt, tearDown() would have probably cleaned up
            # instance variables, which makes it difficult to debug.
            # Arguably we could always postpone tearDown(), but this changes the moment where the
            # TestCase instance interacts with the results object, so better to only do it
            # when absolutely needed.
            if self.config.getoption("usepdb") and not _is_skipped(self.obj):
                self._explicit_tearDown = self._testcase.tearDown
                setattr(self._testcase, "tearDown", lambda *args: None)

            # We need to update the actual bound method with self.obj, because
            # wrap_pytest_function_for_tracing replaces self.obj by a wrapper.
            setattr(self._testcase, self.name, self.obj)
            try:
                self._testcase(result=self)  # type: ignore[arg-type]
            finally:
                delattr(self._testcase, self.name)

    def _prunetraceback(
        self, excinfo: _pytest._code.ExceptionInfo[BaseException]
    ) -> None:
        """Hide unittest-internal frames from the reported traceback."""
        super()._prunetraceback(excinfo)
        traceback = excinfo.traceback.filter(
            lambda x: not x.frame.f_globals.get("__unittest")
        )
        if traceback:
            excinfo.traceback = traceback
@hookimpl(tryfirst=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None:
    """Attach exceptions recorded via the TestResult protocol to the report
    call, and translate unittest.SkipTest into a pytest skip."""
    if isinstance(item, TestCaseFunction):
        if item._excinfo:
            # Exceptions were routed through addError/addFailure/addSkip;
            # surface the first one on this call and discard any stale result.
            call.excinfo = item._excinfo.pop(0)
            try:
                del call.result
            except AttributeError:
                pass

    # Convert unittest.SkipTest to pytest.skip.
    # This is actually only needed for nose, which reuses unittest.SkipTest for
    # its own nose.SkipTest. For unittest TestCases, SkipTest is already
    # handled internally, and doesn't reach here.
    unittest = sys.modules.get("unittest")
    if (
        unittest
        and call.excinfo
        and isinstance(call.excinfo.value, unittest.SkipTest)  # type: ignore[attr-defined]
    ):
        excinfo = call.excinfo
        call2 = CallInfo[None].from_call(
            lambda: pytest.skip(str(excinfo.value)), call.when
        )
        call.excinfo = call2.excinfo
# Twisted trial support.


@hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
    """For trial TestCases, monkeypatch twisted.python.failure.Failure so the
    raw exc_info is preserved for _addexcinfo(); restored after the test."""
    if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
        ut: Any = sys.modules["twisted.python.failure"]
        Failure__init__ = ut.Failure.__init__
        check_testcase_implements_trial_reporter()

        def excstore(
            self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
        ):
            # Stash the raw exc_info on the Failure so _addexcinfo() can
            # rebuild a pytest ExceptionInfo from it later.
            if exc_value is None:
                self._rawexcinfo = sys.exc_info()
            else:
                if exc_type is None:
                    exc_type = type(exc_value)
                self._rawexcinfo = (exc_type, exc_value, exc_tb)
            try:
                Failure__init__(
                    self, exc_value, exc_type, exc_tb, captureVars=captureVars
                )
            except TypeError:
                # Older twisted versions do not accept captureVars.
                Failure__init__(self, exc_value, exc_type, exc_tb)

        ut.Failure.__init__ = excstore
        yield
        # Restore the original constructor once the test has run.
        ut.Failure.__init__ = Failure__init__
    else:
        yield
def check_testcase_implements_trial_reporter(done: List[int] = []) -> None:
    """Declare (once per process) that TestCaseFunction implements twisted's
    IReporter interface.

    The mutable default argument is intentional: it is shared across calls
    and acts as run-once state.
    """
    if done:
        return
    from zope.interface import classImplements
    from twisted.trial.itrial import IReporter

    classImplements(TestCaseFunction, IReporter)
    done.append(1)
def _is_skipped(obj) -> bool:
"""Return True if the given object has been marked with @unittest.skip."""
return bool(getattr(obj, "__unittest_skip__", False))
| {
"content_hash": "3283cef971ebf5ba9d21f4e9173d15e6",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 116,
"avg_line_length": 34.91787439613527,
"alnum_prop": 0.5966380741560597,
"repo_name": "Akasurde/pytest",
"id": "851e4943b23efecfe30d0aad9b7529882f4e1bb9",
"size": "14456",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/_pytest/unittest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2594260"
}
],
"symlink_target": ""
} |
class ReverseProxied(object):
    '''WSGI middleware that rewrites the environ from reverse-proxy headers.

    Lets the app be mounted under a URL prefix and/or behind a different
    HTTP scheme than the one used locally. The front-end server must supply
    the headers, e.g. in nginx:

        location /myprefix {
            proxy_pass http://192.168.0.1:5001;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Scheme $scheme;
            proxy_set_header X-Script-Name /myprefix;
        }

    :param app: the WSGI application
    '''

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Mount point announced by the proxy (X-Script-Name header).
        prefix = environ.get('HTTP_X_SCRIPT_NAME', '')
        # Original scheme announced by the proxy (X-Scheme header).
        forwarded_scheme = environ.get('HTTP_X_SCHEME', '')

        if prefix:
            environ['SCRIPT_NAME'] = prefix
            requested_path = environ['PATH_INFO']
            if requested_path.startswith(prefix):
                # Strip the mount prefix so routing sees app-relative paths.
                environ['PATH_INFO'] = requested_path[len(prefix):]

        if forwarded_scheme:
            environ['wsgi.url_scheme'] = forwarded_scheme

        return self.app(environ, start_response)
| {
"content_hash": "ba44275e630035b0fe24a0331fb59a72",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 68,
"avg_line_length": 36.59375,
"alnum_prop": 0.6080273270708796,
"repo_name": "alex-leonhardt/sensu-grid",
"id": "9649e6138f7ae35577dddd3a2e122f2c97e925f5",
"size": "1171",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "reverseproxied.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "145082"
},
{
"name": "Dockerfile",
"bytes": "946"
},
{
"name": "HTML",
"bytes": "16073"
},
{
"name": "Python",
"bytes": "22733"
},
{
"name": "Shell",
"bytes": "201"
}
],
"symlink_target": ""
} |
import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from modelcluster.contrib.taggit import ClusterTaggableManager
from wagtail.wagtailcore.fields import RichTextField
class EntryAbstract(models.Model):
    """Abstract base model holding the fields of a Puput blog entry.

    Concrete entry pages inherit these fields; no table is created for this
    model itself (Meta.abstract = True).
    """

    # Main post content (rich text).
    body = RichTextField(verbose_name=_('body'))
    tags = ClusterTaggableManager(through='puput.TagEntryPage', blank=True)
    # Callable default: evaluated per save. NOTE(review): datetime.today()
    # is naive local time -- confirm behaviour against Django's USE_TZ.
    date = models.DateTimeField(verbose_name=_("Post date"), default=datetime.datetime.today)
    # Optional header image; kept (set to NULL) if the image is deleted.
    header_image = models.ForeignKey('wagtailimages.Image', verbose_name=_('Header image'), null=True, blank=True,
                                     on_delete=models.SET_NULL, related_name='+', )
    categories = models.ManyToManyField('puput.Category', through='puput.CategoryEntryPage', blank=True)
    excerpt = RichTextField(verbose_name=_('excerpt'), blank=True,
                            help_text=_("Entry excerpt to be displayed on entries list. "
                                        "If this field is not filled, a truncate version of body text will be used."))
    # Denormalized comment count; not editable in the admin. Presumably
    # updated by the comments integration -- verify against callers.
    num_comments = models.IntegerField(default=0, editable=False)

    class Meta:
        abstract = True
| {
"content_hash": "ac9bc333991bc402f562597e89e1a5e2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 118,
"avg_line_length": 50.52173913043478,
"alnum_prop": 0.6867469879518072,
"repo_name": "taedori81/puput",
"id": "5763562cb037bb6fba9309e5ac0ae6fe75962528",
"size": "1162",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "puput/abstracts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25656"
},
{
"name": "HTML",
"bytes": "14622"
},
{
"name": "JavaScript",
"bytes": "3766"
},
{
"name": "Python",
"bytes": "32183"
}
],
"symlink_target": ""
} |
import json
import logging
from typing import (
Any,
Dict,
)
import kafka
import slackclient
from papika.bridges import Bridge
log = logging.getLogger(__name__)
def is_valid_message(message: Dict[str, Any]) -> bool:
    """Check that *message* carries the fields required to post to Slack.

    A message needs a 'channel' and at least one of 'text'/'attachments'.
    Every missing requirement is logged; returns False if any check failed.
    """
    valid = True

    if 'channel' not in message:
        log.error("Required argument 'channel' was missing in message: {0}".format(message))
        valid = False

    has_content = 'text' in message or 'attachments' in message
    if not has_content:
        log.error("One of either 'text' or 'attachments' must be supplied in message: {0}".format(message))
        valid = False

    return valid
class BridgeToSlack(Bridge):
    """Bridge that consumes JSON events from Kafka and posts them to Slack.

    Each Kafka record is expected to be UTF-8 encoded JSON with at least a
    'channel' and one of 'text'/'attachments' (see ``is_valid_message``).
    """

    def __init__(self, config: Dict[str, Any]) -> None:
        """Create the Slack client and Kafka consumer from *config*.

        Expects config['slack']['api_token'] and the
        config['kafka']['to_slack'] topic/group settings.
        """
        token = config['slack']['api_token']
        self.slack_client = slackclient.SlackClient(token)
        self.kafka_consumer = kafka.KafkaConsumer(
            config['kafka']['to_slack']['topic'],
            bootstrap_servers=config['kafka']['bootstrap_servers'],
            group_id=config['kafka']['to_slack']['group_id'],
        )

    def run(self) -> None:
        """Consume Kafka events forever, forwarding valid ones to Slack.

        Malformed events (bad UTF-8, invalid JSON, missing required fields)
        and Slack API errors are logged and skipped so one bad event cannot
        stop the bridge.

        Raises:
            ValueError: if the Slack token fails the auth.test call.
        """
        auth_test = self.slack_client.api_call('auth.test')
        if auth_test['ok']:
            log.info("Successfully authenticated with Slack: {0}".format(auth_test))
        else:
            log.error("Could not authenticate to Slack: {0}".format(auth_test))
            raise ValueError("Invalid Slack token")

        for event in self.kafka_consumer:
            try:
                raw_message = event.value.decode('utf-8')
            except UnicodeDecodeError:
                log.exception("Could not decode: {0}".format(event.value))
                continue

            try:
                message = json.loads(raw_message)
            except json.JSONDecodeError:
                log.exception("Could not parse as JSON: {0}".format(raw_message))
                continue

            if not is_valid_message(message):
                log.error("Received invalid message, skipping: {0}".format(message))
                continue

            # Map optional fields onto chat.postMessage arguments, filling in
            # the bridge's defaults where the message omits them.
            message_as_kwargs = dict(
                channel=message['channel'],
                parse=message.get('parse', 'none'),
                link_names=message.get('link_names', '1'),
                attachments=message.get('attachments'),
                unfurl_links=message.get('unfurl_links', 'false'),
                unfurl_media=message.get('unfurl_media', 'true'),
                username=message.get('username', ''),
                as_user=message.get('as_user', 'true'),
                icon_url=message.get('icon_url'),
                icon_emoji=message.get('icon_emoji'),
            )
            if 'text' in message:
                message_as_kwargs['text'] = message['text']

            log.info("Sending message: {0}".format(message_as_kwargs))
            try:
                self.slack_client.api_call(
                    'chat.postMessage',
                    **message_as_kwargs,
                )
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and prevented clean shutdown.
            except Exception:
                log.exception("Unable to send message: %s", message)
| {
"content_hash": "10e41eae634f6df8a5dacd5abad6aef0",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 107,
"avg_line_length": 33.25,
"alnum_prop": 0.5541026479241582,
"repo_name": "kennydo/papika",
"id": "befeb2a537077967b54fc27b4c41945858fcf415",
"size": "3059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "papika/bridges/to_slack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7872"
}
],
"symlink_target": ""
} |
from distutils.core import setup, Extension
# Windows 32-bit build configuration for the yara-python C extension.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools when modernizing this script.
setup(name='yara-python',
      version='3.2.0',
      author='Victor M. Alvarez',
      author_email='plusvic@gmail.com;vmalvarez@virustotal.com',
      ext_modules=[Extension(
          name='yara',
          sources=['yara-python.c'],
          include_dirs=['../windows/include', '../libyara/include'],
          # Link against the prebuilt 32-bit libyara plus required Win32 libs.
          extra_objects=[
              'user32.lib', 'gdi32.lib', '../windows/libyara/Release/libyara32.lib']
      )])
| {
"content_hash": "3d4dec786f39b4379117ad4853c02935",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 34.142857142857146,
"alnum_prop": 0.5857740585774058,
"repo_name": "digideskio/yara",
"id": "61af2ea08fccc0ebcd108f47b8b6b8c56b9fe416",
"size": "1108",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "yara-python/setupwin32.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2386911"
},
{
"name": "C++",
"bytes": "100026"
},
{
"name": "Groff",
"bytes": "4049"
},
{
"name": "Lex",
"bytes": "29817"
},
{
"name": "Python",
"bytes": "33757"
},
{
"name": "Shell",
"bytes": "80"
},
{
"name": "Yacc",
"bytes": "60268"
}
],
"symlink_target": ""
} |
#!python3
# -*- coding:utf-8 -*-
import os
import sys
import time
import ctypes
import shutil
import subprocess
IsPy3 = sys.version_info[0] >= 3
if IsPy3:
import winreg
else:
import codecs
import _winreg as winreg
# Build configuration; overridden by command-line arguments in __main__.
BuildType = 'Release'
IsRebuild = True
Build = 'Rebuild'
Update = False
Copy = False
CleanAll = False
BuildTimeout = 30*60  # per-build-command timeout in seconds (Python 3 only)
Bit = 'Win32'
Dlllib = 'dll'
MSBuild = None
IncrediBuild = None
UseMSBuild = True  # build with MSBuild by default; if False, use IncrediBuild instead
# To adapt this script to a different project, only the 5 variables below need changing
SlnFile = '../CRandom.sln'  # path relative to this script
UpdateDir = []  # paths relative to this script; leave empty to skip updating
ExecBatList = []  # bat scripts (relative to this script) run before building; cd's to the bat's dir first
MSBuildFirstProjects = [r'CRandom']  # MSBuild needs the project's path inside the .sln
# projects MSBuild builds first; leave empty for no explicit order
IncrediBuildFirstProjects = ['CRandom']  # IncrediBuild needs only the project name
# projects IncrediBuild builds first; leave empty for no explicit order
class ConsoleColor():
    '''This class defines the values of color for printing on console window'''
    # Values match the Win32 console character attribute constants used by
    # SetConsoleTextAttribute (see Win32API.SetConsoleColor below).
    Black = 0
    DarkBlue = 1
    DarkGreen = 2
    DarkCyan = 3
    DarkRed = 4
    DarkMagenta = 5
    DarkYellow = 6
    Gray = 7
    DarkGray = 8
    Blue = 9
    Green = 10
    Cyan = 11
    Red = 12
    Magenta = 13
    Yellow = 14
    White = 15
class Coord(ctypes.Structure):
    # ctypes mirror of the Win32 COORD struct (console cell coordinates).
    _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
    # ctypes mirror of the Win32 SMALL_RECT struct (console window bounds).
    _fields_ = [('Left', ctypes.c_short),
                ('Top', ctypes.c_short),
                ('Right', ctypes.c_short),
                ('Bottom', ctypes.c_short),
               ]
class ConsoleScreenBufferInfo(ctypes.Structure):
    # ctypes mirror of the Win32 CONSOLE_SCREEN_BUFFER_INFO struct, used to
    # read the current text attributes (colors) of the console.
    _fields_ = [('dwSize', Coord),
                ('dwCursorPosition', Coord),
                ('wAttributes', ctypes.c_uint),
                ('srWindow', SmallRect),
                ('dwMaximumWindowSize', Coord),
               ]
class Win32API():
    '''Some native methods for python calling'''
    # Windows-only: relies on kernel32 console APIs via ctypes.
    StdOutputHandle = -11  # STD_OUTPUT_HANDLE constant from the Win32 API
    ConsoleOutputHandle = None  # lazily initialized by SetConsoleColor
    DefaultColor = None  # original console attributes, captured on first use

    @staticmethod
    def SetConsoleColor(color):
        '''Change the text color on console window'''
        if not Win32API.DefaultColor:
            # First call: capture the handle and current attributes so
            # ResetConsoleColor can restore them later.
            if not Win32API.ConsoleOutputHandle:
                Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
            bufferInfo = ConsoleScreenBufferInfo()
            ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
            Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
        if IsPy3:
            sys.stdout.flush() # need flush stdout in python 3
        ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)

    @staticmethod
    def ResetConsoleColor():
        '''Reset the default text color on console window'''
        if IsPy3:
            sys.stdout.flush() # need flush stdout in python 3
        ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
    """Minimal logging helper: writes to stdout (optionally colored via
    Win32API) and appends to a UTF-8 log file."""
    LogFile = '@AutomationLog.txt'
    LineSep = '\n'

    @staticmethod
    def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
        '''
        consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
        if consoleColor == -1, use default color
        '''
        if printToStdout:
            isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
            if isValidColor:
                Win32API.SetConsoleColor(consoleColor)
            try:
                sys.stdout.write(log)
            except UnicodeError as e:
                Win32API.SetConsoleColor(ConsoleColor.Red)
                isValidColor = True
                sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
            if isValidColor:
                Win32API.ResetConsoleColor()
        if not writeToFile:
            return
        if IsPy3:
            logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
        else:
            logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
        try:
            logFile.write(log)
        except Exception as ex:
            sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
        finally:
            # BUGFIX: the original closed the file only in the except branch,
            # leaking a file handle on every successful write. Closing also
            # flushes, so the explicit flush() the original hinted at is
            # no longer needed.
            logFile.close()

    @staticmethod
    def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
        '''
        consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
        if consoleColor == -1, use default color
        '''
        Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)

    @staticmethod
    def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
        '''
        Write the message prefixed with a "YYYY-MM-DD hh:mm:ss - " timestamp.
        consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
        if consoleColor == -1, use default color
        '''
        t = time.localtime()
        log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
            t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
        Logger.Write(log, consoleColor, writeToFile, printToStdout)

    @staticmethod
    def DeleteLog():
        """Remove the log file if it exists (used to start a fresh run)."""
        if os.path.exists(Logger.LogFile):
            os.remove(Logger.LogFile)
def GetMSBuildPath():
    """Locate MSBuild.exe via the Visual Studio 2013 environment (Windows-only).

    Writes a temporary batch file that calls vcvarsall.bat for the configured
    Bit ('Win32' -> x86, 'x64' -> amd64) and then runs "where msbuild";
    returns the first reported path containing MSBuild.exe, or None when
    nothing matches. Requires the VS120COMNTOOLS environment variable.
    """
    # NOTE(review): if Bit were anything other than 'Win32'/'x64', `cmd`
    # would be unbound and this would raise NameError; __main__ only ever
    # sets those two values.
    if Bit == 'Win32':
        cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
    elif Bit == 'x64':
        cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" amd64\nwhere msbuild'
    ftemp = open('GetMSBuildPath.bat', 'wt')
    ftemp.write(cmd)
    ftemp.close()
    p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
    p.wait()
    lines = p.stdout.read().decode().splitlines()
    # The helper bat is only needed for this one query; clean it up.
    os.remove('GetMSBuildPath.bat')
    for line in lines:
        if 'MSBuild.exe' in line:
            return line
def GetIncrediBuildPath():
    """Locate IncrediBuild's BuildConsole.exe via the registry (Windows-only).

    Reads the IncrediBuild.MonitorFile file-association command from HKLM,
    extracts the quoted executable path, and returns BuildConsole.exe from
    the same directory. Returns None (implicitly) when the key is missing.
    """
    try:
        key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command')
        value, typeId = winreg.QueryValueEx(key, '')
        if value:
            # The registered command looks like: "C:\...\SomeTool.exe" "%1" --
            # take the first quoted path and look next to it.
            start = value.find('"')
            end = value.find('"', start + 1)
            path = value[start+1:end]
            buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
            return buildConsole
    except FileNotFoundError as e:
        # NOTE(review): FileNotFoundError does not exist on Python 2, so this
        # handler would itself raise NameError there; the rest of the script
        # tries to support Python 2 -- confirm whether that still matters.
        Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
def UpdateCode():
    """Run ``git pull`` in every directory listed in UpdateDir.

    Returns True when all pulls succeed, False when git is missing or any
    pull fails. Requires git's bin directory on PATH.
    """
    # put git to path first
    if not shutil.which('git.exe'):
        # BUGFIX: the message previously contained '\b' (a backspace control
        # character) where the literal path 'git\bin' was intended.
        Logger.Log('找不到git.exe. 请确认安装git时将git\\bin目录路径加入到环境变量path中!!!\n, 跳过更新代码!!!', ConsoleColor.Yellow)
        # BUGFIX: was `return false` (lowercase), which raised NameError.
        return False
    oldDir = os.getcwd()
    for repo_dir in UpdateDir:
        os.chdir(repo_dir)
        ret = os.system('git pull')
        os.chdir(oldDir)
        if ret != 0:
            Logger.Log('update {0} failed'.format(repo_dir), ConsoleColor.Yellow)
            # BUGFIX: was `return false` (lowercase), which raised NameError.
            return False
    return True
def BuildProject(cmd):
    """Run one build command, retrying up to 6 times; return True on success.

    Success detection differs by tool: MSBuild's exit code is trusted
    directly, while IncrediBuild's log is parsed because its exit code does
    not reflect build failure.
    """
    for i in range(6):
        Logger.WriteLine(cmd, ConsoleColor.Cyan)
        buildFailed = True
        startTime = time.time()
        # IncrediBuild must not be run with stdout=subprocess.PIPE, otherwise
        # p.wait() never returns -- possibly an IncrediBuild bug.
        p = subprocess.Popen(cmd)
        if IsPy3:
            try:
                buildFailed = p.wait(BuildTimeout)
            except subprocess.TimeoutExpired as e:
                Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
                p.kill()
        else:
            # Python 2's Popen.wait() has no timeout parameter.
            buildFailed = p.wait()
        if not UseMSBuild:
            # IncrediBuild's exit code does not indicate whether the build
            # succeeded; parse its log summary line instead.
            fin = open('IncrediBuild.log')
            for line in fin:
                if line.startswith('=========='):
                    Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
                    if IsPy3:
                        # Summary looks like (localized build output):
                        # "========== 生成: 成功 1 个,失败 0 个,..." --
                        # the failure count follows the '失败' marker.
                        start = line.find('失败') + 3
                    else:
                        # Python 2 workaround: scan for the second digit run
                        # instead of searching for the (non-ASCII) marker.
                        start = 0
                        n2 = 0
                        while 1:
                            if line[start].isdigit():
                                n2 += 1
                                if n2 == 2:
                                    break
                            start = line.find(' ', start)
                            start += 1
                    end = line.find(' ', start)
                    failCount = int(line[start:end])
                    buildFailed = failCount > 0
                else:
                    # Echo non-summary lines; only print those containing ' error '.
                    Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)
            fin.close()
        costTime = time.time() - startTime
        Logger.WriteLine('build cost time: {0:.1f}s\n'.format(costTime), ConsoleColor.Green)
        if not buildFailed:
            return True
    return False
def BuildAllProjects():
    """Build (or clean+build) the solution with the selected tool.

    Assembles the command list from UseMSBuild/IsRebuild/CleanAll and the
    "first projects" ordering, then runs the commands in sequence, stopping
    at the first failure. Returns True when every command succeeded.
    """
    buildSuccess = False
    cmds = []
    if UseMSBuild:
        if IsRebuild:
            # Clean first; with CleanAll, clean both configurations.
            if CleanAll:
                cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))
                cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))
            else:
                cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
        # Build the priority projects first, then the whole solution.
        for project in MSBuildFirstProjects:
            cmds.append('{0} {1} /t:{2} /p:Configuration={3};platform={4} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType, Bit))
        cmds.append('{0} {1} /p:Configuration={2};platform={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType, Bit))
    else: #IncrediBuild
        if IsRebuild:
            if CleanAll:
                cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug', Bit))
                cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release', Bit))
            else:
                cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))
        for project in IncrediBuildFirstProjects:
            cmds.append('"{0}" {1} /build /prj={2} /cfg="{3}|{4}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType, Bit))
        cmds.append('"{0}" {1} /build /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))
    for cmd in cmds:
        buildSuccess = BuildProject(cmd)
        if not buildSuccess:
            break
    return buildSuccess
def main():
    """Update the checkout, run copy scripts and build everything.

    Returns 0 on success, 1 on failure (missing tools, update failure or
    build failure) — suitable as a process exit code.
    """
    if UseMSBuild:
        if not os.path.exists(MSBuild):
            Logger.Log('can not find msbuild.exe', ConsoleColor.Red)
            return 1
    else:
        if not os.path.exists(IncrediBuild):
            # Fixed: this branch checks for IncrediBuild's BuildConsole.exe,
            # not msbuild.exe (message now matches the check at startup).
            Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
            return 1
    dir = os.path.dirname(__file__)
    if dir:
        # Run relative to the script's own directory.
        oldDir = os.getcwd()
        os.chdir(dir)
    if Update:
        if not UpdateCode():
            return 1
        Logger.Log('git update succeed', ConsoleColor.Green)
    if Copy:
        # Run each helper batch file from its own directory.
        for bat in ExecBatList:
            oldBatDir = os.getcwd()
            batDir = os.path.dirname(bat)
            batName = os.path.basename(bat)
            if batDir:
                os.chdir(batDir)
            # Fixed: time.clock() was removed in Python 3.8; time.time()
            # gives the same wall-clock duration here.
            start = time.time()
            os.system(batName)
            Logger.Log('run "{}" cost {:.1f} seconds'.format(batName, time.time() - start), ConsoleColor.Green)
            if batDir:
                os.chdir(oldBatDir)
    buildSuccess = BuildAllProjects()
    if buildSuccess:
        Logger.Log('build succeed', ConsoleColor.Green)
    else:
        Logger.Log('build failed', ConsoleColor.Red)
    if dir:
        os.chdir(oldDir)
    return 0 if buildSuccess else 1
if __name__ == '__main__':
    # Command-line words (case-insensitive) toggle the module-level flags:
    # debug / lib / 64 / build / update / copy / clean / incredibuild.
    Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)
    sys.argv = [x.lower() for x in sys.argv]
    start_time = time.time()
    if 'debug' in sys.argv:
        BuildType = 'Debug'
    if 'lib' in sys.argv:
        # Build the static-library flavour of the solution instead.
        Dlllib = 'lib'
        SlnFile = '../CRandom_lib.sln'
        MSBuildFirstProjects = [r'CRandom_lib']
        IncrediBuildFirstProjects = ['CRandom_lib']
    if '64' in sys.argv:
        Bit = 'x64'
    if 'build' in sys.argv:
        # Plain (incremental) build rather than the default rebuild.
        IsRebuild = False
        Build = 'Build'
    if 'update' in sys.argv:
        Update = True
    if 'copy' in sys.argv:
        Copy = True
    if 'clean' in sys.argv:
        CleanAll = True
    if 'incredibuild' in sys.argv:
        UseMSBuild = False
    # Locate the requested build tool up front and bail out early if absent.
    if UseMSBuild:
        MSBuild = GetMSBuildPath()
        if not MSBuild:
            Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)
            exit(1)
    else:
        IncrediBuild = GetIncrediBuildPath()
        if not IncrediBuild:
            Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
            exit(1)
    cwd = os.getcwd()
    Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))
    ret = main()
    end_time = time.time()
    cost_time = end_time-start_time
    Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)
    # Propagate build success/failure as the process exit code.
    exit(ret)
"content_hash": "0aad8915b7f5b68508d027ad237dde0f",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 195,
"avg_line_length": 38.19718309859155,
"alnum_prop": 0.5918879056047197,
"repo_name": "xylsxyls/xueyelingshuang",
"id": "838790e20070e10f05e1b2e2621f3d289d0c804b",
"size": "14066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/CRandom/scripts/rebuild_CRandom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70916"
},
{
"name": "C",
"bytes": "15759114"
},
{
"name": "C++",
"bytes": "10113598"
},
{
"name": "CMake",
"bytes": "226509"
},
{
"name": "COBOL",
"bytes": "20676"
},
{
"name": "HTML",
"bytes": "417"
},
{
"name": "Makefile",
"bytes": "303"
},
{
"name": "Python",
"bytes": "1481199"
},
{
"name": "QML",
"bytes": "266"
},
{
"name": "Shell",
"bytes": "93441"
}
],
"symlink_target": ""
} |
import json
import pytest
from openshift_checks.logging.logging_index_time import LoggingIndexTime, OpenShiftCheckException
SAMPLE_UUID = "unique-test-uuid"
def canned_loggingindextime(exec_oc=None):
    """Build a LoggingIndexTime check, optionally stubbing out its exec_oc.

    Constructing the check directly fails the test if an Ansible module
    is actually invoked; passing ``exec_oc`` lets tests canned-response
    the oc calls instead.
    """
    check = LoggingIndexTime()
    if exec_oc:
        check.exec_oc = exec_oc
    return check
# Canned `oc get pod` style fixtures shared by the tests below.

# Healthy Elasticsearch pod: every container ready, phase Running.
plain_running_elasticsearch_pod = {
    "metadata": {
        "labels": {"component": "es", "deploymentconfig": "logging-es-data-master"},
        "name": "logging-es-data-master-1",
    },
    "status": {
        "containerStatuses": [{"ready": True}, {"ready": True}],
        "phase": "Running",
    }
}
# Healthy Kibana pod: every container ready, phase Running.
plain_running_kibana_pod = {
    "metadata": {
        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
        "name": "logging-kibana-1",
    },
    "status": {
        "containerStatuses": [{"ready": True}, {"ready": True}],
        "phase": "Running",
    }
}
# Unhealthy Kibana pod: one container not ready and phase is not "Running".
not_running_kibana_pod = {
    "metadata": {
        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
        "name": "logging-kibana-2",
    },
    "status": {
        "containerStatuses": [{"ready": True}, {"ready": False}],
        "conditions": [{"status": "True", "type": "Ready"}],
        "phase": "pending",
    }
}
@pytest.mark.parametrize('pods, expect_pods', [
    (
        [not_running_kibana_pod],
        [],
    ),
    (
        [plain_running_kibana_pod],
        [plain_running_kibana_pod],
    ),
    (
        [],
        [],
    )
])
def test_check_running_pods(pods, expect_pods):
    """running_pods keeps only pods that are Running with all containers ready."""
    check = canned_loggingindextime()
    pods = check.running_pods(pods)
    assert pods == expect_pods
def test_bad_config_param():
    """A non-numeric timeout setting should raise an InvalidTimeout error."""
    with pytest.raises(OpenShiftCheckException) as error:
        LoggingIndexTime(task_vars=dict(openshift_check_logging_index_timeout_seconds="foo")).run()
    assert 'InvalidTimeout' == error.value.name
def test_no_running_pods():
    """With no running Kibana pods the check fails with kibanaNoRunningPods."""
    check = LoggingIndexTime()
    check.get_pods_for_component = lambda *_: [not_running_kibana_pod]
    with pytest.raises(OpenShiftCheckException) as error:
        check.run()
    assert 'kibanaNoRunningPods' == error.value.name
def test_with_running_pods():
    """Happy path: running pods plus stubbed curl/wait helpers -> no failure."""
    check = LoggingIndexTime()
    check.get_pods_for_component = lambda *_: [plain_running_kibana_pod, plain_running_elasticsearch_pod]
    check.curl_kibana_with_uuid = lambda *_: SAMPLE_UUID
    check.wait_until_cmd_or_err = lambda *_: None
    assert not check.run().get("failed")
@pytest.mark.parametrize('name, json_response, uuid, timeout', [
    (
        'valid count in response',
        {
            "count": 1,
        },
        SAMPLE_UUID,
        0.001,
    ),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
    """wait_until_cmd_or_err returns quietly once ES reports a match count."""
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@pytest.mark.parametrize('name, json_response, timeout, expect_error', [
    (
        'invalid json response',
        {
            "invalid_field": 1,
        },
        0.001,
        'esInvalidResponse',
    ),
    (
        'empty response',
        {},
        0.001,
        'esInvalidResponse',
    ),
    (
        'valid response but invalid match count',
        {
            "count": 0,
        },
        0.005,
        'NoMatchFound',
    )
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
    """Malformed ES responses, or a zero match count at timeout, raise the named error."""
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    with pytest.raises(OpenShiftCheckException) as error:
        check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
    assert expect_error == error.value.name
def test_curl_kibana_with_uuid():
    """A 404 from Kibana means the UUID is unused, so it is returned as-is."""
    check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
    check.generate_uuid = lambda: SAMPLE_UUID
    assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@pytest.mark.parametrize('name, json_response, expect_error', [
    (
        'invalid json response',
        {
            "invalid_field": "invalid",
        },
        'kibanaInvalidResponse',
    ),
    (
        'wrong error code in response',
        {
            "statusCode": 500,
        },
        'kibanaInvalidReturnCode',
    ),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
    """A response missing statusCode, or with a non-404 code, raises the named error."""
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.generate_uuid = lambda: SAMPLE_UUID
    with pytest.raises(OpenShiftCheckException) as error:
        check.curl_kibana_with_uuid(plain_running_kibana_pod)
    assert expect_error == error.value.name
| {
"content_hash": "4f3954c0791599d127227e0cdbf0785b",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 105,
"avg_line_length": 28.405882352941177,
"alnum_prop": 0.612549182025264,
"repo_name": "ttindell2/openshift-ansible",
"id": "22566b295510fe06558f1c43ae9550bc947d433b",
"size": "4829",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "roles/openshift_health_checker/test/logging_index_time_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "5005"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "JavaScript",
"bytes": "167"
},
{
"name": "Python",
"bytes": "3390510"
},
{
"name": "Roff",
"bytes": "5645"
},
{
"name": "Shell",
"bytes": "88983"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from sys import stdin, stdout
import logging
from morph_seg.sequence_tagger.train import CNNConfig, LSTMConfig, \
CNNTagger, SequenceTagger
from morph_seg.sequence_tagger.data import TrainValidData
def parse_args():
    """Build and parse the command-line arguments for tagger training."""
    parser = ArgumentParser()
    parser.add_argument('-t', '--train-file', default=stdin)
    parser.add_argument('-c', '--config', type=str,
                        help="Location of YAML config")
    parser.add_argument('-p', '--parameters', type=str,
                        help="Manually specify parameters."
                        "This option allows overriding parameters"
                        "from the config file."
                        "Format: param1=val1,param2=val2")
    parser.add_argument('-a', '--architecture', choices=['RNN', 'CNN'],
                        default='RNN')
    parser.add_argument('--prefix', type=str)
    return parser.parse_args()
def main():
    """Train a sequence tagger (CNN or LSTM) according to the CLI arguments."""
    args = parse_args()
    # Select the config/model classes matching the chosen architecture.
    if args.architecture == 'CNN':
        config_cls, model_cls = CNNConfig, CNNTagger
    else:
        config_cls, model_cls = LSTMConfig, SequenceTagger
    cfg = config_cls.load_from_yaml(
        args.config, train_file=args.train_file,
        param_str=args.parameters
    )
    dataset = TrainValidData(cfg, args.prefix)
    model = model_cls(dataset, cfg)
    # Echo the effective configuration, then train and persist the model.
    cfg.save_to_yaml(stdout)
    model.run_train()
    model.save_model()
if __name__ == '__main__':
    # Timestamped INFO-level logging for training progress messages.
    log_fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
| {
"content_hash": "b2f248defa4bbb22ed7dbdc1284bae16",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 68,
"avg_line_length": 33.2,
"alnum_prop": 0.6006024096385543,
"repo_name": "juditacs/morph-segmentation-experiments",
"id": "5b66db54228f4ab5194bd5c4f965a3910bdda8a7",
"size": "1824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "morph_seg/sequence_tagger/mlp_train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "100878"
},
{
"name": "Python",
"bytes": "21110"
}
],
"symlink_target": ""
} |
"""This example illustrates how to delete a report."""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
# add_help=False because this parser is merged into the sample_tools
# parser as a parent (see main), which supplies its own --help.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
    'profile_id', type=int,
    help='The ID of the profile to delete a report for')
argparser.add_argument(
    'report_id', type=int,
    help='The ID of the report to delete')
def main(argv):
  """Authenticate, then delete the report named on the command line."""
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
      scope=['https://www.googleapis.com/auth/dfareporting',
             'https://www.googleapis.com/auth/dfatrafficking'])
  profile_id = flags.profile_id
  report_id = flags.report_id
  try:
    # Construct the request.
    request = service.reports().delete(profileId=profile_id, reportId=report_id)
    # Execute request and print response.
    request.execute()
    # Fixed: call-style print works on both Python 2 and 3; the original
    # Python-2-only print statement is a syntax error under Python 3.
    print('Successfully deleted report with ID %s.' % report_id)
  except client.AccessTokenRefreshError:
    print('The credentials have been revoked or expired, please re-run the '
          'application to re-authorize')
if __name__ == '__main__':
  # Pass the full argv; sample_tools.init handles flag parsing and auth.
  main(sys.argv)
| {
"content_hash": "4a9a318f1ee590742ca12e4a2b9f3cee",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 28.477272727272727,
"alnum_prop": 0.6943335993615323,
"repo_name": "falbassini/googleads-dfa-reporting-samples",
"id": "d163c25b24c65f9feb3ac2615b9184e7860ad3d1",
"size": "1871",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/v2.1/delete_report.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "600848"
},
{
"name": "CSS",
"bytes": "2434"
},
{
"name": "Java",
"bytes": "542082"
},
{
"name": "PHP",
"bytes": "390126"
},
{
"name": "Python",
"bytes": "493173"
},
{
"name": "Ruby",
"bytes": "268189"
}
],
"symlink_target": ""
} |
from bs4 import BeautifulSoup
import json
import re
def extract_from_b64(encoded_doc):
    """Decode a base64-encoded news-article HTML document and extract its
    metadata and body text.

    Parameters
    ----------
    encoded_doc : base64-encoded bytes/str containing the article HTML.

    Returns
    -------
    dict with keys news_source, publication_date_raw, article_title,
    article_body and doc_id.
    """
    import base64

    # Fixed: str.decode("base64") is a Python-2-only codec; base64.b64decode
    # behaves the same and works on both Python 2 and 3.
    doc = base64.b64decode(encoded_doc).decode('utf-8')
    # Insert whitespace so text from adjacent tags does not run together.
    doc = re.sub("<p>", " ", doc)
    doc = re.sub('<div class="BODY-2">', " ", doc)
    # NOTE(review): no parser is specified, so BeautifulSoup picks whichever
    # is installed; results can differ between lxml and html.parser.
    soup = BeautifulSoup(doc)
    news_source = soup.find("meta", {"name":"sourceName"})['content']
    article_title = soup.find("title").text.strip()
    try:
        publication_date = soup.find("div", {"class":"PUB-DATE"}).text.strip()
    except AttributeError:
        # Some documents carry a plain DATE div instead of PUB-DATE.
        publication_date = soup.find("div", {"class":"DATE"}).text.strip()
    article_body = soup.find("div", {"class":"BODY"}).text.strip()
    doc_id = soup.find("meta", {"name":"documentToken"})['content']
    data = {"news_source" : news_source,
            "publication_date_raw" : publication_date,
            "article_title" : article_title,
            "article_body" : article_body,
            "doc_id" : doc_id}
    return data
| {
"content_hash": "6c06b59b29ff09a84bfcffadea9b9c5f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 37.22222222222222,
"alnum_prop": 0.5950248756218905,
"repo_name": "ahalterman/cloacina",
"id": "ee32681dc2b2c0f29f5f083e22da68588fafd3aa",
"size": "1005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloacina/extract_from_b64.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15139"
}
],
"symlink_target": ""
} |
"""Bayesian variant calling with FreeBayes.
https://github.com/ekg/freebayes
"""
import os
import sys
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bedutils, ploidy, vcfutils
from bcbio.variation.vcfutils import (get_paired_bams, is_paired_analysis,
move_vcf)
def region_to_freebayes(region):
    """Convert a region to FreeBayes' ``chrom:start..end`` string format.

    ``region`` may be a (chrom, start, end) tuple/list, or already a
    string, in which case it is returned unchanged.
    """
    if not isinstance(region, (list, tuple)):
        return region
    chrom, start, end = region
    return "{0}:{1}..{2}".format(chrom, start, end)
def _freebayes_options_from_config(items, config, out_file, region=None):
    """Prepare standard options from configuration input.
    Input BED target files are merged to avoid overlapping regions which
    cause FreeBayes to call multiple times.
    Checks for empty sets of target regions after filtering for high depth,
    in which case we should skip the FreeBayes run.

    Returns (opts, no_target_regions): the FreeBayes command line options
    as a list of strings, and a flag telling the caller to write an empty
    VCF instead of running FreeBayes at all.
    """
    opts = []
    opts += ["--ploidy", str(ploidy.get_ploidy(items, region))]
    # Merge overlapping BED intervals so FreeBayes does not call twice.
    variant_regions = bedutils.merge_overlaps(utils.get_in(config, ("algorithm", "variant_regions")),
                                              items[0])
    no_target_regions = False
    target = shared.subset_variant_regions(variant_regions, region, out_file, items)
    if target:
        if isinstance(target, basestring) and os.path.isfile(target):
            # Whole-genome runs: drop unusually high depth regions before
            # calling; if nothing is left, signal the caller to skip.
            if any(tz.get_in(["config", "algorithm", "coverage_interval"], x, "").lower() == "genome"
                   for x in items):
                target = shared.remove_highdepth_regions(target, items)
                if os.path.getsize(target) == 0:
                    no_target_regions = True
            opts += ["--targets", target]
        else:
            opts += ["--region", region_to_freebayes(target)]
    # User-supplied resource options from the configuration, if any.
    resources = config_utils.get_resources("freebayes", config)
    if resources.get("options"):
        opts += resources["options"]
    return opts, no_target_regions
def _add_somatic_opts(opts, paired):
    """Add somatic options to current set. See _run_freebayes_paired for references.

    Note ``opts`` is the space-joined option *string* at this point (not a
    list); the somatic-calling options are appended and the new string
    returned.
    """
    if "--min-alternate-fraction" not in opts and "-F" not in opts:
        # add minimum reportable allele frequency
        # FreeBayes defaults to 20%, but use 10% by default for the
        # tumor case
        min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
                                                          "min_allele_fraction"), 10)) / 100.0
        opts += " --min-alternate-fraction %s" % min_af
    # Recommended settings for cancer calling
    opts += (" --pooled-discrete --pooled-continuous --genotype-qualities "
             "--report-genotype-likelihood-max --allele-balance-priors-off")
    return opts
def run_freebayes(align_bams, items, ref_file, assoc_files, region=None,
                  out_file=None):
    """Run FreeBayes variant calling, either paired tumor/normal or germline calling.

    Dispatches to the paired caller when a tumor/normal pair with a normal
    BAM is present; a tumor-only pair falls back to the single-sample
    caller with somatic options enabled.
    """
    if is_paired_analysis(align_bams, items):
        paired = get_paired_bams(align_bams, items)
        if not paired.normal_bam:
            # Tumor-only: single-sample calling with somatic option tweaks.
            call_file = _run_freebayes_caller(align_bams, items, ref_file,
                                              assoc_files, region, out_file, somatic=paired)
        else:
            call_file = _run_freebayes_paired(align_bams, items, ref_file,
                                              assoc_files, region, out_file)
    else:
        vcfutils.check_paired_problems(items)
        call_file = _run_freebayes_caller(align_bams, items, ref_file,
                                          assoc_files, region, out_file)
    return call_file
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files,
                          region=None, out_file=None, somatic=None):
    """Detect SNPs and indels with FreeBayes.
    Performs post-filtering to remove very low quality variants which
    can cause issues feeding into GATK. Breaks variants into individual
    allelic primitives for analysis and evaluation.

    Skipped entirely (result reused) when out_file already exists. When
    region filtering leaves no target regions, an empty VCF is written
    instead of invoking FreeBayes.
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            freebayes = config_utils.get_program("freebayes", config)
            vcffilter = config_utils.get_program("vcffilter", config)
            input_bams = " ".join("-b %s" % x for x in align_bams)
            opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region)
            if no_target_regions:
                vcfutils.write_empty_vcf(tx_out_file, config, samples=[dd.get_sample_name(d) for d in items])
            else:
                opts = " ".join(opts)
                # Recommended options from 1000 genomes low-complexity evaluation
                # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ
                opts += " --min-repeat-entropy 1"
                if somatic:
                    opts = _add_somatic_opts(opts, somatic)
                compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
                fix_ambig = vcfutils.fix_ambiguous_cl()
                py_cl = os.path.join(os.path.dirname(sys.executable), "py")
                # Pipeline: call -> drop QUAL<=5 -> fix ambiguous bases ->
                # strip extra alleles -> drop alt-less lines -> decompose to
                # allelic primitives -> sort/normalize -> unique alleles.
                cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} | "
                       "{vcffilter} -f 'QUAL > 5' -s | {fix_ambig} | "
                       "bcftools view -a - 2> /dev/null | "
                       "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | "
                       "vcfallelicprimitives --keep-geno | vcffixup - | vcfstreamsort | "
                       "vt normalize -n -r {ref_file} -q - 2> /dev/null | vcfuniqalleles "
                       "{compress_cmd} > {tx_out_file}")
                do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {})
    ann_file = annotation.annotate_nongatk_vcf(out_file, align_bams,
                                               assoc_files.get("dbsnp"),
                                               ref_file, config)
    return ann_file
def _run_freebayes_paired(align_bams, items, ref_file, assoc_files,
                          region=None, out_file=None):
    """Detect SNPs and indels with FreeBayes for paired tumor/normal samples.
    Sources of options for FreeBayes:
    mailing list: https://groups.google.com/d/msg/freebayes/dTWBtLyM4Vs/HAK_ZhJHguMJ
    mailing list: https://groups.google.com/forum/#!msg/freebayes/LLH7ZfZlVNs/63FdD31rrfEJ
    speedseq: https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L916
    sga/freebayes: https://github.com/jts/sga-extra/blob/7e28caf71e8107b697f9be7162050e4fa259694b/
    sga_generate_varcall_makefile.pl#L299

    Requires a normal BAM (tumor-only goes through _run_freebayes_caller).
    The pipeline inserts call_somatic to flag SOMATIC / REJECT variants.
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            paired = get_paired_bams(align_bams, items)
            assert paired.normal_bam, "Require normal BAM for FreeBayes paired calling and filtering"
            freebayes = config_utils.get_program("freebayes", config)
            opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region)
            if no_target_regions:
                vcfutils.write_empty_vcf(tx_out_file, config,
                                         samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
            else:
                opts = " ".join(opts)
                opts += " --min-repeat-entropy 1"
                opts = _add_somatic_opts(opts, paired)
                compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
                fix_ambig = vcfutils.fix_ambiguous_cl()
                py_cl = os.path.join(os.path.dirname(sys.executable), "py")
                # Tumor BAM first, normal second -- call_somatic relies on
                # this sample ordering in the VCF columns.
                cl = ("{freebayes} -f {ref_file} {opts} "
                      "{paired.tumor_bam} {paired.normal_bam} "
                      "| vcffilter -f 'QUAL > 5' -s "
                      "| {py_cl} -x 'bcbio.variation.freebayes.call_somatic(x)' "
                      "| {fix_ambig} | bcftools view -a - 2> /dev/null | "
                      "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | "
                      "vcfallelicprimitives --keep-geno | vcffixup - | vcfstreamsort | "
                      "vt normalize -n -r {ref_file} -q - 2> /dev/null | vcfuniqalleles "
                      "{compress_cmd} > {tx_out_file}")
                do.run(cl.format(**locals()), "Genotyping paired variants with FreeBayes", {})
    ann_file = annotation.annotate_nongatk_vcf(out_file, align_bams,
                                               assoc_files.get("dbsnp"), ref_file,
                                               config)
    return ann_file
# ## Filtering
def _check_lods(parts, tumor_thresh, normal_thresh):
"""Ensure likelihoods for tumor and normal pass thresholds.
Skipped if no FreeBayes GL annotations available.
"""
try:
gl_index = parts[8].split(":").index("GL")
except ValueError:
return True
try:
tumor_gls = [float(x) for x in parts[9].split(":")[gl_index].split(",")]
tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
# No GL information, no tumor call (so fail it)
except IndexError:
tumor_lod = -1.0
try:
normal_gls = [float(x) for x in parts[10].split(":")[gl_index].split(",")]
normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
# No GL inofmration, no normal call (so pass it)
except IndexError:
normal_lod = normal_thresh
return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
def _check_freqs(parts):
"""Ensure frequency of tumor to normal passes a reasonable threshold.
Avoids calling low frequency tumors also present at low frequency in normals,
which indicates a contamination or persistent error.
"""
thresh_ratio = 2.7
try: # FreeBayes
ao_index = parts[8].split(":").index("AO")
ro_index = parts[8].split(":").index("RO")
except ValueError:
ao_index, ro_index = None, None
try: # VarDict
af_index = parts[8].split(":").index("AF")
except ValueError:
af_index = None
if af_index is None and ao_index is None:
raise NotImplementedError("Unexpected format annotations: %s" % parts[0])
def _calc_freq(item):
try:
if ao_index is not None and ro_index is not None:
ao = sum([int(x) for x in item.split(":")[ao_index].split(",")])
ro = int(item.split(":")[ro_index])
freq = ao / float(ao + ro)
elif af_index is not None:
freq = float(item.split(":")[af_index])
except (IndexError, ValueError, ZeroDivisionError):
freq = 0.0
return freq
tumor_freq, normal_freq = _calc_freq(parts[9]), _calc_freq(parts[10])
return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
def remove_missingalt(line):
    """Drop VCF lines whose alternative allele was stripped entirely.

    During cleanup of extra alleles, bcftools can, in complicated cases
    with duplicate alleles, strip every alternative allele, leaving "."
    in the ALT column. Such lines cause issues downstream, so return None
    for them; header lines and normal records pass through unchanged.
    """
    if line.startswith("#"):
        return line
    alt_column = line.split("\t")[4]
    return None if alt_column == "." else line
def call_somatic(line):
    """Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.
    Assumes tumor/normal called with tumor first and normal second, as done in bcbio
    implementation.
    Uses MuTect like somatic filter based on implementation in speedseq:
    https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62
    Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
    except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
    For tumors, we retrieve the best likelihood to not be reference (the first GL) and
    for normal, the best likelhood to be reference.
    After calculating the likelihoods, we compare these to thresholds to pass variants
    at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.
    We also check that the frequency of the tumor exceeds the frequency of the normal by
    a threshold to avoid calls that are low frequency in both tumor and normal. This supports
    both FreeBayes and VarDict output frequencies.
    """
    # Thresholds are like phred scores, so 3.5 = phred35
    tumor_thresh, normal_thresh = 3.5, 3.5
    if line.startswith("#CHROM"):
        # Column header line: inject the SOMATIC/REJECT metadata lines above it.
        headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">',
                   ('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
                    'or phred likelihoods: tumor: %s, normal %s.">')
                   % (int(tumor_thresh * 10), int(normal_thresh * 10))]
        return "\n".join(headers) + "\n" + line
    elif line.startswith("#"):
        return line
    else:
        # Record line: tumor sample is column 9, normal is column 10.
        parts = line.split("\t")
        if _check_lods(parts, tumor_thresh, normal_thresh) and _check_freqs(parts):
            parts[7] = parts[7] + ";SOMATIC"
        else:
            # Failed somatic criteria: mark REJECT, preserving existing filters.
            if parts[6] in set([".", "PASS"]):
                parts[6] = "REJECT"
            else:
                parts[6] += ";REJECT"
        line = "\t".join(parts)
        return line
def _clean_freebayes_output(line):
"""Clean FreeBayes output to make post-processing with GATK happy.
XXX Not applied on recent versions which fix issues to be more compatible
with bgzip output, but retained in case of need.
- Remove lines from FreeBayes outputs where REF/ALT are identical:
2 22816178 . G G 0.0339196
or there are multiple duplicate alleles:
4 60594753 . TGAAA T,T
- Remove Type=Int specifications which are not valid VCF and GATK chokes
on.
"""
if line.startswith("#"):
line = line.replace("Type=Int,D", "Type=Integer,D")
return line
else:
parts = line.split("\t")
alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
if len(alleles) == len(set(alleles)):
return line
return None
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
    """Provide framework to clean a file in-place, with the specified clean
    function.

    ``clean_fn`` receives each input line and returns the (possibly
    rewritten) line, or a falsy value to drop it. The original file is
    kept as ``<orig_file>.orig``, the cleaned content replaces
    ``orig_file``, and the intermediate ``-<name>`` file is rewritten as a
    one-line pointer stub so reruns see it as already done.
    """
    base, ext = utils.splitext_plus(orig_file)
    out_file = "{0}-{1}{2}".format(base, name, ext)
    if not utils.file_exists(out_file):
        with open(orig_file) as in_handle:
            with file_transaction(config, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    for line in in_handle:
                        update_line = clean_fn(line)
                        if update_line:
                            out_handle.write(update_line)
        # Swap cleaned output into place, preserving the original.
        move_vcf(orig_file, "{0}.orig".format(orig_file))
        move_vcf(out_file, orig_file)
        with open(out_file, "w") as out_handle:
            out_handle.write("Moved to {0}".format(orig_file))
| {
"content_hash": "4da7241c2df7919cff818a2760ababb4",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 111,
"avg_line_length": 47.01801801801802,
"alnum_prop": 0.5962189436035,
"repo_name": "lpantano/bcbio-nextgen",
"id": "0b2c1b262022091453bfdd8d6f164a915be83367",
"size": "15657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bcbio/variation/freebayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1553199"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
} |
import os
from flask import Flask
# Absolute path of the directory containing this module.
APP_ROOT = os.path.dirname(os.path.realpath(__file__))
DEBUG = False
class CustomFlask(Flask):
    # Re-map Jinja2's delimiters from {{ }} / {% %} to (( )) / (% %) --
    # presumably so templates can coexist with a client-side framework
    # that also uses curly-brace syntax (TODO confirm against templates).
    jinja_options = Flask.jinja_options.copy()
    jinja_options.update(dict(
        block_start_string='(%',
        block_end_string='%)',
        variable_start_string='((',
        variable_end_string='))',
        comment_start_string='(#',
        comment_end_string='#)',
    ))
# Application instance; pulls DEBUG and other upper-case names from this module.
app = CustomFlask(__name__)
app.config.from_object(__name__)
UPLOAD_FOLDER = './tempuploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Create the upload directories up front; keep going if they already exist.
# Fixed: narrowed the bare except (which also swallowed KeyboardInterrupt
# and SystemExit) to OSError, the only error os.mkdir raises.
try:
    os.mkdir(UPLOAD_FOLDER)
except OSError:
    print("cannot make folder", UPLOAD_FOLDER)
try:
    os.mkdir(os.path.join(UPLOAD_FOLDER, "reference_spectra"))
except OSError:
    print("cannot make folder reference_spectra")
| {
"content_hash": "41fcc62fb9de8d7441447f522517afe8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 62,
"avg_line_length": 23.21875,
"alnum_prop": 0.6742934051144011,
"repo_name": "biorack/metatlas",
"id": "5554425d7a20decc2703076347b022a1e83b051b",
"size": "752",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "metatlas/untargeted/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4850"
},
{
"name": "Jupyter Notebook",
"bytes": "1233246"
},
{
"name": "Python",
"bytes": "1501450"
},
{
"name": "Shell",
"bytes": "66479"
},
{
"name": "wdl",
"bytes": "18796"
}
],
"symlink_target": ""
} |
"""
.. moduleauthor:: Stephen Raymond Ferg and Robert Lugg (active)
.. default-domain:: py
.. highlight:: python
Version |release|
ABOUT EASYGUI
=============
EasyGui provides an easy-to-use interface for simple GUI interaction
with a user. It does not require the programmer to know anything about
tkinter, frames, widgets, callbacks or lambda. All GUI interactions are
invoked by simple function calls that return results.
.. warning:: Using EasyGui with IDLE
You may encounter problems using IDLE to run programs that use EasyGui. Try it
and find out. EasyGui is a collection of Tkinter routines that run their own
event loops. IDLE is also a Tkinter application, with its own event loop. The
two may conflict, with unpredictable results. If you find that you have
problems, try running your EasyGui program outside of IDLE.
.. note:: EasyGui requires Tk release 8.0 or greater.
LICENSE INFORMATION
===================
EasyGui version |version|
Copyright (c) 2014, Stephen Raymond Ferg
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ABOUT THE EASYGUI LICENSE
-------------------------
| This license is what is generally known as the "modified BSD license",
| aka "revised BSD", "new BSD", "3-clause BSD".
| See http://www.opensource.org/licenses/bsd-license.php
|
| This license is GPL-compatible.
| See `<http://en.wikipedia.org/wiki/License_compatibility>`_
| See http://www.gnu.org/licenses/license-list.html#GPLCompatibleLicenses
|
| The BSD License is less restrictive than GPL.
| It allows software released under the license to be incorporated into proprietary products.
| Works based on the software may be released under a proprietary license or as closed source software.
| `<http://en.wikipedia.org/wiki/BSD_licenses#3-clause_license_.28.22New_BSD_License.22.29>`_
API
===
"""
| {
"content_hash": "695629a40277227d6fe8d98d6a2c153e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 103,
"avg_line_length": 40.151898734177216,
"alnum_prop": 0.7578814627994955,
"repo_name": "harish0507/GMapsScrapper",
"id": "910a77cb84bad01c3852f2dd0bd8d0af7c11538a",
"size": "3172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/easygui/build/lib/easygui/easygui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "455517"
}
],
"symlink_target": ""
} |
from ShareYourSystem.Classors import Representer,Attester,Classer
from ShareYourSystem.Functers import Argumenter
from ShareYourSystem.Objects import Initiator
# Define a derived Maker class registered with the framework's Classer decorator.
@Classer.ClasserClass()
class MakerClass(Initiator.InitiatorClass):
	# Keyword arguments follow the framework's <Doing>My<Name> naming
	# convention; presumably the surrounding decorators bind them onto the
	# instance (TODO confirm -- __init__ itself ignores them).
	def __init__(self,
				_MakingMyFloat=0.,
				_MakingMyList=None,
				_MakingMyInt=0,
				_MadeMyInt=0
			):
		# Call the parent method
		Initiator.InitiatorClass.__init__(self)
	@Argumenter.ArgumenterClass()
	def special_make(self,**_KwargsVariablesDict):
		# Cast the float attribute into the made int attribute
		self.MadeMyInt=int(self.MakingMyFloat)
# Define an instance
MyMaker=MakerClass()
# Trigger the make step; special_make casts MakingMyFloat into MadeMyInt
MyMaker.special_make(**{'MakingMyFloat':3.})
# Build the AttestedStr: render the instance __dict__ for the doc example
SYS._attest(
	[
		'MyMaker.__dict__ is '+SYS._str(
		MyMaker.__dict__,
		**{
			'RepresentingBaseKeyStrsListBool':False,
			'RepresentingAlineaIsBool':False
		}
		),
	]
)
# Print
#Print
| {
"content_hash": "965dbc71a18b3ac41185d2a2ecf60043",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 19.466666666666665,
"alnum_prop": 0.7340182648401826,
"repo_name": "Ledoux/ShareYourSystem",
"id": "64896a9210e7e77f8175cf9ea081cef707f60588",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/draft/Argumenter/01_ExampleDoc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
'''
Listing 5.4: Rectilinear to polar coordinates

Converts four (r, angle) polar pairs to (x, y) rectilinear coordinates on
an OpenCL device using float4 vector operations, then copies the results
back to the host and prints them.
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility

# Kernel: sincos() writes cos(angle) into x_coords and returns sin(angle),
# which lands in y_coords; both are then scaled by the radii.
kernel_src = '''
__kernel void polar_rect(__global float4 *r_vals,
                         __global float4 *angles,
                         __global float4 *x_coords,
                         __global float4 *y_coords) {

   *y_coords = sincos(*angles, x_coords);
   *x_coords *= *r_vals;
   *y_coords *= *r_vals;
}
'''

# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev])
queue = cl.CommandQueue(context, dev)

# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
    prog.build(options=['-Werror'], devices=[dev])
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # not intercepted; on a build failure, dump the compiler log and
    # re-raise the original error.
    print('Build log:')
    print(prog.get_build_info(dev, cl.program_build_info.LOG))
    raise

# Data and buffers (one float4 = four scalar work items per argument)
r_in = cl.array.vec.make_float4(2, 1, 3, 4)
angles_in = cl.array.vec.make_float4(3*np.pi/8, 3*np.pi/4, 4*np.pi/3, 11*np.pi/6)
x_out = np.empty_like(r_in, dtype=cl.array.vec.float4)
y_out = np.empty_like(r_in, dtype=cl.array.vec.float4)

# Create input/output buffers
r_arg = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=r_in)
angles_arg = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=angles_in)
x_buffer = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=x_out.nbytes)
y_buffer = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=y_out.nbytes)

# Enqueue kernel (with argument specified directly); a single work item
# processes all four components via the float4 vector type.
global_size = (1,)
local_size = None

# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.polar_rect(queue, global_size, local_size, r_arg, angles_arg, x_buffer, y_buffer)

# Enqueue command to copy from device buffers to host memory; only the
# final copy blocks, which also guarantees the first has completed on the
# in-order queue.
cl.enqueue_copy(queue, dest=x_out, src=x_buffer, is_blocking=False)
cl.enqueue_copy(queue, dest=y_out, src=y_buffer, is_blocking=True)

print('X output: ' + str(x_out))
print('Y output: ' + str(y_out))
| {
"content_hash": "459cbe0ebce9bee8bf2e700787e55fcd",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 103,
"avg_line_length": 33.81967213114754,
"alnum_prop": 0.6800775569558895,
"repo_name": "oysstu/pyopencl-in-action",
"id": "10aff9cc4434d636aef5f9e9a678cfd2b8b0464b",
"size": "2063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch5/polar_rect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89496"
}
],
"symlink_target": ""
} |
from email.headerregistry import Address
from typing import Any, Dict, List, Optional, Union
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from zerver.actions.bots import (
do_change_bot_owner,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
)
from zerver.actions.create_user import do_create_user, do_reactivate_user, notify_created_bot
from zerver.actions.custom_profile_fields import (
check_remove_custom_profile_field_value,
do_update_user_custom_profile_data_if_changed,
)
from zerver.actions.user_settings import (
check_change_bot_full_name,
check_change_full_name,
do_change_avatar_fields,
do_regenerate_api_key,
)
from zerver.actions.users import (
do_change_user_role,
do_deactivate_user,
do_update_bot_config_data,
do_update_outgoing_webhook_service,
)
from zerver.context_processors import get_valid_realm_from_request
from zerver.decorator import require_member_or_admin, require_realm_admin
from zerver.forms import PASSWORD_TOO_WEAK_ERROR, CreateUserForm
from zerver.lib.avatar import avatar_url, get_gravatar_url
from zerver.lib.bot_config import set_bot_config
from zerver.lib.email_validation import email_allowed_for_realm
from zerver.lib.exceptions import (
CannotDeactivateLastUserError,
JsonableError,
MissingAuthenticationError,
OrganizationAdministratorRequired,
OrganizationOwnerRequired,
)
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.rate_limiter import rate_limit_spectator_attachment_access_by_file
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.streams import access_stream_by_id, access_stream_by_name, subscribed_to_stream
from zerver.lib.types import ProfileDataElementUpdateDict, ProfileDataElementValue, Validator
from zerver.lib.upload import upload_avatar_image
from zerver.lib.url_encoding import append_url_query_string
from zerver.lib.users import (
access_bot_by_id,
access_user_by_email,
access_user_by_id,
add_service,
check_bot_creation_policy,
check_bot_name_available,
check_full_name,
check_short_name,
check_valid_bot_config,
check_valid_bot_type,
check_valid_interface_type,
get_api_key,
get_raw_user_data,
validate_user_custom_profile_data,
)
from zerver.lib.utils import generate_api_key
from zerver.lib.validator import (
check_bool,
check_capped_string,
check_dict,
check_dict_only,
check_int,
check_int_in,
check_list,
check_none_or,
check_string,
check_union,
check_url,
)
from zerver.models import (
DisposableEmailError,
DomainNotAllowedForRealmError,
EmailContainsPlusError,
InvalidFakeEmailDomain,
Message,
Realm,
Service,
Stream,
UserProfile,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_including_cross_realm,
get_user_profile_by_id_in_realm,
)
from zproject.backends import check_password_strength
def check_last_owner(user_profile: UserProfile) -> bool:
    """Return True iff this user is the sole human owner of their realm.

    Bots never count, even if they somehow carry the owner role.
    """
    if user_profile.is_bot or not user_profile.is_realm_owner:
        return False
    human_owners = set(user_profile.realm.get_human_owner_users())
    return len(human_owners) == 1
@has_request_variables
def deactivate_user_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    user_id: int,
    deactivation_notification_comment: Optional[str] = REQ(
        str_validator=check_capped_string(max_length=2000), default=None
    ),
) -> HttpResponse:
    """Deactivate another user's account (administrative endpoint).

    Guards, in order: only owners may deactivate owners, and the last
    remaining organization owner can never be deactivated.  A non-None
    comment (even "") requests a notification email to the target.
    """
    target = access_user_by_id(user_profile, user_id, for_admin=True)
    if target.is_realm_owner and not user_profile.is_realm_owner:
        raise OrganizationOwnerRequired()
    if check_last_owner(target):
        raise JsonableError(_("Cannot deactivate the only organization owner"))
    if deactivation_notification_comment is not None:
        deactivation_notification_comment = deactivation_notification_comment.strip()
    return _deactivate_user_profile_backend(
        request,
        user_profile,
        target,
        deactivation_notification_comment=deactivation_notification_comment,
    )
def deactivate_user_own_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Let a user deactivate their own account.

    Refused when they are the last active user in the realm, or the last
    organization owner; the error distinguishes the two cases.
    """
    if UserProfile.objects.filter(realm=user_profile.realm, is_active=True).count() == 1:
        raise CannotDeactivateLastUserError(is_last_owner=False)
    if user_profile.is_realm_owner and check_last_owner(user_profile):
        raise CannotDeactivateLastUserError(is_last_owner=True)

    do_deactivate_user(user_profile, acting_user=user_profile)
    return json_success(request)
def deactivate_bot_backend(
    request: HttpRequest, user_profile: UserProfile, bot_id: int
) -> HttpResponse:
    """Deactivate a bot the requester controls; bots never get the
    deactivation-notification email (comment is always None)."""
    target = access_bot_by_id(user_profile, bot_id)
    return _deactivate_user_profile_backend(
        request, user_profile, target, deactivation_notification_comment=None
    )
def _deactivate_user_profile_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    target: UserProfile,
    *,
    deactivation_notification_comment: Optional[str],
) -> HttpResponse:
    """Shared worker for the user/bot deactivation endpoints: deactivate
    `target` and optionally email them about it."""
    do_deactivate_user(target, acting_user=user_profile)

    # It's important that we check for None explicitly here, since ""
    # encodes sending an email without a custom administrator comment.
    if deactivation_notification_comment is not None:
        send_email(
            "zerver/emails/deactivate",
            to_user_ids=[target.id],
            from_address=FromAddress.NOREPLY,
            context={
                "deactivation_notification_comment": deactivation_notification_comment,
                "realm_uri": target.realm.uri,
                "realm_name": target.realm.name,
            },
        )
    return json_success(request)
def reactivate_user_backend(
    request: HttpRequest, user_profile: UserProfile, user_id: int
) -> HttpResponse:
    """Reactivate a deactivated user or bot (admin endpoint).

    Reactivating a bot re-checks the realm's bot-creation policy, since
    it may have tightened since the bot was created.
    """
    target = access_user_by_id(
        user_profile, user_id, allow_deactivated=True, allow_bots=True, for_admin=True
    )
    if target.is_bot:
        assert target.bot_type is not None
        check_bot_creation_policy(user_profile, target.bot_type)
    do_reactivate_user(target, acting_user=user_profile)
    return json_success(request)
# Validator for the `profile_data` request parameter: a list of
# {"id": <custom field id>, "value": ...} dicts, where a value may be a
# string, a list of ints, or None (None/empty means "clear this field",
# per update_user_backend).
check_profile_data: Validator[
    List[Dict[str, Optional[Union[int, ProfileDataElementValue]]]]
] = check_list(
    check_dict_only(
        [
            ("id", check_int),
            (
                "value",
                check_none_or(
                    check_union([check_string, check_list(check_int)]),
                ),
            ),
        ]
    ),
)
@has_request_variables
def update_user_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    user_id: int,
    full_name: Optional[str] = REQ(default=None),
    role: Optional[int] = REQ(
        default=None,
        json_validator=check_int_in(
            UserProfile.ROLE_TYPES,
        ),
    ),
    profile_data: Optional[List[Dict[str, Optional[Union[int, ProfileDataElementValue]]]]] = REQ(
        default=None,
        json_validator=check_profile_data,
    ),
) -> HttpResponse:
    """Admin endpoint to update another user's role, name, and/or custom
    profile fields.  Each of the three updates is independent and only
    applied when the corresponding parameter was supplied.
    """
    target = access_user_by_id(
        user_profile, user_id, allow_deactivated=True, allow_bots=True, for_admin=True
    )

    if role is not None and target.role != role:
        # Require that the current user has permissions to
        # grant/remove the role in question.
        #
        # Logic replicated in patch_bot_backend.
        if UserProfile.ROLE_REALM_OWNER in [role, target.role] and not user_profile.is_realm_owner:
            raise OrganizationOwnerRequired()
        elif not user_profile.is_realm_admin:
            raise OrganizationAdministratorRequired()
        if target.role == UserProfile.ROLE_REALM_OWNER and check_last_owner(target):
            raise JsonableError(
                _("The owner permission cannot be removed from the only organization owner.")
            )
        do_change_user_role(target, role, acting_user=user_profile)

    if full_name is not None and target.full_name != full_name and full_name.strip() != "":
        # We don't respect `name_changes_disabled` here because the request
        # is on behalf of the administrator.
        check_change_full_name(target, full_name, user_profile)

    if profile_data is not None:
        clean_profile_data: List[ProfileDataElementUpdateDict] = []
        for entry in profile_data:
            assert isinstance(entry["id"], int)
            assert not isinstance(entry["value"], int)
            # None or an empty value means "remove this field's value".
            if entry["value"] is None or not entry["value"]:
                field_id = entry["id"]
                check_remove_custom_profile_field_value(target, field_id)
            else:
                clean_profile_data.append(
                    {
                        "id": entry["id"],
                        "value": entry["value"],
                    }
                )
        validate_user_custom_profile_data(target.realm.id, clean_profile_data)
        do_update_user_custom_profile_data_if_changed(target, clean_profile_data)

    return json_success(request)
def avatar(
    request: HttpRequest,
    maybe_user_profile: Union[UserProfile, AnonymousUser],
    email_or_id: str,
    medium: bool = False,
) -> HttpResponse:
    """Accepts an email address or user ID and returns the avatar"""
    # A purely numeric identifier is treated as a user ID; anything
    # else as an email address.
    is_email = False
    try:
        int(email_or_id)
    except ValueError:
        is_email = True

    if not maybe_user_profile.is_authenticated:
        # Allow anonymous access to avatars only if spectators are
        # enabled in the organization.
        realm = get_valid_realm_from_request(request)
        if not realm.allow_web_public_streams_access():
            raise MissingAuthenticationError()

        # We only allow the ID format for accessing a user's avatar
        # for spectators. This is mainly for defense in depth, since
        # email_address_visibility should mean spectators only
        # interact with fake email addresses anyway.
        if is_email:
            raise MissingAuthenticationError()

        if settings.RATE_LIMITING:
            unique_avatar_key = f"{realm.id}/{email_or_id}/{medium}"
            rate_limit_spectator_attachment_access_by_file(unique_avatar_key)
    else:
        realm = maybe_user_profile.realm

    try:
        if is_email:
            avatar_user_profile = get_user_including_cross_realm(email_or_id, realm)
        else:
            avatar_user_profile = get_user_by_id_in_realm_including_cross_realm(
                int(email_or_id), realm
            )
        # If there is a valid user account passed in, use its avatar
        url = avatar_url(avatar_user_profile, medium=medium)
    except UserProfile.DoesNotExist:
        # If there is no such user, treat it as a new gravatar
        email = email_or_id
        avatar_version = 1
        url = get_gravatar_url(email, avatar_version, medium)

    # We can rely on the URL already having query parameters. Because
    # our templates depend on being able to use the ampersand to
    # add query parameters to our url, get_avatar_url does '?x=x'
    # hacks to prevent us from having to jump through decode/encode hoops.
    assert url is not None
    url = append_url_query_string(url, request.META["QUERY_STRING"])
    return redirect(url)
def avatar_medium(
    request: HttpRequest, maybe_user_profile: Union[UserProfile, AnonymousUser], email_or_id: str
) -> HttpResponse:
    """Thin wrapper around `avatar` for the medium-size avatar URL route."""
    return avatar(request, maybe_user_profile, email_or_id, medium=True)
def get_stream_name(stream: Optional[Stream]) -> Optional[str]:
    """Return the stream's name, or None when no stream was provided."""
    return stream.name if stream else None
@require_member_or_admin
@has_request_variables
def patch_bot_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    bot_id: int,
    full_name: Optional[str] = REQ(default=None),
    role: Optional[int] = REQ(
        default=None,
        json_validator=check_int_in(
            UserProfile.ROLE_TYPES,
        ),
    ),
    bot_owner_id: Optional[int] = REQ(json_validator=check_int, default=None),
    config_data: Optional[Dict[str, str]] = REQ(
        default=None, json_validator=check_dict(value_validator=check_string)
    ),
    service_payload_url: Optional[str] = REQ(json_validator=check_url, default=None),
    service_interface: int = REQ(json_validator=check_int, default=1),
    default_sending_stream: Optional[str] = REQ(default=None),
    default_events_register_stream: Optional[str] = REQ(default=None),
    default_all_public_streams: Optional[bool] = REQ(default=None, json_validator=check_bool),
) -> HttpResponse:
    """Update settings of an existing bot: name, role, owner, default
    streams, outgoing-webhook service, config data, and/or avatar.

    Each section below is applied only when its parameter was supplied;
    the response echoes the bot's resulting public settings.
    """
    bot = access_bot_by_id(user_profile, bot_id)

    if full_name is not None:
        check_change_bot_full_name(bot, full_name, user_profile)

    if role is not None and bot.role != role:
        # Logic duplicated from update_user_backend.
        if UserProfile.ROLE_REALM_OWNER in [role, bot.role] and not user_profile.is_realm_owner:
            raise OrganizationOwnerRequired()
        elif not user_profile.is_realm_admin:
            raise OrganizationAdministratorRequired()
        do_change_user_role(bot, role, acting_user=user_profile)

    if bot_owner_id is not None:
        try:
            owner = get_user_profile_by_id_in_realm(bot_owner_id, user_profile.realm)
        except UserProfile.DoesNotExist:
            raise JsonableError(_("Failed to change owner, no such user"))
        if not owner.is_active:
            raise JsonableError(_("Failed to change owner, user is deactivated"))
        if owner.is_bot:
            raise JsonableError(_("Failed to change owner, bots can't own other bots"))
        previous_owner = bot.bot_owner
        if previous_owner != owner:
            do_change_bot_owner(bot, owner, user_profile)

    if default_sending_stream is not None:
        # "" encodes clearing the default sending stream.
        if default_sending_stream == "":
            stream: Optional[Stream] = None
        else:
            (stream, sub) = access_stream_by_name(user_profile, default_sending_stream)
        do_change_default_sending_stream(bot, stream, acting_user=user_profile)

    if default_events_register_stream is not None:
        # "" encodes clearing the default events-register stream.
        if default_events_register_stream == "":
            stream = None
        else:
            (stream, sub) = access_stream_by_name(user_profile, default_events_register_stream)
        do_change_default_events_register_stream(bot, stream, acting_user=user_profile)

    if default_all_public_streams is not None:
        do_change_default_all_public_streams(
            bot, default_all_public_streams, acting_user=user_profile
        )

    if service_payload_url is not None:
        check_valid_interface_type(service_interface)
        assert service_interface is not None
        do_update_outgoing_webhook_service(bot, service_interface, service_payload_url)

    if config_data is not None:
        do_update_bot_config_data(bot, config_data)

    # At most one uploaded file is accepted, interpreted as a new avatar.
    if len(request.FILES) == 0:
        pass
    elif len(request.FILES) == 1:
        user_file = list(request.FILES.values())[0]
        assert isinstance(user_file, UploadedFile)
        assert user_file.size is not None
        upload_avatar_image(user_file, user_profile, bot)
        avatar_source = UserProfile.AVATAR_FROM_USER
        do_change_avatar_fields(bot, avatar_source, acting_user=user_profile)
    else:
        raise JsonableError(_("You may only upload one file at a time"))

    json_result = dict(
        full_name=bot.full_name,
        avatar_url=avatar_url(bot),
        service_interface=service_interface,
        service_payload_url=service_payload_url,
        config_data=config_data,
        default_sending_stream=get_stream_name(bot.default_sending_stream),
        default_events_register_stream=get_stream_name(bot.default_events_register_stream),
        default_all_public_streams=bot.default_all_public_streams,
    )

    # Don't include the bot owner in case it is not set.
    # Default bots have no owner.
    if bot.bot_owner is not None:
        json_result["bot_owner"] = bot.bot_owner.email

    return json_success(request, data=json_result)
@require_member_or_admin
@has_request_variables
def regenerate_bot_api_key(
    request: HttpRequest, user_profile: UserProfile, bot_id: int
) -> HttpResponse:
    """Invalidate a bot's API key and return a freshly generated one."""
    bot = access_bot_by_id(user_profile, bot_id)
    new_api_key = do_regenerate_api_key(bot, user_profile)
    json_result = dict(
        api_key=new_api_key,
    )
    return json_success(request, data=json_result)
@require_member_or_admin
@has_request_variables
def add_bot_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    full_name_raw: str = REQ("full_name"),
    short_name_raw: str = REQ("short_name"),
    bot_type: int = REQ(json_validator=check_int, default=UserProfile.DEFAULT_BOT),
    payload_url: str = REQ(json_validator=check_url, default=""),
    service_name: Optional[str] = REQ(default=None),
    config_data: Dict[str, str] = REQ(
        default={}, json_validator=check_dict(value_validator=check_string)
    ),
    interface_type: int = REQ(json_validator=check_int, default=Service.GENERIC),
    default_sending_stream_name: Optional[str] = REQ("default_sending_stream", default=None),
    default_events_register_stream_name: Optional[str] = REQ(
        "default_events_register_stream", default=None
    ),
    default_all_public_streams: Optional[bool] = REQ(json_validator=check_bool, default=None),
) -> HttpResponse:
    """Create a new bot owned by the requesting user.

    Validates names, email availability, realm policy, and bot-type
    specific configuration; then creates the user, stores any avatar
    upload and service/config records, and returns the bot's public data
    (including its API key).
    """
    short_name = check_short_name(short_name_raw)
    if bot_type != UserProfile.INCOMING_WEBHOOK_BOT:
        service_name = service_name or short_name
        short_name += "-bot"
    full_name = check_full_name(full_name_raw)
    # The bot's login email is synthesized from the short name and the
    # realm's bot domain.
    try:
        email = Address(username=short_name, domain=user_profile.realm.get_bot_domain()).addr_spec
    except InvalidFakeEmailDomain:
        raise JsonableError(
            _(
                "Can't create bots until FAKE_EMAIL_DOMAIN is correctly configured.\n"
                "Please contact your server administrator."
            )
        )
    except ValueError:
        raise JsonableError(_("Bad name or username"))
    form = CreateUserForm({"full_name": full_name, "email": email})

    if bot_type == UserProfile.EMBEDDED_BOT:
        if not settings.EMBEDDED_BOTS_ENABLED:
            raise JsonableError(_("Embedded bots are not enabled."))
        if service_name not in [bot.name for bot in EMBEDDED_BOTS]:
            raise JsonableError(_("Invalid embedded bot name."))

    if not form.is_valid():  # nocoverage
        # coverage note: The similar block above covers the most
        # common situation where this might fail, but this case may be
        # still possible with an overly long username.
        raise JsonableError(_("Bad name or username"))
    try:
        get_user_by_delivery_email(email, user_profile.realm)
        raise JsonableError(_("Username already in use"))
    except UserProfile.DoesNotExist:
        pass
    check_bot_name_available(
        realm_id=user_profile.realm_id,
        full_name=full_name,
    )
    check_bot_creation_policy(user_profile, bot_type)
    check_valid_bot_type(user_profile, bot_type)
    check_valid_interface_type(interface_type)

    # At most one uploaded file is accepted, interpreted as the avatar.
    if len(request.FILES) == 0:
        avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
    elif len(request.FILES) != 1:
        raise JsonableError(_("You may only upload one file at a time"))
    else:
        avatar_source = UserProfile.AVATAR_FROM_USER

    default_sending_stream = None
    if default_sending_stream_name is not None:
        (default_sending_stream, ignored_sub) = access_stream_by_name(
            user_profile, default_sending_stream_name
        )

    default_events_register_stream = None
    if default_events_register_stream_name is not None:
        (default_events_register_stream, ignored_sub) = access_stream_by_name(
            user_profile, default_events_register_stream_name
        )

    if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT) and service_name:
        check_valid_bot_config(bot_type, service_name, config_data)

    bot_profile = do_create_user(
        email=email,
        password=None,
        realm=user_profile.realm,
        full_name=full_name,
        bot_type=bot_type,
        bot_owner=user_profile,
        avatar_source=avatar_source,
        default_sending_stream=default_sending_stream,
        default_events_register_stream=default_events_register_stream,
        default_all_public_streams=default_all_public_streams,
        acting_user=user_profile,
    )
    if len(request.FILES) == 1:
        user_file = list(request.FILES.values())[0]
        assert isinstance(user_file, UploadedFile)
        assert user_file.size is not None
        upload_avatar_image(user_file, user_profile, bot_profile)

    if bot_type in (UserProfile.OUTGOING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
        assert isinstance(service_name, str)
        add_service(
            name=service_name,
            user_profile=bot_profile,
            base_url=payload_url,
            interface=interface_type,
            token=generate_api_key(),
        )

    if bot_type == UserProfile.INCOMING_WEBHOOK_BOT and service_name:
        set_bot_config(bot_profile, "integration_id", service_name)

    if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
        for key, value in config_data.items():
            set_bot_config(bot_profile, key, value)

    notify_created_bot(bot_profile)

    api_key = get_api_key(bot_profile)

    json_result = dict(
        user_id=bot_profile.id,
        api_key=api_key,
        avatar_url=avatar_url(bot_profile),
        default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
        default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
        default_all_public_streams=bot_profile.default_all_public_streams,
    )
    return json_success(request, data=json_result)
@require_member_or_admin
def get_bots_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """List the active bots owned by the requesting user, oldest first."""
    bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile)
    # select_related avoids one query per bot for the stream names below.
    bot_profiles = bot_profiles.select_related(
        "default_sending_stream", "default_events_register_stream"
    )
    bot_profiles = bot_profiles.order_by("date_joined")

    def bot_info(bot_profile: UserProfile) -> Dict[str, Any]:
        # Serialize one bot's public settings for the response.
        default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
        default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)

        # Bots are supposed to have only one API key, at least for now.
        # Therefore we can safely assume that one and only valid API key will be
        # the first one.
        api_key = get_api_key(bot_profile)

        return dict(
            username=bot_profile.email,
            full_name=bot_profile.full_name,
            api_key=api_key,
            avatar_url=avatar_url(bot_profile),
            default_sending_stream=default_sending_stream,
            default_events_register_stream=default_events_register_stream,
            default_all_public_streams=bot_profile.default_all_public_streams,
        )

    return json_success(request, data={"bots": list(map(bot_info, bot_profiles))})
def get_user_data(
    user_profile: UserProfile,
    include_custom_profile_fields: bool,
    client_gravatar: bool,
    target_user: Optional[UserProfile] = None,
) -> Dict[str, Any]:
    """Fetch serialized user data for one user or the whole realm.

    client_gravatar defaults to True on the assumption that clients can
    compute their own gravatar URLs, saving bandwidth (gravatar URLs
    embed MD5 hashes, which compress poorly).  When email addresses are
    not visible to everyone in the realm, clients cannot do that, so the
    flag is forced off.
    """
    realm = user_profile.realm
    if realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
        # Clients can't compute gravatars without real email addresses.
        client_gravatar = False

    members = get_raw_user_data(
        realm,
        user_profile,
        target_user=target_user,
        client_gravatar=client_gravatar,
        user_avatar_url_field_optional=False,
        include_custom_profile_fields=include_custom_profile_fields,
    )

    if target_user is None:
        return {"members": [members[k] for k in members]}
    return {"user": members[target_user.id]}
@has_request_variables
def get_members_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    user_id: Optional[int] = None,
    include_custom_profile_fields: bool = REQ(json_validator=check_bool, default=False),
    client_gravatar: bool = REQ(json_validator=check_bool, default=True),
) -> HttpResponse:
    """Return data on one realm user (when user_id is given) or all of
    them; deactivated users and bots are included."""
    target_user = None
    if user_id is not None:
        target_user = access_user_by_id(
            user_profile, user_id, allow_deactivated=True, allow_bots=True, for_admin=False
        )

    data = get_user_data(user_profile, include_custom_profile_fields, client_gravatar, target_user)

    return json_success(request, data)
@require_realm_admin
@has_request_variables
def create_user_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    email: str = REQ(),
    password: str = REQ(),
    full_name_raw: str = REQ("full_name"),
) -> HttpResponse:
    """Directly create a user account (admin API, no invitation flow).

    Requires the can_create_users permission; validates name, email
    domain policy, uniqueness, and password strength before creating.
    """
    if not user_profile.can_create_users:
        raise JsonableError(_("User not authorized for this query"))

    full_name = check_full_name(full_name_raw)
    form = CreateUserForm({"full_name": full_name, "email": email})
    if not form.is_valid():
        raise JsonableError(_("Bad name or username"))

    # Check that the new user's email address belongs to the admin's realm
    # (Since this is an admin API, we don't require the user to have been
    # invited first.)
    realm = user_profile.realm
    try:
        email_allowed_for_realm(email, user_profile.realm)
    except DomainNotAllowedForRealmError:
        raise JsonableError(
            _("Email '{email}' not allowed in this organization").format(
                email=email,
            )
        )
    except DisposableEmailError:
        raise JsonableError(_("Disposable email addresses are not allowed in this organization"))
    except EmailContainsPlusError:
        raise JsonableError(_("Email addresses containing + are not allowed."))

    try:
        get_user_by_delivery_email(email, user_profile.realm)
        raise JsonableError(_("Email '{}' already in use").format(email))
    except UserProfile.DoesNotExist:
        pass

    if not check_password_strength(password):
        raise JsonableError(str(PASSWORD_TOO_WEAK_ERROR))

    target_user = do_create_user(
        email,
        password,
        realm,
        full_name,
        # Explicitly set tos_version=None. For servers that have
        # configured Terms of Service, this means that users created
        # via this mechanism will be prompted to accept the Terms of
        # Service on first login.
        tos_version=None,
        acting_user=user_profile,
    )
    return json_success(request, data={"user_id": target_user.id})
def get_profile_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Return the requesting user's own profile data, plus the ID of the
    newest message they have received (-1 when they have none)."""
    raw_user_data = get_raw_user_data(
        user_profile.realm,
        user_profile,
        target_user=user_profile,
        client_gravatar=False,
        user_avatar_url_field_optional=False,
    )
    result: Dict[str, Any] = raw_user_data[user_profile.id]

    result["max_message_id"] = -1

    messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by("-id")[:1]
    if messages:
        result["max_message_id"] = messages[0].id

    return json_success(request, data=result)
@has_request_variables
def get_subscription_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    user_id: int = REQ(json_validator=check_int, path_only=True),
    stream_id: int = REQ(json_validator=check_int, path_only=True),
) -> HttpResponse:
    """Report whether the given user is subscribed to the given stream."""
    target_user = access_user_by_id(user_profile, user_id, for_admin=False)
    # access_stream_by_id enforces that the requester may see the stream;
    # the returned (stream, sub) are otherwise unused here.
    (stream, sub) = access_stream_by_id(user_profile, stream_id, allow_realm_admin=True)

    subscription_status = {"is_subscribed": subscribed_to_stream(target_user, stream_id)}

    return json_success(request, data=subscription_status)
@has_request_variables
def get_user_by_email(
    request: HttpRequest,
    user_profile: UserProfile,
    email: str,
    include_custom_profile_fields: bool = REQ(json_validator=check_bool, default=False),
    client_gravatar: bool = REQ(json_validator=check_bool, default=True),
) -> HttpResponse:
    """Like get_members_backend for a single user, but looked up by email
    instead of by ID; deactivated users and bots are included."""
    target_user = access_user_by_email(
        user_profile, email, allow_deactivated=True, allow_bots=True, for_admin=False
    )

    data = get_user_data(user_profile, include_custom_profile_fields, client_gravatar, target_user)
    return json_success(request, data)
| {
"content_hash": "910acbbd5690519228ef5a68015ecd27",
"timestamp": "",
"source": "github",
"line_count": 786,
"max_line_length": 100,
"avg_line_length": 37.35750636132315,
"alnum_prop": 0.6719340666825597,
"repo_name": "rht/zulip",
"id": "1225a6499494a0b81a5a223217257f78501cbc78",
"size": "29363",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "zerver/views/users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "489438"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "743287"
},
{
"name": "Handlebars",
"bytes": "374049"
},
{
"name": "JavaScript",
"bytes": "4000260"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10160680"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284836"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, \
with_statement
import unittest
from miserable.dns.utils import *
class HostnameTestCase(unittest.TestCase):
    """Tests for the Address wrapper from miserable.dns.utils.

    NOTE(review): `Address` and `ip_address` come from the wildcard
    import of miserable.dns.utils — confirm their exact origin there.
    """

    def test_hostname(self):
        # A hostname address starts with no resolved IP, keeping the
        # port and hostname as given.
        addr = Address('www.baidu.com', 80)
        self.assertTrue(addr.ipaddr is None)
        self.assertEqual(addr.port, 80)
        self.assertEqual(addr.hostname, 'www.baidu.com')
        # Once resolved, the IP can be assigned after construction.
        addr.ipaddr = ip_address('127.0.0.1')
        self.assertTrue(addr.ipaddr is not None)
# Allow running this test module directly: python address_test.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "272b03eee1e0475453cbec144faeb9ce",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.6605166051660517,
"repo_name": "wiiiky/shadowsocks",
"id": "6adb2db2bfb404d8e8e59a4e3eab3ec2285f7d76",
"size": "1131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/address_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "108523"
}
],
"symlink_target": ""
} |
"""
Graph core.
@package bridgecut
@author Aaron Zampaglione <azampagl@my.fit.edu>
@copyright 2011 Aaron Zampaglione
@license MIT
"""
from node import Node
from edge import Edge
from bridgecut.lib.util import combinations
class Graph(object):
@classmethod
def expand(cls, node):
"""
Builds a graph based on a single node by expanding.
Key arguments:
node -- the node to expand.
"""
visited = {}
q = [node]
while q:
node = q.pop(0)
visited[node.value] = node
for nbr in node.nbrs():
if not nbr.value in visited:
q.append(nbr)
return cls(visited)
    @classmethod
    def factory(cls, items):
        """
        Returns a new graph.

        Key arguments:
        items -- the items to parse (iterable of (value1, value2) edge pairs).
        """
        nodes = {}
        # `edges` is only used here for de-duplication: it guarantees at
        # most one Edge object per node pair.  NOTE(review): the Edge
        # constructor presumably registers itself with both endpoint
        # nodes as a side effect (the dict itself is never returned) —
        # confirm against edge.py.
        edges = {}

        for value1, value2 in items:
            node1 = None
            node2 = None

            # Reuse an existing Node for a value, or create it on first sight.
            try:
                node1 = nodes[value1]
            except KeyError:
                node1 = nodes[value1] = Node(value1)

            try:
                node2 = nodes[value2]
            except KeyError:
                node2 = nodes[value2] = Node(value2)

            if not node1 in edges:
                edges[node1] = {}
            if not node2 in edges:
                edges[node2] = {}

            if not node2 in edges[node1]:
                edges[node1][node2] = edges[node2][node1] = Edge(node1, node2)

        return cls(nodes)
def __init__(self, nodes):
"""
Init.
Key arguments:
nodes -- this graph's nodes.
"""
# Go in order so we have the same results for every run regardless
# of ties.
self.nodes = []
for value in sorted(nodes.iterkeys()):
self.nodes.append(nodes[value])
# Hashtable based on node values.
self.values = nodes
def __str__(self):
"""
Returns graph as a string representation.
"""
return ', '.join([str(node) for node in self.nodes])
def bfs(self, src, paths):
"""
BFS for all the shortest paths from a source node.
Optimized this method as best as possible...
Key arguments:
src -- source node
paths -- already determined paths
"""
newpaths = {}
newpaths[src] = {}
# All paths are guaranteed, so prebuild an empty dictionary.
for node in self.nodes:
if src != node:
newpaths[src][node] = None
# Keep track of nodes we have visited already.
visited = {}
visited[src] = True
# BFS
q = [(src, [])]
while q:
node, path = q.pop(0)
for nbr in node.nbrs():
if not nbr in paths and nbr != src:
if newpaths[src][nbr] != None:
l1 = len(path)
l2 = len(newpaths[src][nbr][0])
# New shortest path!
if l1 < l2:
newpaths[src][nbr] = [list(path)]
# Add another shortest path.
elif l1 == l2:
newpaths[src][nbr].append(list(path))
else:
newpaths[src][nbr] = [list(path)]
# Have we found paths to every node in the graph yet?
if not nbr in visited:
visited[nbr] = True
# Add node to the new path.
newpath = list(path)
newpath.append(nbr)
# Add to queue.
q.append((nbr, newpath))
return newpaths
def cluster_coeff(self):
"""
Find the clustering coefficient.
"""
if len(self.nodes) < 2:
return 0.0
num = 0.0
for node in self.nodes:
# Special case, node with only one neighbor.
if node.deg() < 2:
continue
edges = set()
for nbr1, nbr2 in combinations(node.nbrs(), 2):
edges = edges.union(set(nbr1.edges).intersection(nbr2.edges))
num += (2 * len(edges)) / float(node.deg() * (node.deg() - 1))
return num / len(self.nodes)
def copy(self):
"""
Returns a deep copy of this graph.
"""
items = []
for node in self.nodes:
for nbr in node.nbrs():
items.append([node.value, nbr.value])
return self.__class__.factory(items)
def density(self):
"""
Finds the density of this graph.
"""
n = len(self.nodes)
if (n - 1) == 0:
return float('inf')
return float(2 * len(self.edges())) / (n * (n - 1))
def dist(self, node1, node2, paths=None):
"""
Finds the distance between two nodes.
Key arguments:
node1 -- node1.
node2 -- node2
paths -- the shortest paths to compare against. [optional]
"""
if not paths:
paths = self.paths()
# Make sure the nodes are in this graph!
node1 = self.node(node1.value)
node2 = self.node(node2.value)
if not node1 or not node2:
return float('inf')
# We need to add one because the initial and final nodes of the route
# are not included in the actual route!
return len(paths[node1][node2][0]) + 1.0
def edges(self):
"""
Returns the edges in the graph.
"""
ret = set(self.nodes[0].edges)
for node in self.nodes[1:]:
ret = ret.union(node.edges)
return list(ret)
def node(self, value):
"""
Returns the node based on a given value.
Key arguments:
value -- the value to return.
"""
try:
return self.values[value]
except KeyError:
return None
def paths(self):
"""
Finds all the shortest paths for every possible route.
"""
paths = self.bfs(self.nodes[0], {})
for node in self.nodes[1:]:
paths.update(self.bfs(node, paths))
# Build reverse ref.
for i in range(len(self.nodes) - 1, 0, -1):
for j in range(0, i):
node1 = self.nodes[i]
node2 = self.nodes[j]
paths[node1][node2] = paths[node2][node1]
return paths
def remove(self, graph):
"""
Removes a graphs nodes from this one.
Key arguments:
graph -- the sub graph to remove nodes by.
"""
for node in list(graph.nodes):
self.nodes.remove(self.node(node.value))
del self.values[node.value] | {
"content_hash": "4353eccc9346b87cb1b6778bbabead5b",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 78,
"avg_line_length": 28.28957528957529,
"alnum_prop": 0.4576224921523134,
"repo_name": "azampagl/ai-ml-improved-bridgecut",
"id": "14b103870b2811e7aed6cc85b22016ac99469f3a",
"size": "7327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lbc/src/bridgecut/graph/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77150"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_coordinate_frame'
def _f( function ):
    # Decorator: wrap a stub below as a loadable GL_EXT_coordinate_frame
    # entry point via the platform function factory.
    return _p.createFunction( function,_p.GL,'GL_EXT_coordinate_frame',False)
_p.unpack_constants( """GL_TANGENT_ARRAY_EXT 0x8439
GL_BINORMAL_ARRAY_EXT 0x843A
GL_CURRENT_TANGENT_EXT 0x843B
GL_CURRENT_BINORMAL_EXT 0x843C
GL_TANGENT_ARRAY_TYPE_EXT 0x843E
GL_TANGENT_ARRAY_STRIDE_EXT 0x843F
GL_BINORMAL_ARRAY_TYPE_EXT 0x8440
GL_BINORMAL_ARRAY_STRIDE_EXT 0x8441
GL_TANGENT_ARRAY_POINTER_EXT 0x8442
GL_BINORMAL_ARRAY_POINTER_EXT 0x8443
GL_MAP1_TANGENT_EXT 0x8444
GL_MAP2_TANGENT_EXT 0x8445
GL_MAP1_BINORMAL_EXT 0x8446
GL_MAP2_BINORMAL_EXT 0x8447""", globals())
glget.addGLGetConstant( GL_TANGENT_ARRAY_EXT, (1,) )
glget.addGLGetConstant( GL_CURRENT_TANGENT_EXT, (1,) )
glget.addGLGetConstant( GL_CURRENT_BINORMAL_EXT, (1,) )
glget.addGLGetConstant( GL_TANGENT_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_TANGENT_ARRAY_STRIDE_EXT, (1,) )
glget.addGLGetConstant( GL_BINORMAL_ARRAY_TYPE_EXT, (1,) )
glget.addGLGetConstant( GL_BINORMAL_ARRAY_STRIDE_EXT, (1,) )
# Autogenerated ctypes stubs for the GL_EXT_coordinate_frame entry points.
# Each `pass` body is replaced at load time by the _f/_p.types wrapper with
# the real native function.
@_f
@_p.types(None,_cs.GLbyte,_cs.GLbyte,_cs.GLbyte)
def glTangent3bEXT( tx,ty,tz ):pass
@_f
@_p.types(None,arrays.GLbyteArray)
def glTangent3bvEXT( v ):pass
@_f
@_p.types(None,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glTangent3dEXT( tx,ty,tz ):pass
@_f
@_p.types(None,arrays.GLdoubleArray)
def glTangent3dvEXT( v ):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glTangent3fEXT( tx,ty,tz ):pass
@_f
@_p.types(None,arrays.GLfloatArray)
def glTangent3fvEXT( v ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint)
def glTangent3iEXT( tx,ty,tz ):pass
@_f
@_p.types(None,arrays.GLintArray)
def glTangent3ivEXT( v ):pass
@_f
@_p.types(None,_cs.GLshort,_cs.GLshort,_cs.GLshort)
def glTangent3sEXT( tx,ty,tz ):pass
@_f
@_p.types(None,arrays.GLshortArray)
def glTangent3svEXT( v ):pass
@_f
@_p.types(None,_cs.GLbyte,_cs.GLbyte,_cs.GLbyte)
def glBinormal3bEXT( bx,by,bz ):pass
@_f
@_p.types(None,arrays.GLbyteArray)
def glBinormal3bvEXT( v ):pass
@_f
@_p.types(None,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glBinormal3dEXT( bx,by,bz ):pass
@_f
@_p.types(None,arrays.GLdoubleArray)
def glBinormal3dvEXT( v ):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glBinormal3fEXT( bx,by,bz ):pass
@_f
@_p.types(None,arrays.GLfloatArray)
def glBinormal3fvEXT( v ):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint)
def glBinormal3iEXT( bx,by,bz ):pass
@_f
@_p.types(None,arrays.GLintArray)
def glBinormal3ivEXT( v ):pass
@_f
@_p.types(None,_cs.GLshort,_cs.GLshort,_cs.GLshort)
def glBinormal3sEXT( bx,by,bz ):pass
@_f
@_p.types(None,arrays.GLshortArray)
def glBinormal3svEXT( v ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glTangentPointerEXT( type,stride,pointer ):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glBinormalPointerEXT( type,stride,pointer ):pass
def glInitCoordinateFrameEXT():
    '''Return boolean indicating whether this extension is available'''
    # Deferred import keeps module import cheap; availability is queried
    # through the extensions registry at call time.
    from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "e9009d452640bf075a48fdcd26b16f85",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 77,
"avg_line_length": 32.17,
"alnum_prop": 0.7444824370531551,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "0d04c56808ecb2ac37a2263c8f4fa996cb266815",
"size": "3217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/coordinate_frame.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
def sample_list_optimal_trials():
    """List the optimal trials of a Vizier study and print the response."""
    # Build the Vizier service client.
    vizier_client = aiplatform_v1beta1.VizierServiceClient()
    # Assemble the request; "parent_value" is a placeholder resource name.
    list_request = aiplatform_v1beta1.ListOptimalTrialsRequest(
        parent="parent_value",
    )
    # Issue the RPC and show what came back.
    list_response = vizier_client.list_optimal_trials(request=list_request)
    print(list_response)
# [END aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_sync]
| {
"content_hash": "840cffcece7d2fbe7680ad0eb3764e2c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 26.05263157894737,
"alnum_prop": 0.7313131313131314,
"repo_name": "googleapis/python-aiplatform",
"id": "ea190408f36734f5e98c014a1a3ca2745c10adb0",
"size": "1899",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from __future__ import division
__author__ = "Kishori M Konwar Niels W Hanson"
__copyright__ = "Copyright 2014, MetaPathways"
__credits__ = [""]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar Niels W Hanson"
__status__ = "Release"
#from libs.starcluster.test import teststarcluster as sctest
#import sys
try:
import sys, traceback, re, inspect, signal, shutil
from os import makedirs, sys, listdir, environ, path, _exit
#from commands import getstatusoutput
from optparse import OptionParser
from libs.python_modules.utils import metapathways_utils
from libs.python_modules.utils.utils import *
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, eprintf, halt_process, exit_process, WorkflowLogger, generate_log_fp
from libs.python_modules.parsers.parse import parse_metapaths_parameters, parse_parameter_file
from libs.python_modules.pipeline.metapathways_pipeline import print_commands, print_to_stdout, no_status_updates
from libs.python_modules.utils.sysutil import pathDelim
from libs.python_modules.pipeline.metapathways import run_metapathways, get_parameter, read_pipeline_configuration
from libs.python_modules.annotate import *
from libs.python_modules.grid.blast_using_grid import blast_in_grid
from libs.python_modules.diagnostics.parameters import *
from libs.python_modules.diagnostics.diagnoze import *
from libs.python_modules.pipeline.sampledata import *
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed \"source MetaPathwaysrc\""""
print """ """
#print traceback.print_exc(10)
sys.exit(3)
# Directory containing this script; used to locate the default config files.
cmd_folder = path.abspath(path.split(inspect.getfile( inspect.currentframe() ))[0])
# Platform-specific path separator, as a string.
PATHDELIM = str(pathDelim())
# Historical setup code kept for reference (previously sourced .metapathsrc
# and adjusted sys.path).
#print cmd_folder
#if not sys.platform.startswith('win'):
#    res =getstatusoutput('source '+ cmd_folder +'/'+'.metapathsrc')
#    if( int(res[0])==0 ):
#       print 'Ran ' + cmd_folder +'/'+'.metapathsrc ' + ' file successfully!'
#    else:
#       print 'Error : ' + res[1]
#       print 'while running ' + cmd_folder +'/'+'.metapathsrc ' + ' file!'
#sys.path.insert(0,cmd_folder + "/libs/python_modules/")
#sys.path.insert(1, cmd_folder + "/libs/")
#print sys.path
#config = load_config()
# Default configuration/parameter templates, relative to cmd_folder.
metapaths_config = """config/template_config.txt""";
metapaths_param = """config/template_param.txt""";
# Human-readable metadata describing this script (used for help/usage text).
script_info={}
script_info['brief_description'] = """A workflow script for making PGDBs from metagenomic sequences"""
script_info['script_description'] = \
""" This script starts a MetaPathways pipeline run. It requires an input directory of fasta or genbank files
containing sequences to process, an output directory for results to be placed. It also requires the
configuration files, template_config.txt and template_param.txt in the config/ directory, to be updated with the
location of resources on your system.
"""
script_info['script_usage'] = []
# One-line usage string shown when required arguments are missing.
usage= sys.argv[0] + """ -i input_dir -o output_dir -p parameters.txt
For more options: ./MetaPathways.py -h"""
# Global OptionParser instance, populated by createParser().
parser = None
def createParser():
    """Populate the module-global OptionParser with the pipeline's CLI options.

    Mutates the global `parser`; call before main() parses sys.argv.
    Fixes: the -o help text wrongly duplicated the -i description, and the
    -c long option was misspelled "--config_filer" — the correct
    "--config_file" is now accepted too (the old spelling is kept as a
    backward-compatible alias).
    """
    global parser
    parser = OptionParser(usage)
    parser.add_option("-i", "--input_file", dest="input_fp",
                      help='the input fasta file/input dir [REQUIRED]')
    # Help text fixed: previously repeated the -i description verbatim.
    parser.add_option("-o", "--output_dir", dest="output_dir",
                      help='the output directory [REQUIRED]')
    parser.add_option('-p','--parameter_fp', dest="parameter_fp",
                      help='path to the parameter file [REQUIRED]')
    # "--config_filer" retained as an alias so existing invocations keep working.
    parser.add_option("-c", "--config_file", "--config_filer", dest="config_file",
                      help='pipeline configuration file [OPTIONAL, default : \"MetaPathways/template_config.txt\"]')
    parser.add_option('-r','--run-type', dest="run_type", default='safe',
                      choices=['safe', 'overlay', 'overwrite','dry-run'],
                      help= '\n(a) \'overwrite\' -- wipes out the previous runs with the same name\n'+
                            '\n(b)\'overlay\' -- recomputes the steps that are not present \n' +
                            '\n(d)\'safe\' -- safe mode does not run on an existing run folder\n')
    #ith out of order completion \ time-stamps in the \'workflow_log.txt\'
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="print lots of information on the stdout [default]")
    parser.add_option("-b", "--block-mode",
                      action="store_true", dest="block_mode", default=True,
                      help="processes the samples by blocking the stages before and after functional search [default off]")
    parser.add_option("-d", "--delay", dest="delay", type='int', default=0,
                      help="number of seconds to sleep once the run is done")
    parser.add_option("-P", "--print-only",
                      action="store_true", dest="print_only", default=False,
                      help="print only the commands [default False]")
    parser.add_option("-s", "--subset", dest="sample_subset", action="append", default=[],
                      help="Processes only samples in the list subset specified [ -s sample1 -s sample2 ]" )
    parser.add_option("--runid", dest="runid", default="",
                      help="Any string to represent the runid [ default Empty string ]" )
def valid_arguments(opts, args):
    """Return True when a required argument is MISSING (i.e. args are invalid).

    Despite the name, a True result makes the caller print usage and exit.
    Fix: the original condition `(input is None and output is None) or
    output is None` reduces to just `output is None`, so a missing input
    file was silently accepted and crashed later; now either missing
    argument triggers the usage message.
    """
    return opts.input_fp is None or opts.output_dir is None
def derive_sample_name(filename):
    """Strip the directory part and any recognized sequence-file suffix.

    Fix: re.IGNORECASE was being passed as re.sub's positional `count`
    argument, so case-insensitivity was never applied (e.g. '.GBK' or
    '.FASTA' suffixes were not stripped). It is now passed via `flags=`.
    """
    basename = path.basename(filename)
    shortname = re.sub('[.]gbk$', '', basename, flags=re.IGNORECASE)
    shortname = re.sub('[.](fasta|fas|fna|faa|fa)$', '', shortname, flags=re.IGNORECASE)
    return shortname
def remove_unspecified_samples(input_output_list, sample_subset, globalerrorlogger=None):
    """Keep only the samples that are specified before processing.

    Mutates input_output_list in place, dropping entries whose derived
    sample name is not in sample_subset (when a non-empty subset is given).
    Overlong names are reported but not removed (preserving prior behavior).
    Fixes: materialize the key list so deleting entries while iterating is
    safe (dict.keys() is a live view on Python 3); removed an unused local.
    """
    input_sample_list = list(input_output_list.keys())
    for sample_name in input_sample_list:
        short_sample_name = derive_sample_name(sample_name)
        # Names longer than 35 characters are reported as errors downstream.
        if len(short_sample_name) > 35:
            eprintf("ERROR\tSample name %s must not be longer than 35 characters!\n",short_sample_name)
            if globalerrorlogger:
                globalerrorlogger.printf("ERROR\tSample name %s must not be longer than 35 characters!\n",short_sample_name)
        if not short_sample_name in sample_subset and sample_subset:
            del input_output_list[sample_name]
def check_for_error_in_input_file_name(shortname, globalerrorlogger=None):
    """Validate a derived sample name, reporting every violation found.

    Returns True when the name is acceptable, False otherwise. All checks
    run (and report) even after the first failure.
    Fix: the logger message for short names read "is too short1" — now
    "is too short!".
    """
    def _report(console_msg, logger_msg):
        # Emit the violation on stderr and, when available, to the run logger.
        eprintf(console_msg, shortname)
        if globalerrorlogger:
            globalerrorlogger.printf(logger_msg, shortname)

    clean = True
    # Must start with a letter.
    if re.search(r'^[a-zA-Z]', shortname) is None:
        _report("ERROR\tSample name %s must begin with an alphabet!\n",
                "ERROR\tSample name %s must begin with an alphabet!\tConsider prefixing an alphabet to the front\n")
        clean = False
    # Dots confuse suffix handling downstream.
    if re.search(r'[.]', shortname):
        _report("ERROR\tSample name %s contains a '.' in its name!\n",
                "ERROR\tSample name %s contains a '.' in its name!\n")
        clean = False
    if len(shortname) < 2:
        _report("ERROR\tSample name %s is too short!\n",
                "ERROR\tSample name %s is too short!\n")
        clean = False
    if clean:
        return True
    errmessage = """Sample names before the suffixes .fasta, .fas, .fna, .faa or .gbk, must consist only of alphabets, digits and _; and should consist of at least two characters """
    eprintf("ERROR\t%s\n",errmessage)
    if globalerrorlogger:
        globalerrorlogger.printf("ERROR\t%s\n",errmessage)
    return False
def create_an_input_output_pair(input_file, output_dir, globalerrorlogger=None):
    """Map a single input file to its per-sample output directory.

    Returns {input_file: output_dir/<shortname>} for a recognized, validly
    named sample file, otherwise an empty dict.
    Fixes: the '.gbk'-stripping result was discarded (the second re.sub ran
    on input_file instead of shortname), and re.IGNORECASE was passed as
    re.sub's positional `count` argument instead of `flags`.
    """
    input_output = {}
    if not re.search(r'.(fasta|fas|fna|faa|gbk|gff|fa)$',input_file, re.IGNORECASE):
        return input_output
    # Strip recognized suffixes, then the leading directory components.
    shortname = re.sub('[.]gbk$', '', input_file, flags=re.IGNORECASE)
    shortname = re.sub('[.](fasta|fas|fna|faa|fa)$', '', shortname, flags=re.IGNORECASE)
    shortname = re.sub(r'.*' + PATHDELIM, '', shortname)
    if check_for_error_in_input_file_name(shortname, globalerrorlogger=globalerrorlogger):
        input_output[input_file] = path.abspath(output_dir) + PATHDELIM + shortname
    return input_output
def create_input_output_pairs(input_dir, output_dir, globalerrorlogger=None):
    """Map every recognized sample file in input_dir to an output directory.

    Returns {input_dir/<file>: output_dir/<shortname>} for each .gbk/.fasta
    style file whose derived name passes validation.
    Fix: re.IGNORECASE was passed as re.sub's positional `count` argument;
    suffix stripping now reuses the case-insensitive compiled patterns.
    """
    fileslist = listdir(input_dir)
    gbkPatt = re.compile('[.]gbk$',re.IGNORECASE)
    fastaPatt = re.compile('[.](fasta|fas|fna|faa|fa)$',re.IGNORECASE)
    gffPatt = re.compile('[.]gff$',re.IGNORECASE)
    input_files = {}
    for input_file in fileslist:
        shortname = None
        result = gbkPatt.search(input_file)
        if result:
            shortname = gbkPatt.sub('', input_file)
        else:
            result = fastaPatt.search(input_file)
            if result:
                shortname = fastaPatt.sub('', input_file)
        # NOTE(review): .gff files match gffPatt but never get a shortname,
        # so they are skipped here — behavior preserved; confirm intent.
        if shortname is None:
            continue
        if re.search('.(fasta|fas|fna|faa|gff|gbk|fa)$',input_file, re.IGNORECASE):
            if check_for_error_in_input_file_name(shortname, globalerrorlogger=globalerrorlogger):
                input_files[input_file] = shortname
    paired_input = {}
    for key, value in input_files.items():
        paired_input[input_dir + PATHDELIM + key] = path.abspath(output_dir) + PATHDELIM + value
    return paired_input
def removeSuffix(sample_subset_in):
    """Return a copy of the sample names with recognized file suffixes removed.

    Fix: the pattern began with an unescaped '.', which matched ANY
    character, so e.g. 'myfasta' was truncated to 'my'; it now requires a
    literal dot before the suffix.
    """
    sample_subset_out = []
    for sample_name in sample_subset_in:
        mod_name = re.sub('[.](fasta|fas|fna|faa|gff|gbk|fa)$', '', sample_name)
        sample_subset_out.append(mod_name)
    return sample_subset_out
def openGrades():
    # Placeholder: not implemented.
    pass
def openRank():
    # Placeholder: not implemented.
    pass
def halt_on_invalid_input(input_output_list, filetypes, sample_subset):
    """Return False (after reporting) if any selected sample's detected file
    type is 'UNKNOWN'; True otherwise.

    Fix: the error message ended "format\\n!" (exclamation after the
    newline); corrected to "format!\\n".
    """
    for sample_path, sample_out_dir in input_output_list.items():
        sample_name = path.basename(sample_out_dir)
        # Only validate samples the user actually selected.
        if sample_name not in sample_subset:
            continue
        if filetypes[sample_path][0] == 'UNKNOWN':
            eprintf("ERROR\tIncorrect input sample %s. Check for bad characters or format!\n", sample_path)
            return False
    return True
def report_missing_filenames(input_output_list, sample_subset, logger=None):
    """Warn about samples requested in the subset that have no input file.

    Purely diagnostic: reports to stderr (and logger, if given); returns None.
    Fixes: "Cannot file input" typo in the logger message, and the stderr
    message ended "%s\\n!" (exclamation after the newline).
    """
    foundFiles = {}
    for samplePath in input_output_list.keys():
        sampleName = path.basename(input_output_list[samplePath])
        foundFiles[sampleName] = True
    for sample_in_subset in sample_subset:
        if not sample_in_subset in foundFiles:
            eprintf("ERROR\tCannot find input file for sample %s!\n", sample_in_subset)
            if logger:
                logger.printf("ERROR\tCannot find input for sample %s!\n", sample_in_subset)
# Signal handling: installed by main() for SIGINT and SIGTERM.
def sigint_handler(signum, frame):
    # Announce the signal, then shut the pipeline down cleanly.
    eprintf("Received TERMINATION signal\n")
    exit_process()
def environment_variables_defined():
    """Check that the required environment variables are set.

    Returns False when a mandatory variable (METAPATHWAYS_DB) is missing,
    True otherwise; a warning is printed for each missing variable.
    Fixes: the original tested `if variables in [...]` (the whole list —
    never true), so `status` was never set to False; and it referenced
    `os.environ` although only `environ` is imported from os at the top of
    this file.
    """
    variables = ['METAPATHWAYS_DB']
    status = True
    for variable in variables:
        if not variable in environ:
            eprintf("%-10s:Environment variable %s not defined! Please set %s as \'export %s=<value>\'\n" %('ERROR', variable, variable,variable))
            # Only the mandatory variables make the overall check fail.
            if variable in ['METAPATHWAYS_DB']:
                status = False
    return status
def main(argv):
    """Drive one MetaPathways run: parse CLI args, validate samples and
    configuration, then launch the pipeline for every selected sample."""
    global parser
    (opts, args) = parser.parse_args()
    # valid_arguments() returns True when required arguments are missing.
    if valid_arguments(opts, args):
        print usage
        sys.exit(0)
    # Install graceful-shutdown handlers before any long-running work.
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)
    eprintf("%-10s:%s\n" %('COMMAND', sys.argv[0] + ' ' + ' '.join(argv)) )
    # initialize the input directory or file
    input_fp = opts.input_fp
    output_dir = path.abspath(opts.output_dir)
    verbose = opts.verbose
    print_only = opts.print_only
    sample_subset = removeSuffix(opts.sample_subset)
    run_type = opts.run_type.strip()
    '''no need to remove the whole directory'''
#    if run_type == 'overwrite':
#       force_remove_dir=True
#    else:
#       force_remove_dir=False
    # Fall back to the bundled template config when -c is not given.
    if opts.config_file:
        config_file= opts.config_file
    else:
        config_file = cmd_folder + PATHDELIM + metapaths_config
    # try to load the parameter file
    try:
        if opts.parameter_fp:
            parameter_fp= opts.parameter_fp
        else:
            parameter_fp = cmd_folder + PATHDELIM + metapaths_param
    except IOError:
        raise IOError, ( "Can't open parameters file (%s). Does it exist? Do you have read access?" % opts.parameter_fp )
    # Create the output directory for safe/overlay runs if it is absent.
    try:
        if run_type in ['overlay', 'safe'] and not path.exists(output_dir):
            makedirs(output_dir)
    except OSError:
        print ""
        print "ERROR: Cannot create output directory \"" + output_dir + "\"\n"+\
              " Perhaps directory \"" + output_dir + "\" already exists.\n" +\
              " Please choose a different directory, or \n" +\
              " run with the option \"-r overwrite\" to force overwrite it."
        sys.exit(2)
    if verbose:
        status_update_callback = print_to_stdout
    else:
        status_update_callback = no_status_updates
    command_line_params={}
    command_line_params['verbose']= opts.verbose
    # Generate a default parameters file when none exists yet.
    if not path.exists(parameter_fp):
        eprintf("%-10s: No parameters file %s found!\n" %('WARNING', parameter_fp))
        eprintf("%-10s: Creating a parameters file %s found!\n" %('INFO', parameter_fp))
        create_metapaths_parameters(parameter_fp, cmd_folder)
    params=parse_metapaths_parameters(parameter_fp)
    """ load the sample inputs it expects either a fasta
    file or a directory containing fasta and yaml file pairs
    """
    globalerrorlogger = WorkflowLogger(generate_log_fp(output_dir, basefile_name= 'global_errors_warnings'), open_mode='w')
    input_output_list = {}
    if path.isfile(input_fp):
        """ check if it is a file """
        input_output_list = create_an_input_output_pair(input_fp, output_dir, globalerrorlogger=globalerrorlogger)
    else:
        if path.exists(input_fp):
            """ check if dir exists """
            input_output_list = create_input_output_pairs(input_fp, output_dir, globalerrorlogger=globalerrorlogger)
        else:
            """ must be an error """
            eprintf("ERROR\tNo valid input sample file or directory containing samples exists .!")
            eprintf("ERROR\tAs provided as arguments in the -in option.!\n")
            exit_process("ERROR\tAs provided as arguments in the -in option.!\n")
    """ these are the subset of sample to process if specified
    in case of an empty subset process all the sample """
    # remove all samples that are not specifed unless sample_subset is empty
    remove_unspecified_samples(input_output_list, sample_subset, globalerrorlogger = globalerrorlogger)
    # add check the config parameters
    sorted_input_output_list = sorted(input_output_list.keys())
    filetypes = check_file_types(sorted_input_output_list)
    #stop on in valid samples
    if not halt_on_invalid_input(input_output_list, filetypes, sample_subset):
        globalerrorlogger.printf("ERROR\tInvalid inputs found. Check for file with bad format or characters!\n")
        halt_process(opts.delay)
    # make sure the sample files are found
    report_missing_filenames(input_output_list, sample_subset, logger=globalerrorlogger)
    #check the pipeline configuration
    print 'config'
    if not path.exists(config_file):
        eprintf("%-10s: No config file %s found!\n" %('WARNING', config_file))
        eprintf("%-10s: Creating a config file %s!\n" %('INFO', config_file))
        if not environment_variables_defined():
            sys.exit(0)
        create_metapaths_configuration(config_file, cmd_folder)
    config_settings = read_pipeline_configuration(config_file, globalerrorlogger)
    parameter = Parameters()
    # Sanity-check that all required scripts/inputs exist before starting.
    if not staticDiagnose(config_settings, params, logger = globalerrorlogger):
        eprintf("ERROR\tFailed to pass the test for required scripts and inputs before run\n")
        globalerrorlogger.printf("ERROR\tFailed to pass the test for required scripts and inputs before run\n")
        return
    samplesData = {}
    # PART1 before the blast
    block_mode = opts.block_mode
    runid = opts.runid
    try:
        # load the sample information
        print "RUNNING MetaPathways version FogDog 3.0"
        if len(input_output_list):
            # Build one SampleData per input, (re)creating output dirs as needed.
            for input_file in sorted_input_output_list:
                sample_output_dir = input_output_list[input_file]
                algorithm = get_parameter(params, 'annotation', 'algorithm', default='LAST').upper()
                s = SampleData()
                s.setInputOutput(inputFile = input_file, sample_output_dir = sample_output_dir)
                s.setParameter('algorithm', algorithm)
                s.setParameter('FILE_TYPE', filetypes[input_file][0])
                s.setParameter('SEQ_TYPE', filetypes[input_file][1])
                s.clearJobs()
                if run_type=='overwrite' and path.exists(sample_output_dir):
                    shutil.rmtree(sample_output_dir)
                    makedirs(sample_output_dir)
                if not path.exists(sample_output_dir):
                    makedirs(sample_output_dir)
                s.prepareToRun()
                samplesData[input_file] = s
            # load the sample information
            # NOTE(review): sample_output_dir here is the value left over from
            # the LAST loop iteration — confirm run_metapathways really wants
            # a single dir rather than a per-sample one.
            run_metapathways(
                   samplesData,
                   sample_output_dir,
                   output_dir,
                   globallogger = globalerrorlogger,
                   command_line_params=command_line_params,
                   params=params,
                   metapaths_config=metapaths_config,
                   status_update_callback=status_update_callback,
                   config_file=config_file,
                   run_type = run_type,
                   config_settings = config_settings,
                   block_mode = block_mode,
                   runid = runid
                )
        else:
            eprintf("ERROR\tNo valid input files/Or no files specified to process in folder %s!\n",sQuote(input_fp) )
            globalerrorlogger.printf("ERROR\tNo valid input files to process in folder %s!\n",sQuote(input_fp) )
    except:
        # Any failure is logged with a traceback and ends the process.
        exit_process(str(traceback.format_exc(10)), logger= globalerrorlogger )
    eprintf(" *********** \n")
    eprintf("INFO : FINISHED PROCESSING THE SAMPLES \n")
    eprintf(" THE END \n")
    eprintf(" *********** \n")
    #halt_process(opts.delay)
    #halt_process(3, verbose=opts.verbose)
# the main function of metapaths
if __name__ == "__main__":
    createParser()
    main(sys.argv[1:])
    sys.exit(get_recent_error())
    # NOTE(review): unreachable — sys.exit() raises SystemExit above.
    halt_process(1)
| {
"content_hash": "438171c55ba2df9227faf015b112da89",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 184,
"avg_line_length": 39.13833992094862,
"alnum_prop": 0.6305291860230257,
"repo_name": "Koonkie/MetaPathways_Python_Koonkie.3.0",
"id": "4059e94d96de6bf7aab520fddcb3249669f36ded",
"size": "19826",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MetaPathways.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6380"
},
{
"name": "Perl",
"bytes": "321958"
},
{
"name": "Python",
"bytes": "1229127"
},
{
"name": "Shell",
"bytes": "2866"
}
],
"symlink_target": ""
} |
'''EDRN RDF Service.
'''
from zope.i18nmessageid import MessageFactory
PACKAGE_NAME = __name__
DEFAULT_PROFILE = 'profile-' + PACKAGE_NAME + ':default'
_ = MessageFactory(PACKAGE_NAME)
| {
"content_hash": "adbed40291f0b41b09594433ea2ab81f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 23.375,
"alnum_prop": 0.7165775401069518,
"repo_name": "EDRN/DMCCBackend",
"id": "9563677260b49cd08b32d9ff436f2d78e77208a5",
"size": "331",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/edrn.rdf/edrn/rdf/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7458"
},
{
"name": "Shell",
"bytes": "1861"
}
],
"symlink_target": ""
} |
"""Module containing the formatter for templates."""
from formatters.base import Formatter
class TemplateFormatter(Formatter):
"""Formatter to convert datas in the templating format.
This formatter is a regular one, but it renders the input
datas using a templating system, currently Jinja2.
"""
name = "template"
formats = ("jj2", "tmpl")
@classmethod
def render(cls, template_name, **datas):
"""Convert the input in YAML."""
template = cls.server.templating_system.get_template(template_name)
return template.render(**datas)
| {
"content_hash": "25c0f2637bd8509c792610d1cb88402a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 29.047619047619047,
"alnum_prop": 0.6622950819672131,
"repo_name": "v-legoff/pa-poc3",
"id": "46138aa6bd064c02b61ad23759ac06e8037e9d8d",
"size": "2153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/formatters/template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "12354"
},
{
"name": "Python",
"bytes": "643635"
},
{
"name": "Shell",
"bytes": "6471"
}
],
"symlink_target": ""
} |
import sys
from collections import OrderedDict
from types import MappingProxyType, DynamicClassAttribute
__all__ = ['Enum', 'IntEnum', 'unique']
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, proto):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super().__init__()
self._member_names = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
"""
if _is_sunder(key):
raise ValueError('_names_ are reserved for future Enum use')
elif _is_dunder(key):
pass
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('Key already defined as: %r' % self[key])
self._member_names.append(key)
super().__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't exist.
# This is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
    """Metaclass for Enum"""
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Use _EnumDict for the class body so member definition order is
        # recorded and duplicate member names raise immediately.
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                        first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = {k: classdict[k] for k in classdict._member_names}
        for name in classdict._member_names:
            del classdict[name]
        # check for illegal enum names (any others?)
        invalid_names = set(members) & {'mro', }
        if invalid_names:
            raise ValueError('Invalid enum member name: {0}'.format(
                ','.join(invalid_names)))
        # create a default docstring if one has not been provided
        if '__doc__' not in classdict:
            classdict['__doc__'] = 'An enumeration.'
        # create our new Enum type
        enum_class = super().__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in definition order
        enum_class._member_map_ = OrderedDict()      # name->value map
        enum_class._member_type_ = member_type
        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = {a for b in enum_class.mro() for a in b.__dict__}
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail. Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly. We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                           '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        for member_name in classdict._member_names:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member._value_ == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if obj_method is not None and obj_method is class_method:
                setattr(enum_class, name, enum_method)
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                enum_class.__new_member__ = __new__
            enum_class.__new__ = Enum.__new__
        return enum_class
    def __bool__(self):
        """
        classes/types should always be True.
        """
        return True
    def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
        """Either returns an existing member, or creates a new enum class.
        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).
        When used for the functional API:
        `value` will be the name of the new class.
        `names` should be either a string of white-space/comma delimited names
        (values will start at `start`), or an iterator/mapping of name, value pairs.
        `module` should be set to the module this class is being created in;
        if it is not set, an attempt to find that module will be made, but if
        it fails the class will not be picklable.
        `qualname` should be set to the actual location this class can be found
        at in its module; by default it is set to the global scope. If this is
        not correct, unpickling will fail in some circumstances.
        `type`, if set, will be mixed in as the first base class.
        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start)
    def __contains__(cls, member):
        # Only actual members of this exact enum count; a raw value or a
        # member of another Enum class returns False.
        return isinstance(member, cls) and member._name_ in cls._member_map_
    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                    "%s: cannot delete Enum member." % cls.__name__)
        super().__delattr__(attr)
    def __dir__(self):
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)
    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name) from None
    def __getitem__(cls, name):
        # Name-based lookup: Color['red'].  KeyError propagates for misses.
        return cls._member_map_[name]
    def __iter__(cls):
        # Canonical members only, in definition order (aliases excluded).
        return (cls._member_map_[name] for name in cls._member_names_)
    def __len__(cls):
        return len(cls._member_names_)
    @property
    def __members__(cls):
        """Returns a mapping of member name->value.
        This mapping lists all enum members, including aliases. Note that this
        is a read-only view of the internal mapping.
        """
        return MappingProxyType(cls._member_map_)
    def __repr__(cls):
        return "<enum %r>" % cls.__name__
    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.
        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super().__setattr__(name, value)
    def _create_(cls, class_name, names=None, *, module=None, qualname=None, type=None, start=1):
        """Convenience method to create a new Enum class.
        `names` can be:
        * A string containing member names, separated either with spaces or
          commas. Values are incremented by 1 from `start`.
        * An iterable of member names. Values are incremented by 1 from `start`.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value pairs.
        """
        metacls = cls.__class__
        bases = (cls, ) if type is None else (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        # special processing needed for names?
        if isinstance(names, str):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], str):
            names = [(e, i) for (i, e) in enumerate(names, start)]
        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, str):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError) as exc:
                pass
        if module is None:
            # Can't determine the calling module -- make pickling fail at
            # dump time rather than mysteriously at load time.
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        if qualname is not None:
            enum_class.__qualname__ = qualname
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.
        bases: the tuple of bases that was given to __new__
        """
        if not bases:
            # Enum itself (and anything created with no bases).
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if  (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]     # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    @staticmethod
    def _find_new_(classdict, member_type, first_enum):
        """Returns the __new__ to be used for creating the enum members.
        classdict: the class dictionary given to __new__
        member_type: the data type whose __new__ will be used by default
        first_enum: enumeration to check for an overriding __new__
        """
        # now find the correct __new__, checking to see of one was defined
        # by the user; also check earlier enum classes in case a __new__ was
        # saved as __new_member__
        __new__ = classdict.get('__new__', None)
        # should __new__ be saved as __new_member__ later?
        save_new = __new__ is not None
        if __new__ is None:
            # check all possibles for __new_member__ before falling back to
            # __new__
            for method in ('__new_member__', '__new__'):
                for possible in (member_type, first_enum):
                    target = getattr(possible, method, None)
                    if target not in {
                            None,
                            None.__new__,
                            object.__new__,
                            Enum.__new__,
                            }:
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
        # if a non-object.__new__ is used then whatever value/tuple was
        # assigned to the enum member name will be passed to __new__ and to the
        # new enum member's __init__
        if __new__ is object.__new__:
            use_args = False
        else:
            use_args = True
        return __new__, save_new, use_args
class Enum(metaclass=EnumMeta):
    """Generic enumeration.
    Derive from this class to define new enumerations.
    """
    def __new__(cls, value):
        # all enum instances are actually created during class construction
        # without calling this method; this method is called by the metaclass'
        # __call__ (i.e. Color(3) ), and by pickle
        if type(value) is cls:
            # For lookups like Color(Color.red)
            return value
        # by-value search for a matching enum member
        # see if it's in the reverse mapping (for hashable values)
        try:
            if value in cls._value2member_map_:
                return cls._value2member_map_[value]
        except TypeError:
            # not there, now do long search -- O(n) behavior
            # (value was unhashable, so it cannot be in the reverse map)
            for member in cls._member_map_.values():
                if member._value_ == value:
                    return member
        raise ValueError("%r is not a valid %s" % (value, cls.__name__))
    def __repr__(self):
        return "<%s.%s: %r>" % (
                self.__class__.__name__, self._name_, self._value_)
    def __str__(self):
        return "%s.%s" % (self.__class__.__name__, self._name_)
    def __dir__(self):
        # Expose any behavior added by subclasses/mix-ins, but hide private
        # names and enum members themselves.
        added_behavior = [
                m
                for cls in self.__class__.mro()
                for m in cls.__dict__
                if m[0] != '_' and m not in self._member_map_
                ]
        return (['__class__', '__doc__', '__module__'] + added_behavior)
    def __format__(self, format_spec):
        # mixed-in Enums should use the mixed-in type's __format__, otherwise
        # we can get strange results with the Enum name showing up instead of
        # the value
        # pure Enum branch
        if self._member_type_ is object:
            cls = str
            val = str(self)
        # mix-in branch
        else:
            cls = self._member_type_
            val = self._value_
        return cls.__format__(val, format_spec)
    def __hash__(self):
        # Hash by name, not value -- values may be unhashable.
        return hash(self._name_)
    def __reduce_ex__(self, proto):
        # Pickle by value; unpickling runs the by-value lookup in __new__.
        return self.__class__, (self._value_, )
    # DynamicClassAttribute is used to provide access to the `name` and
    # `value` properties of enum members while keeping some measure of
    # protection from modification, while still allowing for an enumeration
    # to have members named `name` and `value`.  This works because enumeration
    # members are not set directly on the enum class -- __getattr__ is
    # used to look them up.
    @DynamicClassAttribute
    def name(self):
        """The name of the Enum member."""
        return self._name_
    @DynamicClassAttribute
    def value(self):
        """The value of the Enum member."""
        return self._value_
    @classmethod
    def _convert(cls, name, module, filter, source=None):
        """
        Create a new Enum subclass that replaces a collection of global constants
        """
        # convert all constants from source (or module) that pass filter() to
        # a new Enum called name, and export the enum and its members back to
        # module;
        # also, replace the __reduce_ex__ method so unpickling works in
        # previous Python versions
        module_globals = vars(sys.modules[module])
        if source:
            source = vars(source)
        else:
            source = module_globals
        members = {name: value for name, value in source.items()
                if filter(name)}
        cls = cls(name, members, module=module)
        cls.__reduce_ex__ = _reduce_ex_by_name
        # Export both the enum class and its members back into the module's
        # namespace, replacing the original constants.
        module_globals.update(cls.__members__)
        module_globals[name] = cls
        return cls
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
    # int is the member_type mix-in, so members behave as plain ints
    # (comparisons, arithmetic, use as indices) while remaining enum members.
def _reduce_ex_by_name(self, proto):
    # Installed by Enum._convert as __reduce_ex__: pickle converted
    # constants by (module-level) name, so unpickling resolves the name in
    # the module rather than reconstructing through the enum class.
    return self.name
def unique(enumeration):
    """Class decorator for enumerations ensuring unique member values."""
    # An alias is a __members__ entry whose key differs from the member's
    # canonical name, i.e. two names ended up with the same value.
    aliases = [(alias, member.name)
               for alias, member in enumeration.__members__.items()
               if alias != member.name]
    if aliases:
        alias_details = ', '.join(
                "%s -> %s" % (alias, name) for (alias, name) in aliases)
        raise ValueError('duplicate values found in %r: %s' %
                (enumeration, alias_details))
    return enumeration
| {
"content_hash": "de12f1f3dc51541dff83956a053c16ce",
"timestamp": "",
"source": "github",
"line_count": 574,
"max_line_length": 99,
"avg_line_length": 38.721254355400696,
"alnum_prop": 0.5616845136326825,
"repo_name": "batermj/algorithm-challenger",
"id": "b8787d19b884862265991e7a694b00f93f7b71e9",
"size": "22226",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/enum.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
import io
import pretend
from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
from webob import datetime_utils
from warehouse.packaging import views
from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
from ...common.db.accounts import UserFactory
from ...common.db.packaging import (
ProjectFactory, ReleaseFactory, FileFactory, RoleFactory,
)
class TestProjectDetail:
    """Tests for views.project_detail."""
    def test_normalizing_redirects(self, db_request):
        # A URL whose project name differs from the canonical name (here:
        # only by case) must 301 to the canonical route.
        project = ProjectFactory.create()
        name = project.name.lower()
        if name == project.name:
            name = project.name.upper()
        db_request.matchdict = {"name": name}
        db_request.current_route_path = pretend.call_recorder(
            lambda name: "/project/the-redirect/"
        )
        resp = views.project_detail(project, db_request)
        assert isinstance(resp, HTTPMovedPermanently)
        assert resp.headers["Location"] == "/project/the-redirect/"
        assert db_request.current_route_path.calls == [
            pretend.call(name=project.name),
        ]
    def test_missing_release(self, db_request):
        # A project with no releases yields a 404.
        project = ProjectFactory.create()
        resp = views.project_detail(project, db_request)
        assert isinstance(resp, HTTPNotFound)
    def test_calls_release_detail(self, monkeypatch, db_request):
        # project_detail should delegate to release_detail for the latest
        # release (3.0 here) and return its response unchanged.
        project = ProjectFactory.create()
        ReleaseFactory.create(project=project, version="1.0")
        ReleaseFactory.create(project=project, version="2.0")
        release = ReleaseFactory.create(project=project, version="3.0")
        response = pretend.stub()
        release_detail = pretend.call_recorder(lambda ctx, request: response)
        monkeypatch.setattr(views, "release_detail", release_detail)
        resp = views.project_detail(project, db_request)
        assert resp is response
        assert release_detail.calls == [pretend.call(release, db_request)]
class TestReleaseDetail:
    """Tests for views.release_detail."""
    def test_normalizing_redirects(self, db_request):
        # A non-canonical project name in the URL must 301 to the canonical
        # release route.
        project = ProjectFactory.create()
        release = ReleaseFactory.create(project=project, version="3.0")
        name = release.project.name.lower()
        if name == release.project.name:
            name = release.project.name.upper()
        db_request.matchdict = {"name": name}
        db_request.current_route_path = pretend.call_recorder(
            lambda name: "/project/the-redirect/3.0/"
        )
        resp = views.release_detail(release, db_request)
        assert isinstance(resp, HTTPMovedPermanently)
        assert resp.headers["Location"] == "/project/the-redirect/3.0/"
        assert db_request.current_route_path.calls == [
            pretend.call(name=release.project.name),
        ]
    def test_detail_renders(self, db_request):
        # The template context must contain: the requested release, only its
        # own files, all releases newest-first, and deduplicated maintainers
        # sorted case-insensitively.
        users = [
            UserFactory.create(),
            UserFactory.create(),
            UserFactory.create(),
        ]
        project = ProjectFactory.create()
        releases = [
            ReleaseFactory.create(project=project, version=v)
            for v in ["1.0", "2.0", "3.0"]
        ]
        files = [
            FileFactory.create(
                release=r,
                filename="{}-{}.tar.gz".format(project.name, r.version),
                python_version="source",
            )
            for r in releases
        ]
        # Create a role for each user
        for user in users:
            RoleFactory.create(user=user, project=project)
        # Add an extra role for one user, to ensure deduplication
        RoleFactory.create(
            user=users[0],
            project=project,
            role_name="another role",
        )
        result = views.release_detail(releases[1], db_request)
        assert result == {
            "project": project,
            "release": releases[1],
            "files": [files[1]],
            "all_releases": [
                (r.version, r.created) for r in reversed(releases)
            ],
            "maintainers": sorted(users, key=lambda u: u.username.lower()),
        }
class TestProjectStats:
    """Tests for views.project_stats (the ESI download-stats fragment)."""
    def test_normalizing_redirects(self, pyramid_request):
        # Non-canonical project names 301 to the canonical stats route.
        project = pretend.stub(name="Foo")
        name = project.name.lower()
        pyramid_request.matchdict = {"name": name}
        pyramid_request.current_route_path = pretend.call_recorder(
            lambda name: "/_esi/project-stats/the-redirect/"
        )
        resp = views.project_stats(project, pyramid_request)
        assert isinstance(resp, HTTPMovedPermanently)
        assert resp.headers["Location"] == "/_esi/project-stats/the-redirect/"
        assert pyramid_request.current_route_path.calls == [
            pretend.call(name=project.name),
        ]
    def test_project_stats(self, pyramid_request):
        # The view should pull daily/weekly/monthly counts from the
        # registered IDownloadStatService and return them as a dict.
        project = pretend.stub(name="Foo")
        class DownloadService:
            _stats = {"Foo": {"daily": 10, "weekly": 70, "monthly": 300}}
            def get_daily_stats(self, name):
                return self._stats[name]["daily"]
            def get_weekly_stats(self, name):
                return self._stats[name]["weekly"]
            def get_monthly_stats(self, name):
                return self._stats[name]["monthly"]
        services = {IDownloadStatService: DownloadService()}
        pyramid_request.matchdict = {"name": project.name}
        pyramid_request.find_service = lambda iface: services[iface]
        stats = views.project_stats(project, pyramid_request)
        assert stats == {"daily": 10, "weekly": 70, "monthly": 300}
class TestPackages:
    """Tests for views.packages (serving package and signature files)."""
    def test_404_when_no_file(self, db_request):
        # Unknown path -> 404 without touching storage.
        db_request.matchdict["path"] = "source/f/foo/foo-1.0.tar.gz"
        resp = views.packages(db_request)
        assert isinstance(resp, HTTPNotFound)
    def test_404_when_no_sig(self, db_request, pyramid_config):
        # A .asc request for a file whose signature does not exist -> 404.
        pyramid_config.registry["filesystems"] = {
            "packages": pretend.stub(exists=lambda p: False),
        }
        project = ProjectFactory.create()
        release = ReleaseFactory.create(project=project)
        file_ = FileFactory.create(
            release=release,
            filename="{}-{}.tar.gz".format(project.name, release.version),
            python_version="source",
        )
        db_request.matchdict["path"] = "source/{}/{}/{}.asc".format(
            project.name[0], project.name, file_.filename
        )
        resp = views.packages(db_request)
        assert isinstance(resp, HTTPNotFound)
    def test_404_when_missing_file(self, db_request, pyramid_config):
        # File exists in the database but the storage backend raises
        # FileNotFoundError -> 404, and the inconsistency is logged.
        @pretend.call_recorder
        def raiser(path):
            raise FileNotFoundError
        storage_service = pretend.stub(get=raiser)
        project = ProjectFactory.create()
        release = ReleaseFactory.create(project=project)
        file_ = FileFactory.create(
            release=release,
            filename="{}-{}.tar.gz".format(project.name, release.version),
            python_version="source",
        )
        path = "source/{}/{}/{}".format(
            project.name[0], project.name, file_.filename
        )
        db_request.matchdict["path"] = path
        db_request.log = pretend.stub(
            error=pretend.call_recorder(lambda event, **kw: None),
        )
        db_request.find_service = pretend.call_recorder(
            lambda iface: storage_service
        )
        resp = views.packages(db_request)
        assert isinstance(resp, HTTPNotFound)
        assert db_request.find_service.calls == [pretend.call(IFileStorage)]
        assert storage_service.get.calls == [pretend.call(path)]
        assert db_request.log.error.calls == [
            pretend.call("missing file data", path=path),
        ]
    def test_serves_package_file(self, db_request):
        # Happy path: the response streams the stored file with the
        # database-recorded size, md5 etag, and upload-time Last-Modified.
        project = ProjectFactory.create()
        release = ReleaseFactory.create(project=project)
        file_ = FileFactory.create(
            release=release,
            filename="{}-{}.tar.gz".format(project.name, release.version),
            python_version="source",
            size=27,
        )
        path = "source/{}/{}/{}".format(
            project.name[0], project.name, file_.filename
        )
        storage_service = pretend.stub(
            get=pretend.call_recorder(
                lambda path: io.BytesIO(b"some data for the fake file")
            )
        )
        db_request.matchdict["path"] = path
        db_request.find_service = pretend.call_recorder(
            lambda iface: storage_service
        )
        resp = views.packages(db_request)
        # We want to roundtrip our upload_time
        last_modified = datetime_utils.parse_date(
            datetime_utils.serialize_date(file_.upload_time)
        )
        assert db_request.find_service.calls == [pretend.call(IFileStorage)]
        assert storage_service.get.calls == [pretend.call(path)]
        assert resp.content_type == "application/octet-stream"
        assert resp.content_encoding is None
        assert resp.etag == file_.md5_digest
        assert resp.last_modified == last_modified
        assert resp.content_length == 27
        # This needs to be last, as accessing resp.body sets the content_length
        assert resp.body == b"some data for the fake file"
    def test_serves_signature_file(self, db_request):
        # Signatures are served like files but with no recorded size, so
        # content_length stays None until the body is read.
        project = ProjectFactory.create()
        release = ReleaseFactory.create(project=project)
        file_ = FileFactory.create(
            release=release,
            filename="{}-{}.tar.gz".format(project.name, release.version),
            python_version="source",
            has_signature=True,
        )
        path = "source/{}/{}/{}.asc".format(
            project.name[0], project.name, file_.filename
        )
        storage_service = pretend.stub(
            get=pretend.call_recorder(
                lambda path: io.BytesIO(b"some data for the fake file")
            )
        )
        db_request.matchdict["path"] = path
        db_request.find_service = pretend.call_recorder(
            lambda iface: storage_service
        )
        resp = views.packages(db_request)
        # We want to roundtrip our upload_time
        last_modified = datetime_utils.parse_date(
            datetime_utils.serialize_date(file_.upload_time)
        )
        assert db_request.find_service.calls == [pretend.call(IFileStorage)]
        assert storage_service.get.calls == [pretend.call(path)]
        assert resp.content_type == "application/octet-stream"
        assert resp.content_encoding is None
        assert resp.etag == file_.md5_digest
        assert resp.last_modified == last_modified
        assert resp.content_length is None
        # This needs to be last, as accessing resp.body sets the content_length
        assert resp.body == b"some data for the fake file"
| {
"content_hash": "d343dd9a7fbae4b0f2fe58364b381d53",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 79,
"avg_line_length": 33.68535825545171,
"alnum_prop": 0.604365116063997,
"repo_name": "HonzaKral/warehouse",
"id": "25aea02dfc4001065493c6ce5f3b41b7598bbc44",
"size": "11354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/packaging/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15520"
},
{
"name": "HTML",
"bytes": "13873"
},
{
"name": "JavaScript",
"bytes": "1611"
},
{
"name": "Makefile",
"bytes": "988"
},
{
"name": "Mako",
"bytes": "911"
},
{
"name": "Perl",
"bytes": "6993"
},
{
"name": "Python",
"bytes": "561395"
}
],
"symlink_target": ""
} |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import GCC_ARM_PATH, GCC_CR_PATH
from workspace_tools.settings import GOANNA_PATH
from workspace_tools.hooks import hook_tool
class GCC(mbedToolchain):
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
DIAGNOSTIC_PATTERN = re.compile('((?P<line>\d+):)(\d+:)? (?P<severity>warning|error): (?P<message>.+)')
def __init__(self, target, options=None, notify=None, macros=None, silent=False, tool_path="", extra_verbose=False):
mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
if target.core == "Cortex-M0+":
cpu = "cortex-m0plus"
elif target.core == "Cortex-M4F":
cpu = "cortex-m4"
elif target.core == "Cortex-M7F":
cpu = "cortex-m7"
else:
cpu = target.core.lower()
self.cpu = ["-mcpu=%s" % cpu]
if target.core.startswith("Cortex"):
self.cpu.append("-mthumb")
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7F":
self.cpu.append("-mfpu=fpv5-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
# Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762
common_flags = ["-c", "-Wall", "-Wextra",
"-Wno-unused-parameter", "-Wno-missing-field-initializers",
"-fmessage-length=0", "-fno-exceptions", "-fno-builtin",
"-ffunction-sections", "-fdata-sections",
"-MMD", "-fno-delete-null-pointer-checks", "-fomit-frame-pointer"
] + self.cpu
if "save-asm" in self.options:
common_flags.append("-save-temps")
if "debug-info" in self.options:
common_flags.append("-g")
common_flags.append("-Og")
else:
common_flags.append("-Os")
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc, "-x", "assembler-with-cpp"] + common_flags
if not "analyze" in self.options:
self.cc = [main_cc, "-std=gnu99"] + common_flags
self.cppc =[main_cppc, "-std=gnu++98", "-fno-rtti"] + common_flags
else:
self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "-std=gnu99", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cppc.replace('\\', '/'), "-std=gnu++98", "-fno-rtti", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
self.ld = [join(tool_path, "arm-none-eabi-gcc"), "-Wl,--gc-sections", "-Wl,--wrap,main"] + self.cpu
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
def assemble(self, source, object, includes):
return [self.hook.get_cmdline_assembler(self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-o", object, source])]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines()[1:]:
file = line.replace('\\\n', '').strip()
if file:
# GCC might list more than one dependency on a single line, in this case
# the dependencies are separated by a space. However, a space might also
# indicate an actual space character in a dependency path, but in this case
# the space character is prefixed by a backslash.
# Temporary replace all '\ ' with a special char that is not used (\a in this
# case) to keep them from being interpreted by 'split' (they will be converted
# back later to a space char)
file = file.replace('\\ ', '\a')
if file.find(" ") == -1:
dependencies.append(file.replace('\a', ' '))
else:
dependencies = dependencies + [f.replace('\a', ' ') for f in file.split(" ")]
return dependencies
def is_not_supported_error(self, output):
return "error: #error [NOT_SUPPORTED]" in output
def parse_output(self, output):
# The warning/error notification is multiline
WHERE, WHAT = 0, 1
state, file, message = WHERE, None, None
for line in output.splitlines():
match = self.goanna_parse_line(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message'),
target_name=self.target.name,
toolchain_name=self.name
)
continue
# Each line should start with the file information: "filepath: ..."
# i should point past the file path ^
# avoid the first column in Windows (C:\)
i = line.find(':', 2)
if i == -1: continue
if state == WHERE:
file = line[:i]
message = line[i+1:].strip() + ' '
state = WHAT
elif state == WHAT:
match = GCC.DIAGNOSTIC_PATTERN.match(line[i+1:])
if match is None:
state = WHERE
continue
self.cc_info(
match.group('severity'),
file, match.group('line'),
message + match.group('message')
)
def archive(self, objects, lib_path):
self.default_cmd([self.ar, "rcs", lib_path] + objects)
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
self.default_cmd(self.hook.get_cmdline_linker(self.ld + ["-T%s" % mem_map, "-o", output] +
objects + ["-L%s" % L for L in lib_dirs] + ["-Wl,--start-group"] + libs + ["-Wl,--end-group"]))
    @hook_tool
    def binary(self, resources, elf, bin):
        """Convert the linked ELF image *elf* into a raw binary *bin*.

        Uses the toolchain's elf2bin tool with objcopy-style arguments
        ("-O binary").  *resources* is unused here; it is part of the common
        toolchain interface — TODO confirm against the other toolchains.
        """
        self.default_cmd(self.hook.get_cmdline_binary([self.elf2bin, "-O", "binary", elf, bin]))
class GCC_ARM(GCC):
    """GCC toolchain variant rooted at GCC_ARM_PATH (GNU ARM Embedded)."""
    def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_ARM_PATH, extra_verbose=extra_verbose)
        # Use latest gcc nanolib (newlib-nano), unless the build asked for the
        # thread-safe (full) C library.
        if "thread-safe" not in self.options:
            self.ld.append("--specs=nano.specs")
        # These targets need float printf/scanf support pulled in explicitly.
        # NOTE(review): the two branches spell the flag differently
        # ("-u _printf_float" vs "-u_printf_float"); confirm the difference
        # is intentional before unifying.
        if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
            self.ld.extend(["-u _printf_float", "-u _scanf_float"])
        elif target.name in ["RZ_A1H", "VK_RZ_A1H", "ARCH_MAX", "DISCO_F407VG", "DISCO_F429ZI", "DISCO_F469NI", "NUCLEO_F401RE", "NUCLEO_F410RB", "NUCLEO_F411RE", "NUCLEO_F446RE", "ELMO_F411RE", "MTS_MDOT_F411RE", "MTS_DRAGONFLY_F411RE", "DISCO_F746NG"]:
            self.ld.extend(["-u_printf_float", "-u_scanf_float"])
        # Link against the no-syscalls stub library.
        self.sys_libs.append("nosys")
class GCC_CR(GCC):
    """GCC toolchain variant rooted at GCC_CR_PATH (Code Red, per __CODE_RED)."""
    def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
        GCC.__init__(self, target, options, notify, macros, silent, GCC_CR_PATH, extra_verbose=extra_verbose)
        # Defines expected by this toolchain's runtime/headers.
        additional_compiler_flags = [
            "-D__NEWLIB__", "-D__CODE_RED", "-D__USE_CMSIS", "-DCPP_USE_HEAP",
        ]
        self.cc += additional_compiler_flags
        self.cppc += additional_compiler_flags
        # Use latest gcc nanolib
        self.ld.append("--specs=nano.specs")
        # Same float printf/scanf targets as GCC_ARM.
        if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
            self.ld.extend(["-u _printf_float", "-u _scanf_float"])
        self.ld += ["-nostdlib"]
| {
"content_hash": "1f84f49597f4713e3c10b3c42e8e4e1e",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 254,
"avg_line_length": 44.08056872037915,
"alnum_prop": 0.5651005268250726,
"repo_name": "bikeNomad/mbed",
"id": "9f18b421b1be1e4a8a58ff0c3a0de8c46e385819",
"size": "9301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspace_tools/toolchains/gcc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4841827"
},
{
"name": "C",
"bytes": "118619344"
},
{
"name": "C++",
"bytes": "7165206"
},
{
"name": "CMake",
"bytes": "4724"
},
{
"name": "HTML",
"bytes": "1071741"
},
{
"name": "JavaScript",
"bytes": "1494"
},
{
"name": "Objective-C",
"bytes": "61382"
},
{
"name": "Python",
"bytes": "647103"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import itertools
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core.missing import (
NumpyInterpolator,
ScipyInterpolator,
SplineInterpolator,
_get_nan_block_lengths,
get_clean_interp_index,
)
from xarray.core.pycompat import array_type
from xarray.tests import (
_CFTIME_CALENDARS,
assert_allclose,
assert_array_equal,
assert_equal,
raise_if_dask_computes,
requires_bottleneck,
requires_cftime,
requires_dask,
requires_scipy,
)
# Resolved lazily so the module imports even when dask is absent.
dask_array_type = array_type("dask")
@pytest.fixture
def da():
    # 1-D DataArray with interior NaN gaps; shared by many tests below.
    return xr.DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time")
@pytest.fixture
def cf_da():
    # Factory fixture: build a cftime-indexed DataArray for a calendar/freq.
    def _cf_da(calendar, freq="1D"):
        times = xr.cftime_range(
            start="1970-01-01", freq=freq, periods=10, calendar=calendar
        )
        values = np.arange(10)
        return xr.DataArray(values, dims=("time",), coords={"time": times})
    return _cf_da
@pytest.fixture
def ds():
    # Dataset with NaN gaps along two different dimensions ("time" and "x").
    ds = xr.Dataset()
    ds["var1"] = xr.DataArray(
        [0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time"
    )
    ds["var2"] = xr.DataArray(
        [10, np.nan, 11, 12, np.nan, 13, 14, 15, np.nan, 16, 17], dims="x"
    )
    return ds
def make_interpolate_example_data(shape, frac_nan, seed=12345, non_uniform=False):
    """Create matching (DataArray, DataFrame) random data with NaN holes.

    Parameters
    ----------
    shape : tuple of int
        (time, x) shape of the generated values.
    frac_nan : float
        Approximate fraction of values replaced with NaN (0 disables, 1
        blanks everything).
    seed : int
        Seed for the random state; all randomness is drawn from the seeded
        generator so the data is reproducible for a given seed.
    non_uniform : bool
        If True, use an irregularly spaced datetime index for "time".
    """
    rs = np.random.RandomState(seed)
    vals = rs.normal(size=shape)
    if frac_nan == 1:
        vals[:] = np.nan
    elif frac_nan == 0:
        pass
    else:
        n_missing = int(vals.size * frac_nan)
        ys = np.arange(shape[0])
        xs = np.arange(shape[1])
        if n_missing:
            # Use the seeded generator (previously np.random.shuffle), so the
            # NaN locations actually honor the ``seed`` argument.
            rs.shuffle(ys)
            ys = ys[:n_missing]
            rs.shuffle(xs)
            xs = xs[:n_missing]
            vals[ys, xs] = np.nan
    if non_uniform:
        # construct a datetime index that has irregular spacing
        deltas = pd.TimedeltaIndex(unit="d", data=rs.normal(size=shape[0], scale=10))
        coords = {"time": (pd.Timestamp("2000-01-01") + deltas).sort_values()}
    else:
        coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])}
    da = xr.DataArray(vals, dims=("time", "x"), coords=coords)
    df = da.to_pandas()
    return da, df
@requires_scipy
def test_interpolate_pd_compat():
    # Compare interpolate_na against pandas.DataFrame.interpolate across a
    # grid of shapes, NaN fractions and scipy methods.
    shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]
    frac_nans = [0, 0.5, 1]
    methods = ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]
    for (shape, frac_nan, method) in itertools.product(shapes, frac_nans, methods):
        da, df = make_interpolate_example_data(shape, frac_nan)
        for dim in ["time", "x"]:
            actual = da.interpolate_na(method=method, dim=dim, fill_value=np.nan)
            expected = df.interpolate(
                method=method, axis=da.get_axis_num(dim), fill_value=(np.nan, np.nan)
            )
            # Note, Pandas does some odd things with the left/right fill_value
            # for the linear methods. This next line enforces the xarray
            # fill_value convention on the pandas output. Therefore, this test
            # only checks that interpolated values are the same (not nans)
            expected.values[pd.isnull(actual.values)] = np.nan
            np.testing.assert_allclose(actual.values, expected.values)
@requires_scipy
@pytest.mark.parametrize("method", ["barycentric", "krog", "pchip", "spline", "akima"])
def test_scipy_methods_function(method):
    # Note: Pandas does some wacky things with these methods and the full
    # integration tests won't work.
    da, _ = make_interpolate_example_data((25, 25), 0.4, non_uniform=True)
    actual = da.interpolate_na(method=method, dim="time")
    # Interpolation must never reduce the number of valid points.
    assert (da.count("time") <= actual.count("time")).all()
@requires_scipy
def test_interpolate_pd_compat_non_uniform_index():
    # Same pandas comparison, but on an irregularly spaced datetime index.
    shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]
    frac_nans = [0, 0.5, 1]
    methods = ["time", "index", "values"]
    for (shape, frac_nan, method) in itertools.product(shapes, frac_nans, methods):
        da, df = make_interpolate_example_data(shape, frac_nan, non_uniform=True)
        for dim in ["time", "x"]:
            if method == "time" and dim != "time":
                continue
            actual = da.interpolate_na(
                method="linear", dim=dim, use_coordinate=True, fill_value=np.nan
            )
            expected = df.interpolate(
                method=method, axis=da.get_axis_num(dim), fill_value=np.nan
            )
            # Note, Pandas does some odd things with the left/right fill_value
            # for the linear methods. This next line enforces the xarray
            # fill_value convention on the pandas output. Therefore, this test
            # only checks that interpolated values are the same (not nans)
            expected.values[pd.isnull(actual.values)] = np.nan
            np.testing.assert_allclose(actual.values, expected.values)
@requires_scipy
def test_interpolate_pd_compat_polynomial():
    # Pandas comparison for polynomial interpolation of orders 1-3.
    shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]
    frac_nans = [0, 0.5, 1]
    orders = [1, 2, 3]
    for (shape, frac_nan, order) in itertools.product(shapes, frac_nans, orders):
        da, df = make_interpolate_example_data(shape, frac_nan)
        for dim in ["time", "x"]:
            actual = da.interpolate_na(
                method="polynomial", order=order, dim=dim, use_coordinate=False
            )
            expected = df.interpolate(
                method="polynomial", order=order, axis=da.get_axis_num(dim)
            )
            np.testing.assert_allclose(actual.values, expected.values)
# Error-path tests: invalid indexes or arguments must raise informative errors.
@requires_scipy
def test_interpolate_unsorted_index_raises():
    vals = np.array([1, 2, 3], dtype=np.float64)
    expected = xr.DataArray(vals, dims="x", coords={"x": [2, 1, 3]})
    with pytest.raises(ValueError, match=r"Index 'x' must be monotonically increasing"):
        expected.interpolate_na(dim="x", method="index")
def test_interpolate_no_dim_raises():
    da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x")
    with pytest.raises(NotImplementedError, match=r"dim is a required argument"):
        da.interpolate_na(method="linear")
def test_interpolate_invalid_interpolator_raises():
    da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x")
    with pytest.raises(ValueError, match=r"not a valid"):
        da.interpolate_na(dim="x", method="foo")
def test_interpolate_duplicate_values_raises():
    # The duplicate-index check fires before method validation, so the bogus
    # method name never gets a chance to raise.
    data = np.random.randn(2, 3)
    da = xr.DataArray(data, coords=[("x", ["a", "a"]), ("y", [0, 1, 2])])
    with pytest.raises(ValueError, match=r"Index 'x' has duplicate values"):
        da.interpolate_na(dim="x", method="foo")
def test_interpolate_multiindex_raises():
    data = np.random.randn(2, 3)
    data[1, 1] = np.nan
    da = xr.DataArray(data, coords=[("x", ["a", "b"]), ("y", [0, 1, 2])])
    das = da.stack(z=("x", "y"))
    with pytest.raises(TypeError, match=r"Index 'z' must be castable to float64"):
        das.interpolate_na(dim="z")
def test_interpolate_2d_coord_raises():
    # Interpolating along a coordinate that is itself 2-D is unsupported.
    coords = {
        "x": xr.Variable(("a", "b"), np.arange(6).reshape(2, 3)),
        "y": xr.Variable(("a", "b"), np.arange(6).reshape(2, 3)) * 2,
    }
    data = np.random.randn(2, 3)
    data[1, 1] = np.nan
    da = xr.DataArray(data, dims=("a", "b"), coords=coords)
    with pytest.raises(ValueError, match=r"interpolation must be 1D"):
        da.interpolate_na(dim="a", use_coordinate="x")
@requires_scipy
def test_interpolate_kwargs():
    da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
    # fill_value="extrapolate" extends the trend past the last valid point.
    expected = xr.DataArray(np.array([4, 5, 6], dtype=np.float64), dims="x")
    actual = da.interpolate_na(dim="x", fill_value="extrapolate")
    assert_equal(actual, expected)
    # A scalar fill_value is used verbatim for out-of-range points.
    expected = xr.DataArray(np.array([4, 5, -999], dtype=np.float64), dims="x")
    actual = da.interpolate_na(dim="x", fill_value=-999)
    assert_equal(actual, expected)
def test_interpolate_keep_attrs():
    # keep_attrs=True must preserve attrs on the interpolated result.
    vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
    mvals = vals.copy()
    mvals[2] = np.nan
    missing = xr.DataArray(mvals, dims="x")
    missing.attrs = {"test": "value"}
    actual = missing.interpolate_na(dim="x", keep_attrs=True)
    assert actual.attrs == {"test": "value"}
def test_interpolate():
    # Basic linear interpolation recovers the single missing point exactly.
    vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
    expected = xr.DataArray(vals, dims="x")
    mvals = vals.copy()
    mvals[2] = np.nan
    missing = xr.DataArray(mvals, dims="x")
    actual = missing.interpolate_na(dim="x")
    assert_equal(actual, expected)
@requires_scipy
@pytest.mark.parametrize(
    "method,vals",
    [
        pytest.param(method, vals, id=f"{desc}:{method}")
        for method in [
            "linear",
            "nearest",
            "zero",
            "slinear",
            "quadratic",
            "cubic",
            "polynomial",
        ]
        for (desc, vals) in [
            ("no nans", np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)),
            ("one nan", np.array([1, np.nan, np.nan], dtype=np.float64)),
            ("all nans", np.full(6, np.nan, dtype=np.float64)),
        ]
    ],
)
def test_interp1d_fastrack(method, vals):
    # Degenerate inputs (no NaNs, unfillable trailing NaNs, all NaNs) must
    # come back unchanged regardless of the interpolation method.
    expected = xr.DataArray(vals, dims="x")
    actual = expected.interpolate_na(dim="x", method=method)
    assert_equal(actual, expected)
@requires_bottleneck
def test_interpolate_limits():
    da = xr.DataArray(
        np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64), dims="x"
    )
    # limit=None fills everything.
    actual = da.interpolate_na(dim="x", limit=None)
    assert actual.isnull().sum() == 0
    # limit=2 fills at most two consecutive NaNs; the third stays missing.
    actual = da.interpolate_na(dim="x", limit=2)
    expected = xr.DataArray(
        np.array([1, 2, 3, 4, np.nan, 6], dtype=np.float64), dims="x"
    )
    assert_equal(actual, expected)
@requires_scipy
def test_interpolate_methods():
    # Every 1-D scipy method fills the 3-NaN gap; limit=2 leaves exactly one.
    for method in ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]:
        kwargs = {}
        da = xr.DataArray(
            np.array([0, 1, 2, np.nan, np.nan, np.nan, 6, 7, 8], dtype=np.float64),
            dims="x",
        )
        actual = da.interpolate_na("x", method=method, **kwargs)
        assert actual.isnull().sum() == 0
        actual = da.interpolate_na("x", method=method, limit=2, **kwargs)
        assert actual.isnull().sum() == 1
@requires_scipy
def test_interpolators():
    # The low-level interpolator classes must produce finite values for
    # in-range query points on every backend.
    for method, interpolator in [
        ("linear", NumpyInterpolator),
        ("linear", ScipyInterpolator),
        ("spline", SplineInterpolator),
    ]:
        xi = np.array([-1, 0, 1, 2, 5], dtype=np.float64)
        yi = np.array([-10, 0, 10, 20, 50], dtype=np.float64)
        x = np.array([3, 4], dtype=np.float64)
        f = interpolator(xi, yi, method=method)
        out = f(x)
        assert pd.isnull(out).sum() == 0
def test_interpolate_use_coordinate():
    # "xc" is uniformly spaced, so interpolating against it matches the
    # default integer index in every case below.
    xc = xr.Variable("x", [100, 200, 300, 400, 500, 600])
    da = xr.DataArray(
        np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64),
        dims="x",
        coords={"xc": xc},
    )
    # use_coordinate == False is same as using the default index
    actual = da.interpolate_na(dim="x", use_coordinate=False)
    expected = da.interpolate_na(dim="x")
    assert_equal(actual, expected)
    # possible to specify non index coordinate
    actual = da.interpolate_na(dim="x", use_coordinate="xc")
    expected = da.interpolate_na(dim="x")
    assert_equal(actual, expected)
    # possible to specify index coordinate by name
    actual = da.interpolate_na(dim="x", use_coordinate="x")
    expected = da.interpolate_na(dim="x")
    assert_equal(actual, expected)
@requires_dask
def test_interpolate_dask():
    # Chunked interpolation must match the eager (loaded) result and stay
    # backed by a dask array until computed.
    da, _ = make_interpolate_example_data((40, 40), 0.5)
    da = da.chunk({"x": 5})
    actual = da.interpolate_na("time")
    expected = da.load().interpolate_na("time")
    assert isinstance(actual.data, dask_array_type)
    assert_equal(actual.compute(), expected)
    # with limit
    da = da.chunk({"x": 5})
    actual = da.interpolate_na("time", limit=3)
    expected = da.load().interpolate_na("time", limit=3)
    assert isinstance(actual.data, dask_array_type)
    assert_equal(actual, expected)
@requires_dask
def test_interpolate_dask_raises_for_invalid_chunk_dim():
    # Interpolating along a dimension that is split across chunks is invalid.
    da, _ = make_interpolate_example_data((40, 40), 0.5)
    da = da.chunk({"time": 5})
    # this checks for ValueError in dask.array.apply_gufunc
    with pytest.raises(ValueError, match=r"consists of multiple chunks"):
        da.interpolate_na("time")
@requires_dask
@requires_scipy
@pytest.mark.parametrize("dtype, method", [(int, "linear"), (int, "nearest")])
def test_interpolate_dask_expected_dtype(dtype, method):
    da = xr.DataArray(
        data=np.array([0, 1], dtype=dtype),
        dims=["time"],
        coords=dict(time=np.array([0, 1])),
    ).chunk(dict(time=2))
    da = da.interp(time=np.array([0, 0.5, 1, 2]), method=method)
    # The lazily-promoted dtype must match the dtype of the computed result.
    assert da.dtype == da.compute().dtype
@requires_bottleneck
def test_ffill():
    da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
    expected = xr.DataArray(np.array([4, 5, 5], dtype=np.float64), dims="x")
    actual = da.ffill("x")
    assert_equal(actual, expected)
# The next four tests check that ffill/bfill fail loudly (RuntimeError) when
# bottleneck is disabled via set_options, for both numpy and dask data.
def test_ffill_use_bottleneck():
    da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
    with xr.set_options(use_bottleneck=False):
        with pytest.raises(RuntimeError):
            da.ffill("x")
@requires_dask
def test_ffill_use_bottleneck_dask():
    da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
    da = da.chunk({"x": 1})
    with xr.set_options(use_bottleneck=False):
        with pytest.raises(RuntimeError):
            da.ffill("x")
def test_bfill_use_bottleneck():
    da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
    with xr.set_options(use_bottleneck=False):
        with pytest.raises(RuntimeError):
            da.bfill("x")
@requires_dask
def test_bfill_use_bottleneck_dask():
    da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
    da = da.chunk({"x": 1})
    with xr.set_options(use_bottleneck=False):
        with pytest.raises(RuntimeError):
            da.bfill("x")
@requires_bottleneck
@requires_dask
@pytest.mark.parametrize("method", ["ffill", "bfill"])
def test_ffill_bfill_dask(method):
    # ffill/bfill on dask data must stay lazy (guarded by
    # raise_if_dask_computes) and agree with the eager numpy result for
    # every axis/limit combination below.
    da, _ = make_interpolate_example_data((40, 40), 0.5)
    da = da.chunk({"x": 5})
    dask_method = getattr(da, method)
    numpy_method = getattr(da.compute(), method)
    # unchunked axis
    with raise_if_dask_computes():
        actual = dask_method("time")
    expected = numpy_method("time")
    assert_equal(actual, expected)
    # chunked axis
    with raise_if_dask_computes():
        actual = dask_method("x")
    expected = numpy_method("x")
    assert_equal(actual, expected)
    # with limit
    with raise_if_dask_computes():
        actual = dask_method("time", limit=3)
    expected = numpy_method("time", limit=3)
    assert_equal(actual, expected)
    # limit < axis size
    with raise_if_dask_computes():
        actual = dask_method("x", limit=2)
    expected = numpy_method("x", limit=2)
    assert_equal(actual, expected)
    # limit > axis size
    with raise_if_dask_computes():
        actual = dask_method("x", limit=41)
    expected = numpy_method("x", limit=41)
    assert_equal(actual, expected)
@requires_bottleneck
def test_ffill_bfill_nonans():
    # Filling an array with no NaNs is a no-op.
    vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
    expected = xr.DataArray(vals, dims="x")
    actual = expected.ffill(dim="x")
    assert_equal(actual, expected)
    actual = expected.bfill(dim="x")
    assert_equal(actual, expected)
@requires_bottleneck
def test_ffill_bfill_allnans():
    # An all-NaN array has nothing to propagate, so it is also a no-op.
    vals = np.full(6, np.nan, dtype=np.float64)
    expected = xr.DataArray(vals, dims="x")
    actual = expected.ffill(dim="x")
    assert_equal(actual, expected)
    actual = expected.bfill(dim="x")
    assert_equal(actual, expected)
@requires_bottleneck
def test_ffill_functions(da):
    # The shared fixture has no leading NaN, so ffill removes every gap.
    result = da.ffill("time")
    assert result.isnull().sum() == 0
@requires_bottleneck
def test_ffill_limit():
    da = xr.DataArray(
        [0, np.nan, np.nan, np.nan, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time"
    )
    result = da.ffill("time")
    expected = xr.DataArray([0, 0, 0, 0, 0, 3, 4, 5, 5, 6, 7], dims="time")
    assert_array_equal(result, expected)
    # limit=1 forward-fills only one step past each valid value.
    result = da.ffill("time", limit=1)
    expected = xr.DataArray(
        [0, 0, np.nan, np.nan, np.nan, 3, 4, 5, 5, 6, 7], dims="time"
    )
    assert_array_equal(result, expected)
def test_interpolate_dataset(ds):
    actual = ds.interpolate_na(dim="time")
    # no missing values in var1
    assert actual["var1"].count("time") == actual.dims["time"]
    # var2 should be the same as it was
    assert_array_equal(actual["var2"], ds["var2"])
@requires_bottleneck
def test_ffill_dataset(ds):
    # Smoke test: Dataset.ffill must run without error.
    ds.ffill(dim="time")
@requires_bottleneck
def test_bfill_dataset(ds):
    # Smoke test: Dataset.bfill must run without error.
    # (Previously this mistakenly called ds.ffill, duplicating
    # test_ffill_dataset and leaving bfill untested.)
    ds.bfill(dim="time")
@requires_bottleneck
@pytest.mark.parametrize(
    "y, lengths",
    [
        [np.arange(9), [[3, 3, 3, 0, 3, 3, 0, 2, 2]]],
        [np.arange(9) * 3, [[9, 9, 9, 0, 9, 9, 0, 6, 6]]],
        [[0, 2, 5, 6, 7, 8, 10, 12, 14], [[6, 6, 6, 0, 4, 4, 0, 4, 4]]],
    ],
)
def test_interpolate_na_nan_block_lengths(y, lengths):
    # _get_nan_block_lengths reports, per point, the coordinate extent of the
    # NaN block it belongs to (0 for valid points), scaling with the spacing
    # of the "y" coordinate.
    arr = [[np.nan, np.nan, np.nan, 1, np.nan, np.nan, 4, np.nan, np.nan]]
    da = xr.DataArray(arr * 2, dims=["x", "y"], coords={"x": [0, 1], "y": y})
    index = get_clean_interp_index(da, dim="y", use_coordinate=True)
    actual = _get_nan_block_lengths(da, dim="y", index=index)
    expected = da.copy(data=lengths * 2)
    assert_equal(actual, expected)
@requires_cftime
@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
def test_get_clean_interp_index_cf_calendar(cf_da, calendar):
    """The index for CFTimeIndex is in units of days. This means that if two series using a 360 and 365 days
    calendar each have a trend of .01C/year, the linear regression coefficients will be different because they
    have different number of days.
    Another option would be to have an index in units of years, but this would likely create other difficulties.
    """
    i = get_clean_interp_index(cf_da(calendar), dim="time")
    # Daily steps expressed in nanoseconds (1e9 ns/s * 86400 s/day).
    np.testing.assert_array_equal(i, np.arange(10) * 1e9 * 86400)
@requires_cftime
@pytest.mark.parametrize(
    ("calendar", "freq"), zip(["gregorian", "proleptic_gregorian"], ["1D", "1M", "1Y"])
)
def test_get_clean_interp_index_dt(cf_da, calendar, freq):
    """In the gregorian case, the index should be proportional to normal datetimes."""
    g = cf_da(calendar, freq=freq)
    g["stime"] = xr.Variable(data=g.time.to_index().to_datetimeindex(), dims=("time",))
    gi = get_clean_interp_index(g, "time")
    si = get_clean_interp_index(g, "time", use_coordinate="stime")
    np.testing.assert_array_equal(gi, si)
@requires_cftime
def test_get_clean_interp_index_potential_overflow():
    # Dates far outside the datetime64[ns] range must not overflow; the call
    # simply has to succeed.
    da = xr.DataArray(
        [0, 1, 2],
        dims=("time",),
        coords={"time": xr.cftime_range("0000-01-01", periods=3, calendar="360_day")},
    )
    get_clean_interp_index(da, "time")
@pytest.mark.parametrize("index", ([0, 2, 1], [0, 1, 1]))
def test_get_clean_interp_index_strict(index):
    # Non-monotonic / duplicated indexes raise by default but are accepted
    # (cast to float64) with strict=False.
    da = xr.DataArray([0, 1, 2], dims=("x",), coords={"x": index})
    with pytest.raises(ValueError):
        get_clean_interp_index(da, "x")
    clean = get_clean_interp_index(da, "x", strict=False)
    np.testing.assert_array_equal(index, clean)
    assert clean.dtype == np.float64
@pytest.fixture
def da_time():
    # 1-D series with NaN gaps of lengths 1, 2 and 4 for the max_gap tests.
    return xr.DataArray(
        [np.nan, 1, 2, np.nan, np.nan, 5, np.nan, np.nan, np.nan, np.nan, 10],
        dims=["t"],
    )
def test_interpolate_na_max_gap_errors(da_time):
    with pytest.raises(
        NotImplementedError, match=r"max_gap not implemented for unlabeled coordinates"
    ):
        da_time.interpolate_na("t", max_gap=1)
    with pytest.raises(ValueError, match=r"max_gap must be a scalar."):
        da_time.interpolate_na("t", max_gap=(1,))
    # Once the coordinate is datetime-like, max_gap must be timedelta-like.
    da_time["t"] = pd.date_range("2001-01-01", freq="H", periods=11)
    with pytest.raises(TypeError, match=r"Expected value of type str"):
        da_time.interpolate_na("t", max_gap=1)
    with pytest.raises(TypeError, match=r"Expected integer or floating point"):
        da_time.interpolate_na("t", max_gap="1H", use_coordinate=False)
    with pytest.raises(ValueError, match=r"Could not convert 'huh' to timedelta64"):
        da_time.interpolate_na("t", max_gap="huh")
@requires_bottleneck
@pytest.mark.parametrize(
    "time_range_func",
    [pd.date_range, pytest.param(xr.cftime_range, marks=requires_cftime)],
)
@pytest.mark.parametrize("transform", [lambda x: x, lambda x: x.to_dataset(name="a")])
@pytest.mark.parametrize(
    "max_gap", ["3H", np.timedelta64(3, "h"), pd.to_timedelta("3H")]
)
def test_interpolate_na_max_gap_time_specifier(
    da_time, max_gap, transform, time_range_func
):
    # Only the gaps spanning at most 3 hours get filled; the 4-point gap and
    # the leading edge NaN stay missing.  All three timedelta spellings of
    # max_gap must behave identically, for DataArray and Dataset alike.
    da_time["t"] = time_range_func("2001-01-01", freq="H", periods=11)
    expected = transform(
        da_time.copy(data=[np.nan, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan, 10])
    )
    actual = transform(da_time).interpolate_na("t", max_gap=max_gap)
    assert_allclose(actual, expected)
@requires_bottleneck
@pytest.mark.parametrize(
    "coords",
    [
        pytest.param(None, marks=pytest.mark.xfail()),
        {"x": np.arange(4), "y": np.arange(11)},
    ],
)
def test_interpolate_na_2d(coords):
    # max_gap interpolation along each dimension of a 2-D array; the
    # coordinate-less case is currently expected to fail (xfail above).
    da = xr.DataArray(
        [
            [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
        ],
        dims=["x", "y"],
        coords=coords,
    )
    # Along y, only single-NaN gaps fit within max_gap=2.
    actual = da.interpolate_na("y", max_gap=2)
    expected_y = da.copy(
        data=[
            [1, 2, 3, 4, 5, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, 4, 5, 6, 7, np.nan, np.nan, np.nan, 11],
        ]
    )
    assert_equal(actual, expected_y)
    # Along x, the interior NaNs form columns bounded by valid values.
    actual = da.interpolate_na("x", max_gap=3)
    expected_x = xr.DataArray(
        [
            [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
            [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],
        ],
        dims=["x", "y"],
        coords=coords,
    )
    assert_equal(actual, expected_x)
@requires_scipy
def test_interpolators_complex_out_of_bounds():
    """Ensure complex nans are used for complex data"""
    xi = np.array([-1, 0, 1, 2, 5], dtype=np.float64)
    yi = np.exp(1j * xi)
    x = np.array([-2, 1, 6], dtype=np.float64)
    # Out-of-range points must be filled with complex NaN (nan+nanj) so the
    # imaginary part is not silently dropped.
    expected = np.array(
        [np.nan + np.nan * 1j, np.exp(1j), np.nan + np.nan * 1j], dtype=yi.dtype
    )
    for method, interpolator in [
        ("linear", NumpyInterpolator),
        ("linear", ScipyInterpolator),
    ]:
        f = interpolator(xi, yi, method=method)
        actual = f(x)
        assert_array_equal(actual, expected)
| {
"content_hash": "09f56d24510207ba38aaafda35d8860a",
"timestamp": "",
"source": "github",
"line_count": 728,
"max_line_length": 112,
"avg_line_length": 32.10851648351648,
"alnum_prop": 0.6031229946524064,
"repo_name": "pydata/xarray",
"id": "6f3d7a702dd92c48b3e47d180480775de7f04170",
"size": "23375",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "xarray/tests/test_missing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "HTML",
"bytes": "1343"
},
{
"name": "Python",
"bytes": "4767910"
},
{
"name": "Shell",
"bytes": "1262"
}
],
"symlink_target": ""
} |
print("http://machinelearningmastery.com/machine-learning-in-python-step-by-step/")
# Report the interpreter version, then the version of each core scientific
# package.  Packages are imported one at a time so a missing one fails right
# after the earlier versions have already been printed.
import sys
print('Python: %s' % (sys.version,))
# scipy
import scipy
print('scipy: %s' % (scipy.__version__,))
# numpy
import numpy
print('numpy: %s' % (numpy.__version__,))
# matplotlib
import matplotlib
print('matplotlib: %s' % (matplotlib.__version__,))
# pandas
import pandas
print('pandas: %s' % (pandas.__version__,))
# scikit-learn
import sklearn
print('sklearn: %s' % (sklearn.__version__,))
| {
"content_hash": "72c29ac4ec485a2a90ddd77f49fbe622",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 26.473684210526315,
"alnum_prop": 0.7137176938369781,
"repo_name": "yvlasov/ConProbIN",
"id": "8c4c78fcd0ebc28aab1cab5d80ce32714b6b0e16",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "try-ml/check_env.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6882"
}
],
"symlink_target": ""
} |
import os
import bz2
import gzip
import simplejson
import struct
def mkdir_p(dirname):
    'like mkdir -p'
    if os.path.exists(dirname):
        return
    try:
        os.makedirs(dirname)
    except OSError as e:
        # A concurrent process may have created the directory between the
        # existence check and makedirs; only re-raise if it is still missing.
        if not os.path.exists(dirname):
            raise e
def open_compressed(filename, mode='r'):
    """Open *filename*, transparently (de)compressing .bz2 / .gz files.

    When opening for writing, missing parent directories are created first.
    Plain files are opened with the builtin ``open`` — the old ``file``
    builtin used previously does not exist on Python 3 (NameError).
    """
    if 'w' in mode:
        dirname = os.path.dirname(filename)
        if dirname:
            mkdir_p(dirname)
    if filename.endswith('.bz2'):
        # BZ2File handles its own framing; it rejects a 'b' in the mode here.
        return bz2.BZ2File(filename, mode.replace('b', ''))
    elif filename.endswith('.gz'):
        return gzip.GzipFile(filename, mode)
    else:
        return open(filename, mode)
def json_dump(data, filename, **kwargs):
    """Serialize *data* as JSON to *filename* (compression by extension)."""
    f = open_compressed(filename, 'w')
    try:
        simplejson.dump(data, f, **kwargs)
    finally:
        f.close()
def json_load(filename):
    """Load and return JSON data from *filename* (transparently decompressed)."""
    f = open_compressed(filename, 'rb')
    try:
        return simplejson.load(f)
    finally:
        f.close()
def json_stream_dump(stream, filename, **kwargs):
    """Write an iterable of items to *filename* as a JSON list, one item per
    line.

    The exact newline layout ('[' line, items separated by ',\\n', closing
    ']' line) is load-bearing: json_stream_load parses the file line by line.
    """
    # Compact separators keep each item on a single line.
    kwargs['separators'] = (',', ':')
    stream = iter(stream)
    with open_compressed(filename, 'w') as f:
        f.write('[')
        try:
            # First item has no leading comma; an empty stream just yields "[\n]".
            item = next(stream)
            f.write('\n')
            simplejson.dump(item, f, **kwargs)
            for item in stream:
                f.write(',\n')
                simplejson.dump(item, f, **kwargs)
        except StopIteration:
            pass
        f.write('\n]')
def json_costream_dump(filename, **kwargs):
    """Coroutine counterpart of json_stream_dump.

    Prime with next(), then .send() each item; .close() (GeneratorExit)
    finalizes the file.  Produces the same newline layout that
    json_stream_load expects.
    """
    kwargs['separators'] = (',', ':')
    with open_compressed(filename, 'w') as f:
        f.write('[')
        try:
            # First item has no leading comma.
            item = (yield)
            f.write('\n')
            simplejson.dump(item, f, **kwargs)
            while True:
                item = (yield)
                f.write(',\n')
                simplejson.dump(item, f, **kwargs)
        except GeneratorExit:
            pass
        f.write('\n]')
class json_stream_load(object):
    '''
    Read json data that was created by json_stream_dump or json_costream_dump.
    Note that this exploits newline formatting in the above dumpers.
    In particular:
    - the first line is '['
    - intermediate lines are of the form '{},'.format(json_parsable_content)
    - the penultimate line is of the form '{}'.format(json_parsable_content)
    - the last line is ']'
    - there is no trailing whitespace
    An alternative would be to use ijson to streamingly load arbitrary json
    files, however in practice this is ~40x slower.
    '''
    def __init__(self, filename):
        self.fd = open_compressed(filename, 'rb')
        # readline(2) reads at most two characters: exactly enough for "[\n".
        line = self.fd.readline(2)
        if line != '[\n':
            raise IOError(
                'Unhandled format for json_stream_load. '
                'Try recreating json file with the compatible '
                'json_stream_dump or json_costream_dump.')
    def __iter__(self):
        return self
    # NOTE(review): Python 2 iterator protocol ('next', str comparison on an
    # 'rb' stream).  Running under Python 3 would need '__next__' and
    # bytes-aware comparisons — confirm the supported interpreter.
    def next(self):
        # Each data line ends with ',\n' (or just '\n' for the last item).
        line = self.fd.readline().rstrip(',\n')
        if line == ']':
            # Closing bracket: end of stream; release the file handle.
            self.close()
            raise StopIteration
        else:
            return simplejson.loads(line)
    def close(self):
        self.fd.close()
def protobuf_stream_write(item, fd):
    """Write *item* as one length-prefixed record: a 4-byte little-endian
    unsigned size followed by the payload.

    NOTE(review): the isinstance(item, str) assert matches Python 2, where
    str is the byte-string type; under Python 3 payloads would be bytes.
    """
    assert isinstance(item, str), item
    fd.write(struct.pack('<I', len(item)))
    fd.write(item)
def protobuf_stream_read(fd):
    """Read and return one length-prefixed record from *fd*.

    Raises StopIteration on a missing/truncated 4-byte header so it can
    back an iterator directly (see protobuf_stream_load).
    """
    size_str = fd.read(4)
    if len(size_str) < 4:
        raise StopIteration
    size = struct.unpack('<I', size_str)[0]
    return fd.read(size)
def protobuf_stream_dump(stream, filename):
    """Write every item of *stream* to *filename* as length-prefixed records."""
    with open_compressed(filename, 'wb') as fd:
        for record in stream:
            protobuf_stream_write(record, fd)
class protobuf_stream_load(object):
    # Iterator over length-prefixed records written by protobuf_stream_dump.
    # NOTE(review): Python 2 iterator protocol ('next' rather than
    # '__next__') — confirm the supported interpreter.
    def __init__(self, filename):
        self.fd = open_compressed(filename, 'rb')
    def __iter__(self):
        return self
    def next(self):
        # protobuf_stream_read raises StopIteration at end-of-file.
        return protobuf_stream_read(self.fd)
    def close(self):
        self.fd.close()
| {
"content_hash": "aeb7274453841c231c0b4f7e1ae8efa8",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 26.675862068965518,
"alnum_prop": 0.5685108583247156,
"repo_name": "forcedotcom/distributions",
"id": "dbe20a09bd0caefe193559b167a8ce2e7210f548",
"size": "5410",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "distributions/io/stream.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "266978"
},
{
"name": "Python",
"bytes": "408462"
},
{
"name": "Shell",
"bytes": "8475"
},
{
"name": "TeX",
"bytes": "2585"
}
],
"symlink_target": ""
} |
class SessionSettings(object):
    """Django session/messages settings mixin."""
    # File-backed sessions instead of the database default.
    SESSION_ENGINE = 'django.contrib.sessions.backends.file'
    # Cookie lifetime: four weeks, in seconds.
    SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4
    def SESSION_COOKIE_NAME(self):
        # NOTE(review): defined as a plain method, not a @property; confirm
        # the settings framework in use invokes it, otherwise the setting
        # value would be the bound method itself.
        return self.PROJECT_NAME
    LOGIN_URL = '/admin/'
    MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
"content_hash": "f5392ca3d8493dfb3da51fd91ee2476b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6955128205128205,
"repo_name": "futurecolors/tinned-django",
"id": "8f25b747c3bdc61601c342a99f3169316b5fe1b3",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinned_django/project_name/config/django_config/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "320"
},
{
"name": "Python",
"bytes": "26005"
}
],
"symlink_target": ""
} |
import asyncio
from opentracing.ext import tags
from ..otel_ot_shim_tracer import MockTracer
from ..testcase import OpenTelemetryTestCase
from ..utils import get_logger, get_one_by_operation_name, stop_loop_when
from .request_handler import RequestHandler
logger = get_logger(__name__)
class Client:
    """Toy RPC client that fires RequestHandler hooks around each message.

    before_request/after_request each run inside their own nested coroutine,
    so no span context is propagated between them automatically — which is
    exactly the scenario this testbed exercises.
    """
    def __init__(self, request_handler, loop):
        self.request_handler = request_handler
        self.loop = loop
    async def send_task(self, message):
        # Per-request scratch dict the handler can stash state (e.g. a span) in.
        request_context = {}
        async def before_handler():
            self.request_handler.before_request(message, request_context)
        async def after_handler():
            self.request_handler.after_request(message, request_context)
        await before_handler()
        await after_handler()
        return f"{message}::response"
    def send(self, message):
        # Return the coroutine; the caller decides how to schedule it.
        return self.send_task(message)
    def send_sync(self, message):
        # Drive the request to completion on the injected event loop.
        return self.loop.run_until_complete(self.send_task(message))
class TestAsyncio(OpenTelemetryTestCase):
    """
    There is only one instance of 'RequestHandler' per 'Client'. Methods of
    'RequestHandler' are executed in different Tasks, and no Span propagation
    among them is done automatically.
    Therefore we cannot use current active span and activate span.
    So one issue here is setting correct parent span.
    """
    def setUp(self):
        self.tracer = MockTracer()
        self.loop = asyncio.get_event_loop()
        self.client = Client(RequestHandler(self.tracer), self.loop)
    def test_two_callbacks(self):
        # Two concurrent requests should yield two independent client spans
        # (different traces, no parents).
        res_future1 = self.loop.create_task(self.client.send("message1"))
        res_future2 = self.loop.create_task(self.client.send("message2"))
        # Run the loop until both spans are finished (or the timeout hits).
        stop_loop_when(
            self.loop,
            lambda: len(self.tracer.finished_spans()) >= 2,
            timeout=5.0,
        )
        self.loop.run_forever()
        self.assertEqual("message1::response", res_future1.result())
        self.assertEqual("message2::response", res_future2.result())
        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 2)
        for span in spans:
            self.assertEqual(
                span.attributes.get(tags.SPAN_KIND, None),
                tags.SPAN_KIND_RPC_CLIENT,
            )
        self.assertNotSameTrace(spans[0], spans[1])
        self.assertIsNone(spans[0].parent)
        self.assertIsNone(spans[1].parent)
    def test_parent_not_picked(self):
        """Active parent should not be picked up by child."""
        async def do_task():
            with self.tracer.start_active_span("parent"):
                response = await self.client.send_task("no_parent")
                self.assertEqual("no_parent::response", response)
        self.loop.run_until_complete(do_task())
        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 2)
        child_span = get_one_by_operation_name(spans, "send")
        self.assertIsNotNone(child_span)
        parent_span = get_one_by_operation_name(spans, "parent")
        self.assertIsNotNone(parent_span)
        # Here check that there is no parent-child relation.
        self.assertIsNotChildOf(child_span, parent_span)
    def test_good_solution_to_set_parent(self):
        """Asyncio and contextvars are integrated, in this case it is not needed
        to activate current span by hand.
        """
        async def do_task():
            with self.tracer.start_active_span("parent"):
                # Set ignore_active_span to False indicating that the
                # framework will do it for us.
                req_handler = RequestHandler(
                    self.tracer,
                    ignore_active_span=False,
                )
                client = Client(req_handler, self.loop)
                response = await client.send_task("correct_parent")
                self.assertEqual("correct_parent::response", response)
                # Send second request, now there is no active parent,
                # but it will be set, ups
                response = await client.send_task("wrong_parent")
                self.assertEqual("wrong_parent::response", response)
        self.loop.run_until_complete(do_task())
        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 3)
        parent_span = get_one_by_operation_name(spans, "parent")
        self.assertIsNotNone(parent_span)
        # Only the first request should be a child of "parent"; the second
        # demonstrates the stale-context pitfall.
        spans = [span for span in spans if span != parent_span]
        self.assertIsChildOf(spans[0], parent_span)
        self.assertIsNotChildOf(spans[1], parent_span)
| {
"content_hash": "0c0291ccae2dc23431f5f8a1eb674eb1",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 80,
"avg_line_length": 34.01481481481481,
"alnum_prop": 0.625,
"repo_name": "open-telemetry/opentelemetry-python",
"id": "14958418a325a1f8b2b3ecc546841a026b35c2e5",
"size": "4592",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler/test_asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "1683"
},
{
"name": "Python",
"bytes": "1788131"
},
{
"name": "Shell",
"bytes": "6950"
},
{
"name": "Thrift",
"bytes": "17840"
}
],
"symlink_target": ""
} |
from dataclasses import dataclass
from typing import Optional
@dataclass(init=False, repr=False)
class Token:
    """
    A simple token representation, keeping track of the token's text, offset in the passage it was
    taken from, POS tag, dependency relation, and similar information. These fields match spacy's
    exactly, so we can just use a spacy token for this.
    # Parameters
    text : `str`, optional
        The original text represented by this token.
    idx : `int`, optional
        The character offset of this token into the tokenized passage.
    idx_end : `int`, optional
        The character offset one past the last character in the tokenized passage.
    lemma_ : `str`, optional
        The lemma of this token.
    pos_ : `str`, optional
        The coarse-grained part of speech of this token.
    tag_ : `str`, optional
        The fine-grained part of speech of this token.
    dep_ : `str`, optional
        The dependency relation for this token.
    ent_type_ : `str`, optional
        The entity type (i.e., the NER tag) for this token.
    text_id : `int`, optional
        If your tokenizer returns integers instead of strings (e.g., because you're doing byte
        encoding, or some hash-based embedding), set this with the integer. If this is set, we
        will bypass the vocabulary when indexing this token, regardless of whether `text` is also
        set. You can `also` set `text` with the original text, if you want, so that you can
        still use a character-level representation in addition to a hash-based word embedding.
    type_id : `int`, optional
        Token type id used by some pretrained language models like original BERT
    The other fields on `Token` follow the fields on spacy's `Token` object; this is one we
    added, similar to spacy's `lex_id`.
    """
    __slots__ = [
        "text",
        "idx",
        "idx_end",
        "lemma_",
        "pos_",
        "tag_",
        "dep_",
        "ent_type_",
        "text_id",
        "type_id",
    ]
    # Defining the `__slots__` of this class is an optimization that dramatically reduces
    # the size in memory of a `Token` instance. The downside of using `__slots__`
    # with a dataclass is that you can't assign default values at the class level,
    # which is why we need a custom `__init__` function that provides the default values.
    text: Optional[str]
    idx: Optional[int]
    idx_end: Optional[int]
    lemma_: Optional[str]
    pos_: Optional[str]
    tag_: Optional[str]
    dep_: Optional[str]
    ent_type_: Optional[str]
    text_id: Optional[int]
    type_id: Optional[int]
    def __init__(
        self,
        # All parameters default to None, so annotate them as Optional
        # (the previous bare `str`/`int` annotations were implicitly-Optional,
        # which type checkers reject under PEP 484).
        text: Optional[str] = None,
        idx: Optional[int] = None,
        idx_end: Optional[int] = None,
        lemma_: Optional[str] = None,
        pos_: Optional[str] = None,
        tag_: Optional[str] = None,
        dep_: Optional[str] = None,
        ent_type_: Optional[str] = None,
        text_id: Optional[int] = None,
        type_id: Optional[int] = None,
    ) -> None:
        assert text is None or isinstance(
            text, str
        )  # Some very hard to debug errors happen when this is not true.
        self.text = text
        self.idx = idx
        self.idx_end = idx_end
        self.lemma_ = lemma_
        self.pos_ = pos_
        self.tag_ = tag_
        self.dep_ = dep_
        self.ent_type_ = ent_type_
        self.text_id = text_id
        self.type_id = type_id
    def __str__(self):
        return self.text
    def __repr__(self):
        return self.__str__()
    def ensure_text(self) -> str:
        """
        Return the `text` field, raising an exception if it's `None`.
        """
        if self.text is None:
            raise ValueError("Unexpected null text for token")
        else:
            return self.text
def show_token(token: Token) -> str:
    """Render every field of *token* on a single line, for debugging output."""
    pieces = [
        f"{token.text} ",
        f"(idx: {token.idx}) ",
        f"(idx_end: {token.idx_end}) ",
        f"(lemma: {token.lemma_}) ",
        f"(pos: {token.pos_}) ",
        f"(tag: {token.tag_}) ",
        f"(dep: {token.dep_}) ",
        f"(ent_type: {token.ent_type_}) ",
        f"(text_id: {token.text_id}) ",
        f"(type_id: {token.type_id}) ",
    ]
    return "".join(pieces)
| {
"content_hash": "eaeece7456c28443dc99e11c19018a39",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 98,
"avg_line_length": 33.03174603174603,
"alnum_prop": 0.5812109562710236,
"repo_name": "allenai/allennlp",
"id": "c4f971ae01ddc2b96d8f8fe1ba902899741c1281",
"size": "4162",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "allennlp/data/tokenizers/token_class.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name="lexical-uuid",
version="0.1.1",
url='http://github.com/samuraisam/lexical_uuid',
author='Samuel Sutch',
author_email='sam@sutch.net',
description='A sensible class for dealing with Lexical UUIDs',
long_description=
"""
lexical_uuid is a lightweight Python library for sensibly dealing with Lexical UUIDs. Handy when you want a roughly-ordered primary key for your database that any node in your system can generate.
""",
packages=['lexical_uuid'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
)
| {
"content_hash": "41f10f2939f2ec3bd4662479d142976f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 196,
"avg_line_length": 36.791666666666664,
"alnum_prop": 0.6613816534541337,
"repo_name": "samuraisam/lexical_uuid",
"id": "8f18b8f22c1160a769f8d0863310bdc13d65a17b",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6446"
}
],
"symlink_target": ""
} |
from meocloud.client.linux.protocol.daemon_ui.ttypes import InitResult, StatusResult, RemoteDirectoryListingResult, SyncStatus
from meocloud.client.linux.protocol.daemon_core.ttypes import State
from meocloud.client.linux.protocol.daemon_ui import UI
from meocloud.client.linux.thrift_utils import ThriftListener
# Application specific imports
from meocloud.client.linux.settings import LOGGER_NAME, VERSION
from meocloud.client.linux.timeouts import CONNECTION_REQUIRED_TIMEOUT, USER_ACTION_REQUIRED_TIMEOUT
from meocloud.client.linux.daemon.communication import Events, AsyncResults, DaemonState
from meocloud.client.linux.daemon import api
from meocloud.client.linux.utils import get_sync_code, dict_from_slots_obj
# Logging
import logging
log = logging.getLogger(LOGGER_NAME)
class UIListener(ThriftListener):
    """Thrift listener that serves UI requests arriving on *socket*."""

    def __init__(self, socket, core_client, ui_config, notifs_handler):
        request_handler = UIListenerHandler(core_client, ui_config, notifs_handler)
        super(UIListener, self).__init__(
            'UIListener', socket, UI.Processor(request_handler))
class UIListenerHandler(UI.Iface):
    """Implements the Thrift UI interface: translates UI requests into calls
    on the core client, daemon events, and notification handler."""
    def __init__(self, core_client, ui_config, notifs_handler):
        super(UIListenerHandler, self).__init__()
        self.core_client = core_client
        self.ui_config = ui_config
        self.notifs_handler = notifs_handler
    ### THRIFT METHODS ###
    def waitForAuthorization(self):
        # Same flow as init(), but with the longer timeout used while waiting
        # for a user action.
        return self.init(timeout=USER_ACTION_REQUIRED_TIMEOUT)
    def init(self, timeout=CONNECTION_REQUIRED_TIMEOUT):
        """Block until the daemon state changes (or *timeout* elapses) and map
        the resulting DaemonState onto an InitResult code."""
        log.debug('UIListener.init() <<<<')
        Events.state_changed.wait(timeout=timeout)
        if Events.state_changed.is_set():
            # Consume the event so the next init() call waits for a new change.
            Events.state_changed.clear()
            log.debug('UIListener.init: State has changed, current state: {0}'.format(DaemonState.current))
            if DaemonState.current == DaemonState.AUTHORIZATION_REQUIRED:
                return InitResult.AUTHORIZATION_REQUIRED
            elif DaemonState.current == DaemonState.OFFLINE:
                return InitResult.OFFLINE
            elif DaemonState.current == DaemonState.AUTHORIZATION_OK:
                return InitResult.AUTHORIZATION_OK
            elif DaemonState.current == DaemonState.ROOT_FOLDER_MISSING:
                return InitResult.ROOT_FOLDER_MISSING
            # TODO Improve error handling
            # Unreachable unless DaemonState gains a value not handled above.
            assert False
        else:
            # Timed out: ask the daemon to shut down and report the timeout.
            log.warning('UIListener.init: Timed-out while waiting for state change')
            Events.shutdown_required.set()
            return InitResult.TIMEDOUT
    def startCore(self):
        log.info('UIListener.startCore() <<<<')
        Events.core_start_ready.set()
    def authorizeWithDeviceName(self, device_name):
        # Returns the authorization URL produced by the core.
        log.debug('UIListener.authorizeWithDeviceName({0}) <<<<'.format(device_name))
        url = self.core_client.authorizeWithDeviceName(device_name)
        return url
    def status(self):
        """Assemble a StatusResult from the core's current status, adding
        sync details only while actually syncing."""
        log.debug('UIListener.status() <<<<')
        status_result = StatusResult()
        status = self.core_client.currentStatus()
        if status.state == State.SYNCING:
            sync_status = self.core_client.currentSyncStatus()
            sync_status_dict = dict_from_slots_obj(sync_status)
            sync_status_dict['syncCode'] = get_sync_code(status.statusCode)
            status_result.syncStatus = SyncStatus(**sync_status_dict)
        status_result.status = status
        status_result.persistentNotifs = self.notifs_handler.get_persistent_notifs()
        return status_result
    def recentlyChangedFilePaths(self):
        # Not implemented; part of the Thrift interface.
        log.debug('UIListener.recentlyChangedFilePaths() <<<<')
        pass
    def pause(self):
        log.debug('UIListener.pause() <<<<')
        self.core_client.pause()
    def unpause(self):
        log.debug('UIListener.unpause() <<<<')
        self.core_client.unpause()
    def shutdown(self):
        log.debug('UIListener.shutdown() <<<<')
        Events.shutdown_required.set()
    def unlink(self):
        log.debug('UIListener.unlink() <<<<')
        return api.unlink(self.core_client, self.ui_config)
    def networkSettingsChanged(self):
        # Re-read settings from ui_config and push them to the core.
        log.debug('UIListener.networkSettingsChanged() <<<<')
        self.core_client.networkSettingsChanged(api.get_network_settings(self.ui_config))
    def remoteDirectoryListing(self, path):
        """Request a remote listing from the core and wait (bounded) for the
        asynchronous result; returns an empty result on timeout."""
        log.debug('UIListener.requestRemoteDirectoryListing({0}) <<<<'.format(path))
        AsyncResults.remote_directory_listing.create()
        self.core_client.requestRemoteDirectoryListing(path)
        args = AsyncResults.remote_directory_listing.get(timeout=CONNECTION_REQUIRED_TIMEOUT)
        if args is None:
            return RemoteDirectoryListingResult()
        result = RemoteDirectoryListingResult()
        result.statusCode = args['statusCode']
        result.path = args['path']
        result.listing = args['listing']
        return result
    def ignoredDirectories(self):
        log.debug('UIListener.ignoredDirectories() <<<<')
        return self.core_client.ignoredDirectories()
    def setIgnoredDirectories(self, paths):
        log.debug('UIListener.setIgnoredDirectories({0}) <<<<'.format(paths))
        self.core_client.setIgnoredDirectories(paths)
    def webLoginURL(self):
        # Not implemented; part of the Thrift interface.
        log.debug('UIListener.webLoginURL() <<<<')
        pass
    def ping(self):
        log.debug('UIListener.ping() <<<<')
        return True
    def version(self):
        # Version of this (UI-side) component.
        log.debug('UIListener.version() <<<<')
        return VERSION
    def coreVersion(self):
        # Version reported by the core daemon.
        log.debug('UIListener.coreVersion() <<<<')
        return self.core_client.version()
| {
"content_hash": "9589430090e5ea8ad3f5023b149cafb8",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 126,
"avg_line_length": 40.2589928057554,
"alnum_prop": 0.6731593995711223,
"repo_name": "sapo/meocloud-cli",
"id": "281e30585aaa7d2a23700821017720c458b4d620",
"size": "5621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meocloud/client/linux/daemon/ui_listener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "459181"
},
{
"name": "Shell",
"bytes": "5975"
},
{
"name": "Thrift",
"bytes": "1689"
}
],
"symlink_target": ""
} |
import theano
from theano import tensor
import numpy
from utils import init_embedding_table
from collections import OrderedDict
from blocks.bricks import Tanh, Softmax, Linear, MLP, Identity, Rectifier
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import LSTM, GatedRecurrent, Bidirectional
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.parallel import Fork
from blocks.bricks.sequence_generators import (
SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback)
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph, apply_dropout, apply_noise
from blocks.monitoring import aggregation
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.bricks.base import application
from blocks.roles import add_role, WEIGHT, BIAS
def numpy_floatX(data):
    """Convert *data* to a numpy array using Theano's configured float dtype."""
    float_dtype = theano.config.floatX
    return numpy.asarray(data, dtype=float_dtype)
def _p(pp, name):
return '%s_%s' % (pp, name)
def make_bidir_lstm_stack(seq, seq_dim, mask, sizes, skip=True, name=''):
    """Build a stack of bidirectional LSTM layers over ``seq``.

    One forward and one backward LSTM are created per entry in ``sizes``.
    With ``skip=True`` each layer receives the original ``seq`` in addition
    to the previous layer's outputs (skip connections).

    Returns ``(bricks, hidden_list)``: the created bricks (Linear input
    transforms plus LSTMs) and the per-layer hidden sequences, ordered
    [fwd_0, bwd_0, fwd_1, bwd_1, ...].

    NOTE: brick names are derived from ``name`` and the layer indices; saved
    parameters are looked up by these names, so do not change the format.
    """
    bricks = []
    curr_dim = [seq_dim]
    curr_hidden = [seq]
    hidden_list = []
    for k, dim in enumerate(sizes):
        # One Linear per input stream, producing the 4*dim gate pre-activations.
        fwd_lstm_ins = [Linear(input_dim=d, output_dim=4*dim, name='%s_fwd_lstm_in_%d_%d'%(name,k,l)) for l, d in enumerate(curr_dim)]
        fwd_lstm = LSTM(dim=dim, activation=Tanh(), name='%s_fwd_lstm_%d'%(name,k))
        bwd_lstm_ins = [Linear(input_dim=d, output_dim=4*dim, name='%s_bwd_lstm_in_%d_%d'%(name,k,l)) for l, d in enumerate(curr_dim)]
        bwd_lstm = LSTM(dim=dim, activation=Tanh(), name='%s_bwd_lstm_%d'%(name,k))
        bricks = bricks + [fwd_lstm, bwd_lstm] + fwd_lstm_ins + bwd_lstm_ins
        # Sum the transformed input streams before feeding the LSTMs.
        fwd_tmp = sum(x.apply(v) for x, v in zip(fwd_lstm_ins, curr_hidden))
        bwd_tmp = sum(x.apply(v) for x, v in zip(bwd_lstm_ins, curr_hidden))
        fwd_hidden, _ = fwd_lstm.apply(fwd_tmp, mask=mask)
        # Backward pass: reverse both the inputs and the mask along time.
        bwd_hidden, _ = bwd_lstm.apply(bwd_tmp[::-1], mask=mask[::-1])
        hidden_list = hidden_list + [fwd_hidden, bwd_hidden]
        if skip:
            # Next layer sees the raw sequence plus both directions
            # (backward outputs re-reversed into forward time order).
            curr_hidden = [seq, fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [seq_dim, dim, dim]
        else:
            curr_hidden = [fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [dim, dim]
    return bricks, hidden_list
class MaskedSoftmaxEmitter(SoftmaxEmitter):
    """Softmax emitter that restricts emission probabilities to words flagged
    in ``context_bag`` (a 0/1 indicator over the vocabulary)."""
    def __init__(self, context_bag, **kwargs):
        super(MaskedSoftmaxEmitter, self).__init__(**kwargs)
        self.context_bag = context_bag
    @application
    def probs(self, readouts):
        # Push scores of words outside the context bag to -1000 so their
        # post-softmax probability is effectively zero.
        readouts = tensor.switch(self.context_bag, readouts, -1000 * tensor.ones_like(readouts))
        return self.softmax.apply(readouts, extra_ndim=readouts.ndim - 2)
    @application
    def cost(self, readouts, outputs):
        # NOTE(review): the cost path does NOT apply the context-bag mask
        # (the masking line is deliberately commented out) -- confirm intended.
        #readouts = tensor.switch(self.context_bag, readouts, -1000 * tensor.ones_like(readouts))
        return self.softmax.categorical_cross_entropy(outputs, readouts, extra_ndim=readouts.ndim - 2)
def init_tparams(params):
    """Wrap each numpy parameter array in a named Theano shared variable.

    Parameters
    ----------
    params : OrderedDict
        Mapping of parameter name to numpy array.

    Returns
    -------
    OrderedDict
        Mapping of the same names (same order) to ``theano.shared`` variables.
    """
    tparams = OrderedDict()
    for kk, pp in params.iteritems():
        # Use the iterated value directly; the original re-indexed params[kk]
        # and left the loop value unused.
        tparams[kk] = theano.shared(pp, name=kk)
    return tparams
def rand_weight(ndim, ddim, lo, hi):
    """Draw an (ndim, ddim) matrix uniformly from [lo, hi) in Theano's float dtype."""
    uniform = numpy.random.rand(ndim, ddim)
    scaled = uniform * (hi - lo) + lo
    return scaled.astype(theano.config.floatX)
def init_params(data_dim, lstm_dim):
    """Create the decoder-LSTM and pointer-attention parameters as numpy arrays."""
    def four_gate_weight(n_in, n_out):
        # One weight matrix per LSTM gate (i, f, o, c), concatenated column-wise.
        return numpy.concatenate(
            [rand_weight(n_in, n_out, -0.08, 0.08) for _ in range(4)], axis=1)

    params = OrderedDict()
    # Decoder LSTM: input-to-hidden, hidden-to-hidden, bias.
    params['lstm_de_W'] = four_gate_weight(data_dim, lstm_dim)
    params['lstm_de_U'] = four_gate_weight(lstm_dim, lstm_dim)
    params['lstm_de_b'] = numpy.zeros((4 * lstm_dim,)).astype(theano.config.floatX)
    params['lstm_hterm'] = rand_weight(lstm_dim, 1, -0.08, 0.08)[:, 0]
    # Pointer-attention parameters.
    params['ptr_W1'] = rand_weight(lstm_dim, lstm_dim, -0.08, 0.08)
    params['ptr_W2'] = rand_weight(lstm_dim, lstm_dim, -0.08, 0.08)
    params['ptr_v'] = rand_weight(lstm_dim, 1, -0.08, 0.08)[:, 0]
    return params
def ptr_network(tparams, cqembed, context_mask, ans_indices, ans_indices_mask, decoder_lstm_output_dim, cenc):
    """Pointer-network decoder over the encoded context.

    Builds two symbolic graphs: a teacher-forced decoding pass over
    ``ans_indices`` (returned as ``preds``, one pointer distribution over
    context positions per answer step) and a 5-step greedy generation pass
    (returned as ``generations``, argmax context indices per step).
    """
    #cqembed: length * batch_size * (embed+2*lstm_size)
    n_sizes = cqembed.shape[0] #context length
    n_samples = cqembed.shape[1] if cqembed.ndim == 3 else 1 #batch_size
    n_steps = ans_indices.shape[0] #answer length
    assert context_mask is not None
    assert ans_indices_mask is not None
    def _slice(_x, n, dim):
        # Select the n-th gate slice from a concatenated gate pre-activation.
        if _x.ndim == 3:
            return _x[:, :, n * dim:(n + 1) * dim]
        if _x.ndim == 2:
            return _x[:, n * dim:(n + 1) * dim]
        return _x[n * dim:(n + 1) * dim]
    def softmax(m_, x_):
        # Numerically-stable softmax along axis 0, normalized only over
        # positions where mask m_ is 1.
        maxes = tensor.max(x_, axis=0, keepdims=True)
        e = tensor.exp(x_ - maxes)
        dist = e / tensor.sum(e * m_, axis=0)
        return dist
    def _lstm(input_mask, input_embedding, h_, c_, prefix='lstm_en'):
        # One masked LSTM step; masked-out samples carry h_/c_ forward.
        preact = tensor.dot(input_embedding, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
        preact += tensor.dot(h_, tparams[_p(prefix, 'U')])
        i = tensor.nnet.sigmoid(_slice(preact, 0, decoder_lstm_output_dim))
        f = tensor.nnet.sigmoid(_slice(preact, 1, decoder_lstm_output_dim))
        o = tensor.nnet.sigmoid(_slice(preact, 2, decoder_lstm_output_dim))
        c = tensor.tanh(_slice(preact, 3, decoder_lstm_output_dim))
        c = f * c_ + i * c
        c = input_mask[:, None] * c + (1. - input_mask)[:, None] * c_
        h = o * tensor.tanh(c)
        h = input_mask[:, None] * h + (1. - input_mask)[:, None] * h_
        return h, c
    def prediction_ptr_probs(prior_word_index, decoded_old, c_, cenc, hiddens_mask): #decoded_old Initialized with cenc[-1]
        # Greedy generation step: feed the previously-pointed context word
        # back in, run one decoder LSTM step, then attend over the context.
        # NOTE(review): the all-ones step mask is hard-coded to shape (5),
        # which only matches a batch size of 5 -- confirm/parameterize.
        prior_word_embedding = cqembed[prior_word_index, tensor.arange(n_samples), :] # batch_size * (embed+2*lstm_size)
        decoded, c = _lstm(tensor.ones(shape=(5),dtype=theano.config.floatX), prior_word_embedding, decoded_old, c_, 'lstm_de') # batch_size * decoder_lstm_output_dim
        attention_weights = tensor.dot(cenc, tparams['ptr_W1']) + tensor.dot(decoded, tparams['ptr_W2']) #(context)length * batch_size * (decoder_lstm_output_dim=lstm_size*2)
        attention_weights = tensor.tanh(attention_weights) # length * batch_size * lstm_size*2
        attention_weights = tensor.dot(attention_weights, tparams['ptr_v']) # length * batch_size
        prob = softmax(hiddens_mask, attention_weights)
        prediction_index = prob.argmax(axis=0) #batch_size
        return prediction_index, decoded, c
    def _ptr_probs(ans_indice_mask, ans_indice, decoded_old, c_, _, cenc, hiddens_mask): #decoded_old Initialized with cenc[-1]
        # Teacher-forced step: condition on the gold answer index instead of
        # the model's own prediction.
        pred_cembed = cqembed[ans_indice, tensor.arange(n_samples), :] # batch_size * (embed+2*lstm_size)
        decoded, c = _lstm(ans_indice_mask, pred_cembed, decoded_old, c_, 'lstm_de') # batch_size * decoder_lstm_output_dim
        attention_weights = tensor.dot(cenc, tparams['ptr_W1']) + tensor.dot(decoded, tparams['ptr_W2']) #(context)length * batch_size * (decoder_lstm_output_dim=lstm_size*2)
        attention_weights = tensor.tanh(attention_weights) # length * batch_size * lstm_size*2
        attention_weights = tensor.dot(attention_weights, tparams['ptr_v']) # length * batch_size
        prob = softmax(hiddens_mask, attention_weights)
        return decoded, c, prob
    # decoding
    # Force the first context position to be attendable for every sample.
    hiddens_mask = tensor.set_subtensor(context_mask[0, :], tensor.constant(1, dtype=theano.config.floatX))
    gen_steps = 5
    gen_vals , _ = theano.scan(prediction_ptr_probs,
                               sequences=None,
                               outputs_info=[tensor.alloc(numpy.int64(0), n_samples), #context_length * batch_size
                                             cenc[-1],
                                             tensor.alloc(numpy_floatX(0.), n_samples, decoder_lstm_output_dim)], #decoded embeddings (d in paper) cells[-1], #batch_size * (decoder_lstm_output_dim=2*lstm_size)
                               non_sequences=[cenc, hiddens_mask],
                               name="generating",
                               n_steps=gen_steps)
    rval, _ = theano.scan(_ptr_probs,
                          sequences=[ans_indices_mask, ans_indices],
                          outputs_info=[cenc[-1], # batch_size * (dim_proj=decoder_lstm_output_dim=2*lstm_size) init value for decoded step i-1
                                        tensor.alloc(numpy_floatX(0.), n_samples, decoder_lstm_output_dim), #decoded embeddings (d in paper) cells[-1], #batch_size * (decoder_lstm_output_dim=2*lstm_size)
                                        tensor.alloc(numpy_floatX(0.), n_sizes, n_samples)], #context_length * batch_size
                          non_sequences=[cenc, hiddens_mask],
                          name='decoding',
                          n_steps=n_steps)
    preds = rval[2] #length * batch_size
    generations = gen_vals[0]
    return preds, generations
class Model():
    """Question-answering model: bidirectional-LSTM encoders over question
    and context, followed by a pointer-network decoder that selects answer
    positions in the context. Builds the full Theano/Blocks training graph."""
    def __init__(self, config, vocab_size):
        # Symbolic batch inputs, all int matrices of shape (batch, length)
        # until the dimshuffles below transpose them to (length, batch).
        question = tensor.imatrix('question')
        question_mask = tensor.imatrix('question_mask')
        context = tensor.imatrix('context')
        context_mask = tensor.imatrix('context_mask')
        answer = tensor.imatrix('answer')
        answer_mask = tensor.imatrix('answer_mask')
        ans_indices = tensor.imatrix('ans_indices') # n_steps * n_samples
        ans_indices_mask = tensor.imatrix('ans_indices_mask')
        # 0/1 indicator per sample of which vocabulary words occur in its context.
        context_bag = tensor.eq(context[:,:,None],tensor.arange(vocab_size)).sum(axis = 1).clip(0,1)
        bricks = []
        question = question.dimshuffle(1, 0)
        question_mask = question_mask.dimshuffle(1, 0)
        context = context.dimshuffle(1, 0)
        context_mask = context_mask.dimshuffle(1, 0)
        answer = answer.dimshuffle(1, 0)
        answer_mask = answer_mask.dimshuffle(1, 0)
        ans_indices = ans_indices.dimshuffle(1, 0)
        ans_indices_mask = ans_indices_mask.dimshuffle(1, 0)
        # Embed questions and context
        embed = LookupTable(vocab_size, config.embed_size, name='question_embed')
        embed.weights_init = IsotropicGaussian(0.01)
        # embeddings_initial_value = init_embedding_table(filename='embeddings/vocab_embeddings.txt')
        # embed.weights_init = Constant(embeddings_initial_value)
        # Calculate question encoding (concatenate layer1)
        qembed = embed.apply(question)
        qlstms, qhidden_list = make_bidir_lstm_stack(qembed, config.embed_size, question_mask.astype(theano.config.floatX),
                                                     config.question_lstm_size, config.question_skip_connections, 'q')
        bricks = bricks + qlstms
        # Question encoding = final hidden state of each direction/layer.
        if config.question_skip_connections:
            qenc_dim = 2*sum(config.question_lstm_size)
            qenc = tensor.concatenate([h[-1,:,:] for h in qhidden_list], axis=1)
        else:
            qenc_dim = 2*config.question_lstm_size[-1]
            qenc = tensor.concatenate([h[-1,:,:] for h in qhidden_list[-2:]], axis=1)
        qenc.name = 'qenc'
        #embed size: 200, lstm_size = 256
        #qenc: length * batch_size * (2*lstm_size)
        # Calculate context encoding (concatenate layer1)
        cembed = embed.apply(context)
        # Condition the context encoder on the question by appending qenc to
        # every context position's embedding.
        cqembed = tensor.concatenate([cembed, tensor.extra_ops.repeat(qenc[None, :, :], cembed.shape[0], axis=0)], axis=2) #length * batch_size * (embed+2*lstm_size) this is what goes into encoder
        clstms, chidden_list = make_bidir_lstm_stack(cqembed, config.embed_size + qenc_dim, context_mask.astype(theano.config.floatX),
                                                     config.ctx_lstm_size, config.ctx_skip_connections, 'ctx')
        bricks = bricks + clstms
        if config.ctx_skip_connections:
            cenc_dim = 2*sum(config.ctx_lstm_size) #2 : fw & bw
            cenc = tensor.concatenate(chidden_list, axis=2)
        else:
            cenc_dim = 2*config.question_lstm_size[-1]
            cenc = tensor.concatenate(chidden_list[-2:], axis=2)
        cenc.name = 'cenc'
        #cenc: length * batch_size * (2*lstm_size)
        #pointer networks decoder LSTM and Attention parameters
        params = init_params(data_dim=config.decoder_data_dim, lstm_dim=config.decoder_lstm_output_dim)
        tparams = init_tparams(params)
        self.theano_params = []
        # Tag raw theano parameters with Blocks roles so VariableFilter and
        # weight-noise regularization below can find them.
        add_role(tparams['lstm_de_W'],WEIGHT)
        add_role(tparams['lstm_de_U'],WEIGHT)
        add_role(tparams['lstm_de_b'],BIAS)
        add_role(tparams['ptr_v'],WEIGHT)
        add_role(tparams['ptr_W1'],WEIGHT)
        add_role(tparams['ptr_W2'],WEIGHT)
        self.theano_params = tparams.values()
        # for p in tparams.values():
        #     add_role(p, WEIGHT)
        #     self.theano_params.append(p)
        #n_steps = length , n_samples = batch_size
        n_steps = ans_indices.shape[0]
        n_samples = ans_indices.shape[1]
        preds, generations = ptr_network(tparams,
                                         cqembed,
                                         context_mask.astype(theano.config.floatX),
                                         ans_indices,
                                         ans_indices_mask.astype(theano.config.floatX),
                                         config.decoder_lstm_output_dim,
                                         cenc)
        self.generations = generations
        # Gather, for each (step, sample), the probability assigned to the
        # gold answer index: preds[step, ans_indices[step, sample], sample].
        idx_steps = tensor.outer(tensor.arange(n_steps, dtype='int64'), tensor.ones((n_samples,), dtype='int64'))
        idx_samples = tensor.outer(tensor.ones((n_steps,), dtype='int64'), tensor.arange(n_samples, dtype='int64'))
        probs = preds[idx_steps, ans_indices, idx_samples]
        # probs *= y_mask
        # Small epsilon keeps log() finite.
        off = 1e-8
        if probs.dtype == 'float16':
            off = 1e-6
        # probs += (1 - y_mask) # change unmasked position to 1, since log(1) = 0
        probs += off
        # probs_printed = theano.printing.Print('this is probs')(probs)
        # Masked mean negative log-likelihood per sample, averaged over batch.
        cost = -tensor.log(probs)
        cost *= ans_indices_mask
        cost = cost.sum(axis=0) / ans_indices_mask.sum(axis=0)
        cost = cost.mean()
        # Apply dropout
        cg = ComputationGraph([cost])
        if config.w_noise > 0:
            noise_vars = VariableFilter(roles=[WEIGHT])(cg)
            cg = apply_noise(cg, noise_vars, config.w_noise)
        if config.dropout > 0:
            cg = apply_dropout(cg, qhidden_list + chidden_list, config.dropout)
        [cost_reg] = cg.outputs
        # Other stuff
        cost.name = 'cost'
        cost_reg.name = 'cost_reg'
        self.sgd_cost = cost_reg
        self.monitor_vars = [[cost_reg]]
        self.monitor_vars_valid = [[cost_reg]]
        # Initialize bricks
        embed.initialize()
        for brick in bricks:
            brick.weights_init = config.weights_init
            brick.biases_init = config.biases_init
            brick.initialize()
| {
"content_hash": "4daf3e5b7c911997f00ac9001b482737",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 213,
"avg_line_length": 47.54205607476636,
"alnum_prop": 0.5999606840967171,
"repo_name": "arianhosseini/Question-Answering",
"id": "71b880e70d2131c85a76aa0017d26ee217adab86",
"size": "15261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/ptr_net.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226172"
}
],
"symlink_target": ""
} |
class Global:
    """Demonstrates a class-level attribute shared through the class object."""
    var = 'Hello'


if __name__ == "__main__":
    # Parenthesized print works under both Python 2 (expression statement)
    # and Python 3 (function call); the original bare `print x` was 2-only.
    print(Global.var)
    # Rebinding through the class changes the value for all readers.
    Global.var = 5
    print(Global.var)
"content_hash": "778dc7570c368ee17ba71dc5069ee658",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 26,
"avg_line_length": 17.142857142857142,
"alnum_prop": 0.5583333333333333,
"repo_name": "traxex33/Twitter-Analysis",
"id": "79906906deebc15a0df0bc9dff355daad219bbf3",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14769"
}
],
"symlink_target": ""
} |
# Default settings; any of these can be overridden by settings_local.py.
GEVENT_CHECKS = True
# Throttle window (seconds) and maximum requests allowed within it.
THROTTLE_SECONDS = 60 * 60
THROTTLE_LIMIT = 1800
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
try:
    from settings_local import *
except ImportError:
    # No local override module present; keep the defaults above.
    pass
import redis
# Shared Redis client built from the (possibly overridden) settings.
cache = redis.StrictRedis(
    host=REDIS_HOST,
    port=REDIS_PORT,
    db=REDIS_DB)
"content_hash": "eb65d0bb8edb077405ff403bc53d1432",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 32,
"avg_line_length": 14.8,
"alnum_prop": 0.6925675675675675,
"repo_name": "n3storm/emailpie",
"id": "388dee58a0d2b311e619d450b267127505c0ae8b",
"size": "296",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "emailpie/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from Child import Child
from Node import Node # noqa: I201
# Declarative grammar for Swift expression syntax nodes; each Node (with its
# Child slots) is consumed by the gyb templates to generate SwiftSyntax types.
EXPR_NODES = [
    # An inout expression.
    # &x
    Node('InOutExpr', kind='Expr',
         children=[
             Child('Ampersand', kind='AmpersandToken'),
             Child('Identifier', kind='IdentifierToken'),
         ]),
    # A #column expression.
    Node('PoundColumnExpr', kind='Expr',
         children=[
             Child('PoundColumn', kind='PoundColumnToken'),
         ]),
    # A homogeneous collection of FunctionCallArgument nodes.
    Node('FunctionCallArgumentList', kind='SyntaxCollection',
         element='FunctionCallArgument'),
    # The try operator.
    # try foo()
    # try? foo()
    # try! foo()
    Node('TryOperator', kind='Syntax',
         children=[
             Child('TryKeyword', kind='TryToken'),
             Child('QuestionOrExclamationMark', kind='Token',
                   is_optional=True,
                   token_choices=[
                       'PostfixQuestionMarkToken',
                       'ExclamationMarkToken',
                   ]),
         ]),
    # A #line expression.
    Node('PoundLineExpr', kind='Expr',
         children=[
             Child('PoundLine', kind='PoundLineToken'),
         ]),
    # A #file expression.
    Node('PoundFileExpr', kind='Expr',
         children=[
             Child('PoundFile', kind='PoundFileToken'),
         ]),
    # A #function expression.
    Node('PoundFunctionExpr', kind='Expr',
         children=[
             Child('PoundFunction', kind='PoundFunctionToken'),
         ]),
    # symbolic-reference-expression -> identifier generic-argument-clause?
    Node('SymbolicReferenceExpr', kind='Expr',
         children=[
             Child('Identifier', kind='IdentifierToken'),
             Child('GenericArgumentClause', kind='GenericArgumentClause',
                   is_optional=True),
         ]),
    # A prefix operator expression.
    # -x
    # !true
    Node('PrefixOperatorExpr', kind='Expr',
         children=[
             Child('OperatorToken', kind='PrefixOperatorToken',
                   is_optional=True),
             Child('PostfixExpression', kind='Expr'),
         ]),
    # A floating-point literal
    # 4.0
    # -3.9
    # +4e20
    Node('FloatLiteralExpr', kind='Expr',
         children=[
             Child('Sign', kind='PrefixOperatorToken',
                   is_optional=True),
             Child('FloatingDigits', kind='FloatingLiteralToken'),
         ]),
    # A function call: callee '(' arguments ')'.
    Node('FunctionCallExpr', kind='Expr',
         children=[
             Child('CalledExpression', kind='Expr'),
             Child('LeftParen', kind='LeftParenToken'),
             Child('ArgumentList', kind='FunctionCallArgumentList'),
             Child('RightParen', kind='RightParenToken'),
         ]),
    # function-call-argument -> label? ':'? expression ','?
    Node('FunctionCallArgument', kind='Syntax',
         children=[
             Child('Label', kind='IdentifierToken',
                   is_optional=True),
             Child('Colon', kind='ColonToken',
                   is_optional=True),
             Child('Expression', kind='Expr'),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),
    # An integer literal.
    # 3
    # +3_400
    # +0x4f
    Node('IntegerLiteralExpr', kind='Expr',
         children=[
             Child('Sign', kind='PrefixOperatorToken',
                   is_optional=True),
             Child('Digits', kind='IntegerLiteralToken'),
         ]),
    # A string literal.
    Node('StringLiteralExpr', kind='Expr',
         children=[
             Child("StringLiteral", kind='StringLiteralToken')
         ])
]
| {
"content_hash": "b62c49884e00726cb8e21d3e2369fad2",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 74,
"avg_line_length": 30.34453781512605,
"alnum_prop": 0.5306009415674329,
"repo_name": "wenluma/swift",
"id": "07a9d5aff9fe990c0dc3195df3c45caac48d789d",
"size": "3611",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/ExprNodes.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import sys
import os
import traceback
import click
import itertools
from netlib.http import CONTENT_MISSING
import netlib.utils
from . import flow, filt, contentviews
from .exceptions import ContentViewException
from .models import HTTPRequest
class DumpError(Exception):
    """Raised for fatal errors while configuring or running the dump master."""
    pass
class Options(object):
attributes = [
"app",
"app_host",
"app_port",
"anticache",
"anticomp",
"client_replay",
"filtstr",
"flow_detail",
"keepserving",
"kill",
"no_server",
"nopop",
"refresh_server_playback",
"replacements",
"rfile",
"rheaders",
"setheaders",
"server_replay",
"scripts",
"showhost",
"stickycookie",
"stickyauth",
"stream_large_bodies",
"verbosity",
"outfile",
"replay_ignore_content",
"replay_ignore_params",
"replay_ignore_payload_params",
"replay_ignore_host"
]
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
for i in self.attributes:
if not hasattr(self, i):
setattr(self, i, None)
class DumpMaster(flow.FlowMaster):
    """Flow master that renders each completed flow to a console/file stream.

    This is the mitmdump-style core: __init__ wires the supplied Options
    into the base FlowMaster (replay modes, sticky cookies/auth, inline
    scripts, streaming), and the handle_* callbacks echo a colored
    one-line summary per flow, plus headers/body depending on
    ``options.flow_detail``.
    """
    def __init__(self, server, options, outfile=None):
        """Configure the master from *options*; raises DumpError on bad input.

        *outfile* is the stream echo() writes to (None means stdout).
        """
        flow.FlowMaster.__init__(self, server, flow.State())
        self.outfile = outfile
        self.o = options
        self.anticache = options.anticache
        self.anticomp = options.anticomp
        self.showhost = options.showhost
        self.replay_ignore_params = options.replay_ignore_params
        self.replay_ignore_content = options.replay_ignore_content
        self.replay_ignore_host = options.replay_ignore_host
        self.refresh_server_playback = options.refresh_server_playback
        self.replay_ignore_payload_params = options.replay_ignore_payload_params
        self.set_stream_large_bodies(options.stream_large_bodies)
        # Optional flow filter: only matching flows are echoed/streamed.
        if options.filtstr:
            self.filt = filt.parse(options.filtstr)
        else:
            self.filt = None
        if options.stickycookie:
            self.set_stickycookie(options.stickycookie)
        if options.stickyauth:
            self.set_stickyauth(options.stickyauth)
        if options.outfile:
            # options.outfile is a (path, mode) pair; stream matching
            # flows to that file as they complete.
            path = os.path.expanduser(options.outfile[0])
            try:
                f = open(path, options.outfile[1])
                self.start_stream(f, self.filt)
            except IOError as v:
                raise DumpError(v.strerror)
        if options.replacements:
            for i in options.replacements:
                self.replacehooks.add(*i)
        if options.setheaders:
            for i in options.setheaders:
                self.setheaders.add(*i)
        if options.server_replay:
            self.start_server_playback(
                self._readflow(options.server_replay),
                options.kill, options.rheaders,
                not options.keepserving,
                options.nopop,
                options.replay_ignore_params,
                options.replay_ignore_content,
                options.replay_ignore_payload_params,
                options.replay_ignore_host
            )
        if options.client_replay:
            self.start_client_playback(
                self._readflow(options.client_replay),
                not options.keepserving
            )
        # Load inline scripts; any load error is fatal in dump mode.
        scripts = options.scripts or []
        for command in scripts:
            err = self.load_script(command)
            if err:
                raise DumpError(err)
        if options.rfile:
            try:
                self.load_flows_file(options.rfile)
            except flow.FlowReadError as v:
                self.add_event("Flow file corrupted.", "error")
                raise DumpError(v)
        if self.o.app:
            self.start_app(self.o.app_host, self.o.app_port)
    def _readflow(self, paths):
        """
        Utility function that reads a list of flows
        or raises a DumpError if that fails.
        """
        try:
            return flow.read_flows_from_paths(paths)
        except flow.FlowReadError as e:
            raise DumpError(e.strerror)
    def add_event(self, e, level="info"):
        """Echo event text *e* if the configured verbosity admits *level*."""
        # error=0 < info=1 < debug=2; unknown levels behave like "info".
        needed = dict(error=0, info=1, debug=2).get(level, 1)
        if self.o.verbosity >= needed:
            self.echo(
                e,
                fg="red" if level == "error" else None,
                dim=(level == "debug")
            )
    @staticmethod
    def indent(n, text):
        """Return *text* stripped and re-indented by *n* spaces per line."""
        l = str(text).strip().splitlines()
        pad = " " * n
        return "\n".join(pad + i for i in l)
    def echo(self, text, indent=None, **style):
        """Write styled *text* to the output stream (stdout when outfile is None)."""
        if indent:
            text = self.indent(indent, text)
        click.secho(text, file=self.outfile, **style)
    def _echo_message(self, message):
        """Echo *message* headers (detail >= 2) and body (detail >= 3)."""
        if self.o.flow_detail >= 2:
            headers = "\r\n".join(
                "{}: {}".format(
                    click.style(k, fg="blue", bold=True),
                    click.style(v, fg="blue"))
                for k, v in message.headers.fields
            )
            self.echo(headers, indent=4)
        if self.o.flow_detail >= 3:
            if message.content == CONTENT_MISSING:
                self.echo("(content missing)", indent=4)
            elif message.content:
                self.echo("")
                try:
                    type, lines = contentviews.get_content_view(
                        contentviews.get("Auto"),
                        message.content,
                        headers=message.headers
                    )
                except ContentViewException:
                    # Pretty-printer failed: log the traceback at debug
                    # level and fall back to the raw view.
                    s = "Content viewer failed: \n" + traceback.format_exc()
                    self.add_event(s, "debug")
                    type, lines = contentviews.get_content_view(
                        contentviews.get("Raw"),
                        message.content,
                        headers=message.headers
                    )
                # Map content-view token styles to click styling kwargs.
                styles = dict(
                    highlight=dict(bold=True),
                    offset=dict(fg="blue"),
                    header=dict(fg="green", bold=True),
                    text=dict(fg="green")
                )
                def colorful(line):
                    # Render one content-view line as styled fragments.
                    yield u"    "  # we can already indent here
                    for (style, text) in line:
                        yield click.style(text, **styles.get(style, {}))
                if self.o.flow_detail == 3:
                    # At detail level 3, cap the body at 70 rendered lines.
                    lines_to_echo = itertools.islice(lines, 70)
                else:
                    lines_to_echo = lines
                lines_to_echo = list(lines_to_echo)
                content = u"\r\n".join(
                    u"".join(colorful(line)) for line in lines_to_echo
                )
                self.echo(content)
                if next(lines, None):
                    # Generator still has lines left -> output was truncated.
                    self.echo("(cut off)", indent=4, dim=True)
        if self.o.flow_detail >= 2:
            self.echo("")
    def _echo_request_line(self, flow):
        """Echo the one-line request summary: client, method, URL."""
        if flow.request.stickycookie:
            stickycookie = click.style("[stickycookie] ", fg="yellow", bold=True)
        else:
            stickycookie = ""
        if flow.client_conn:
            client = click.style(flow.client_conn.address.host, bold=True)
        else:
            # No live client connection: the flow comes from replay.
            client = click.style("[replay]", fg="yellow", bold=True)
        method = flow.request.method
        method_color=dict(
            GET="green",
            DELETE="red"
        ).get(method.upper(), "magenta")
        method = click.style(method, fg=method_color, bold=True)
        if self.showhost:
            url = flow.request.pretty_url
        else:
            url = flow.request.url
        url = click.style(url, bold=True)
        line = "{stickycookie}{client} {method} {url}".format(
            stickycookie=stickycookie,
            client=client,
            method=method,
            url=url
        )
        self.echo(line)
    def _echo_response_line(self, flow):
        """Echo the one-line response summary: status code, reason, size."""
        if flow.response.is_replay:
            replay = click.style("[replay] ", fg="yellow", bold=True)
        else:
            replay = ""
        code = flow.response.status_code
        code_color = None
        if 200 <= code < 300:
            code_color = "green"
        elif 300 <= code < 400:
            code_color = "magenta"
        elif 400 <= code < 600:
            code_color = "red"
        # Easter egg: HTTP 418 "I'm a teapot" blinks.
        code = click.style(str(code), fg=code_color, bold=True, blink=(code == 418))
        reason = click.style(flow.response.reason, fg=code_color, bold=True)
        if flow.response.content == CONTENT_MISSING:
            size = "(content missing)"
        else:
            size = netlib.utils.pretty_size(len(flow.response.content))
        size = click.style(size, bold=True)
        arrows = click.style("<<", bold=True)
        line = "{replay} {arrows} {code} {reason} {size}".format(
            replay=replay,
            arrows=arrows,
            code=code,
            reason=reason,
            size=size
        )
        self.echo(line)
    def echo_flow(self, f):
        """Echo request/response/error of flow *f* at the configured detail level."""
        if self.o.flow_detail == 0:
            return
        if f.request:
            self._echo_request_line(f)
            self._echo_message(f.request)
        if f.response:
            self._echo_response_line(f)
            self._echo_message(f.response)
        if f.error:
            self.echo(" << {}".format(f.error.msg), bold=True, fg="red")
        if self.outfile:
            self.outfile.flush()
    def _process_flow(self, f):
        """Drop *f* from state and echo it if it passes the configured filter."""
        self.state.delete_flow(f)
        if self.filt and not f.match(self.filt):
            return
        self.echo_flow(f)
    def handle_request(self, f):
        """Acknowledge the request so the proxy continues; returns *f*."""
        flow.FlowMaster.handle_request(self, f)
        if f:
            f.reply()
        return f
    def handle_response(self, f):
        """Acknowledge the response, then echo the completed flow."""
        flow.FlowMaster.handle_response(self, f)
        if f:
            f.reply()
            self._process_flow(f)
        return f
    def handle_error(self, f):
        """Echo flows that terminated with an error."""
        flow.FlowMaster.handle_error(self, f)
        if f:
            self._process_flow(f)
        return f
    def shutdown(self):  # pragma: no cover
        return flow.FlowMaster.shutdown(self)
    def run(self):  # pragma: no cover
        """Run the proxy loop; with rfile and no keepserving, exit immediately."""
        if self.o.rfile and not self.o.keepserving:
            self.shutdown()
            return
        try:
            return flow.FlowMaster.run(self)
        except BaseException:
            # Make sure streams/servers are torn down on any exit path.
            self.shutdown()
            raise
| {
"content_hash": "7c0da910bc1411dfd35a85eda46eb3e2",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 84,
"avg_line_length": 30.610951008645532,
"alnum_prop": 0.5189229900207117,
"repo_name": "0xwindows/InfoLeak",
"id": "d2b130f1d6c8ea578875fbc1cb30b03886638b3f",
"size": "10622",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libmproxy/dump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "CSS",
"bytes": "194068"
},
{
"name": "HTML",
"bytes": "2824"
},
{
"name": "JavaScript",
"bytes": "1755960"
},
{
"name": "Python",
"bytes": "661610"
},
{
"name": "Shell",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from HttpMD5Util import buildMySign,httpGet,httpPost
class OKCoinFuture:
    """Minimal REST client for the OKCoin futures API (v1).

    Public market-data endpoints are plain HTTP GETs; account/trading
    endpoints are signed with the secret key via ``buildMySign`` and sent
    as HTTP POSTs. All methods return whatever ``httpGet``/``httpPost``
    return (the decoded API response).
    """

    def __init__(self, url, apikey, secretkey):
        self.__url = url              # API host, e.g. "www.okcoin.com"
        self.__apikey = apikey        # public API key
        self.__secretkey = secretkey  # secret used to sign POST requests

    # ---- public market data (GET) ----

    def future_ticker(self, symbol, contractType):
        """Futures ticker for *symbol* / *contractType*."""
        FUTURE_TICKER_RESOURCE = "/api/v1/future_ticker.do"
        params = ''
        if symbol:
            params += '&symbol=' + symbol if params else 'symbol=' + symbol
        if contractType:
            # Bug fix: the no-prior-params branch used to append *symbol*
            # instead of the contract type.
            params += '&contract_type=' + contractType if params else 'contract_type=' + contractType
        return httpGet(self.__url, FUTURE_TICKER_RESOURCE, params)

    def future_depth(self, symbol, contractType, size):
        """Futures order-book depth; *size* limits the number of levels."""
        FUTURE_DEPTH_RESOURCE = "/api/v1/future_depth.do"
        params = ''
        if symbol:
            params += '&symbol=' + symbol if params else 'symbol=' + symbol
        if contractType:
            # Bug fix: same copy-paste defect as in future_ticker.
            params += '&contract_type=' + contractType if params else 'contract_type=' + contractType
        if size:
            params += '&size=' + size if params else 'size=' + size
        return httpGet(self.__url, FUTURE_DEPTH_RESOURCE, params)

    def future_trades(self, symbol, contractType):
        """Recent futures trade records."""
        FUTURE_TRADES_RESOURCE = "/api/v1/future_trades.do"
        params = ''
        if symbol:
            params += '&symbol=' + symbol if params else 'symbol=' + symbol
        if contractType:
            # Bug fix: same copy-paste defect as in future_ticker.
            params += '&contract_type=' + contractType if params else 'contract_type=' + contractType
        return httpGet(self.__url, FUTURE_TRADES_RESOURCE, params)

    def future_index(self, symbol):
        """Futures index price for *symbol*."""
        FUTURE_INDEX = "/api/v1/future_index.do"
        params = ''
        if symbol:
            params = 'symbol=' + symbol
        return httpGet(self.__url, FUTURE_INDEX, params)

    def exchange_rate(self):
        """USD/CNY exchange rate used by the exchange."""
        EXCHANGE_RATE = "/api/v1/exchange_rate.do"
        return httpGet(self.__url, EXCHANGE_RATE, '')

    def future_estimated_price(self, symbol):
        """Estimated delivery (settlement) price for *symbol*."""
        FUTURE_ESTIMATED_PRICE = "/api/v1/future_estimated_price.do"
        params = ''
        if symbol:
            params = 'symbol=' + symbol
        return httpGet(self.__url, FUTURE_ESTIMATED_PRICE, params)

    # ---- signed account / trading endpoints (POST) ----

    def future_userinfo(self):
        """Cross-margin futures account information."""
        FUTURE_USERINFO = "/api/v1/future_userinfo.do?"
        params = {}
        params['api_key'] = self.__apikey
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_USERINFO, params)

    def future_position(self, symbol, contractType):
        """Cross-margin futures position information."""
        FUTURE_POSITION = "/api/v1/future_position.do?"
        params = {
            'api_key': self.__apikey,
            'symbol': symbol,
            'contract_type': contractType
        }
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_POSITION, params)

    def future_trade(self, symbol, contractType, price='', amount='', tradeType='', matchPrice='', leverRate=''):
        """Place a futures order; *price* is omitted for market orders."""
        FUTURE_TRADE = "/api/v1/future_trade.do?"
        params = {
            'api_key': self.__apikey,
            'symbol': symbol,
            'contract_type': contractType,
            'amount': amount,
            'type': tradeType,
            'match_price': matchPrice,
            'lever_rate': leverRate
        }
        if price:
            params['price'] = price
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_TRADE, params)

    def future_batchTrade(self, symbol, contractType, orders_data, leverRate):
        """Place a batch of futures orders (*orders_data* is a JSON string)."""
        FUTURE_BATCH_TRADE = "/api/v1/future_batch_trade.do?"
        params = {
            'api_key': self.__apikey,
            'symbol': symbol,
            'contract_type': contractType,
            'orders_data': orders_data,
            'lever_rate': leverRate
        }
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_BATCH_TRADE, params)

    def future_cancel(self, symbol, contractType, orderId):
        """Cancel a futures order by *orderId*."""
        FUTURE_CANCEL = "/api/v1/future_cancel.do?"
        params = {
            'api_key': self.__apikey,
            'symbol': symbol,
            'contract_type': contractType,
            'order_id': orderId
        }
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_CANCEL, params)

    def future_orderinfo(self, symbol, contractType, orderId, status, currentPage, pageLength):
        """Query futures order information, paginated."""
        FUTURE_ORDERINFO = "/api/v1/future_order_info.do?"
        params = {
            'api_key': self.__apikey,
            'symbol': symbol,
            'contract_type': contractType,
            'order_id': orderId,
            'status': status,
            'current_page': currentPage,
            'page_length': pageLength
        }
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_ORDERINFO, params)

    def future_userinfo_4fix(self):
        """Fixed-margin futures account information."""
        FUTURE_INFO_4FIX = "/api/v1/future_userinfo_4fix.do?"
        params = {'api_key': self.__apikey}
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_INFO_4FIX, params)

    def future_position_4fix(self, symbol, contractType, type1):
        """Fixed-margin futures position information."""
        FUTURE_POSITION_4FIX = "/api/v1/future_position_4fix.do?"
        params = {
            'api_key': self.__apikey,
            'symbol': symbol,
            'contract_type': contractType,
            'type': type1
        }
        params['sign'] = buildMySign(params, self.__secretkey)
        return httpPost(self.__url, FUTURE_POSITION_4FIX, params)
| {
"content_hash": "fd25e7210c8bb98709b062a54eec15b8",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 106,
"avg_line_length": 34.646341463414636,
"alnum_prop": 0.5909890883491729,
"repo_name": "frrp/cryptoexchange",
"id": "7ec9d58440ea2150e4bfc5bf570d0a12ab4dca1a",
"size": "5955",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cryptoexchange/OkcoinFutureAPI.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "66504"
}
],
"symlink_target": ""
} |
from .instance_view_status import InstanceViewStatus
from .sub_resource import SubResource
from .sku import Sku
from .availability_set import AvailabilitySet
from .virtual_machine_size import VirtualMachineSize
from .virtual_machine_extension_image import VirtualMachineExtensionImage
from .virtual_machine_image_resource import VirtualMachineImageResource
from .virtual_machine_extension_instance_view import VirtualMachineExtensionInstanceView
from .virtual_machine_extension import VirtualMachineExtension
from .purchase_plan import PurchasePlan
from .os_disk_image import OSDiskImage
from .data_disk_image import DataDiskImage
from .virtual_machine_image import VirtualMachineImage
from .usage_name import UsageName
from .usage import Usage
from .virtual_machine_capture_parameters import VirtualMachineCaptureParameters
from .virtual_machine_capture_result import VirtualMachineCaptureResult
from .plan import Plan
from .hardware_profile import HardwareProfile
from .image_reference import ImageReference
from .key_vault_secret_reference import KeyVaultSecretReference
from .key_vault_key_reference import KeyVaultKeyReference
from .disk_encryption_settings import DiskEncryptionSettings
from .virtual_hard_disk import VirtualHardDisk
from .managed_disk_parameters import ManagedDiskParameters
from .os_disk import OSDisk
from .data_disk import DataDisk
from .storage_profile import StorageProfile
from .additional_unattend_content import AdditionalUnattendContent
from .win_rm_listener import WinRMListener
from .win_rm_configuration import WinRMConfiguration
from .windows_configuration import WindowsConfiguration
from .ssh_public_key import SshPublicKey
from .ssh_configuration import SshConfiguration
from .linux_configuration import LinuxConfiguration
from .vault_certificate import VaultCertificate
from .vault_secret_group import VaultSecretGroup
from .os_profile import OSProfile
from .network_interface_reference import NetworkInterfaceReference
from .network_profile import NetworkProfile
from .boot_diagnostics import BootDiagnostics
from .diagnostics_profile import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view import VirtualMachineAgentInstanceView
from .disk_instance_view import DiskInstanceView
from .boot_diagnostics_instance_view import BootDiagnosticsInstanceView
from .virtual_machine_identity import VirtualMachineIdentity
from .maintenance_redeploy_status import MaintenanceRedeployStatus
from .virtual_machine_instance_view import VirtualMachineInstanceView
from .virtual_machine import VirtualMachine
from .rolling_upgrade_policy import RollingUpgradePolicy
from .upgrade_policy import UpgradePolicy
from .image_os_disk import ImageOSDisk
from .image_data_disk import ImageDataDisk
from .image_storage_profile import ImageStorageProfile
from .image import Image
from .virtual_machine_scale_set_identity import VirtualMachineScaleSetIdentity
from .virtual_machine_scale_set_os_profile import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_update_os_profile import VirtualMachineScaleSetUpdateOSProfile
from .virtual_machine_scale_set_managed_disk_parameters import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_update_os_disk import VirtualMachineScaleSetUpdateOSDisk
from .virtual_machine_scale_set_data_disk import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile import VirtualMachineScaleSetStorageProfile
from .virtual_machine_scale_set_update_storage_profile import VirtualMachineScaleSetUpdateStorageProfile
from .api_entity_reference import ApiEntityReference
from .virtual_machine_scale_set_public_ip_address_configuration_dns_settings import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from .virtual_machine_scale_set_public_ip_address_configuration import VirtualMachineScaleSetPublicIPAddressConfiguration
from .virtual_machine_scale_set_update_public_ip_address_configuration import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from .virtual_machine_scale_set_ip_configuration import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_update_ip_configuration import VirtualMachineScaleSetUpdateIPConfiguration
from .virtual_machine_scale_set_network_configuration_dns_settings import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from .virtual_machine_scale_set_network_configuration import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_update_network_configuration import VirtualMachineScaleSetUpdateNetworkConfiguration
from .virtual_machine_scale_set_network_profile import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_update_network_profile import VirtualMachineScaleSetUpdateNetworkProfile
from .virtual_machine_scale_set_extension import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set_update_vm_profile import VirtualMachineScaleSetUpdateVMProfile
from .virtual_machine_scale_set import VirtualMachineScaleSet
from .virtual_machine_scale_set_update import VirtualMachineScaleSetUpdate
from .virtual_machine_scale_set_vm_instance_ids import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku import VirtualMachineScaleSetSku
from .virtual_machine_scale_set_vm import VirtualMachineScaleSetVM
from .virtual_machine_health_status import VirtualMachineHealthStatus
from .virtual_machine_scale_set_vm_instance_view import VirtualMachineScaleSetVMInstanceView
from .rolling_upgrade_running_status import RollingUpgradeRunningStatus
from .rolling_upgrade_progress_info import RollingUpgradeProgressInfo
from .api_error_base import ApiErrorBase
from .inner_error import InnerError
from .api_error import ApiError
from .rolling_upgrade_status_info import RollingUpgradeStatusInfo
from .compute_long_running_operation_properties import ComputeLongRunningOperationProperties
from .resource import Resource
from .update_resource import UpdateResource
from .sub_resource_read_only import SubResourceReadOnly
from .operation_status_response import OperationStatusResponse
from .run_command_input_parameter import RunCommandInputParameter
from .run_command_input import RunCommandInput
from .run_command_parameter_definition import RunCommandParameterDefinition
from .run_command_document_base import RunCommandDocumentBase
from .run_command_document import RunCommandDocument
from .run_command_result import RunCommandResult
from .availability_set_paged import AvailabilitySetPaged
from .virtual_machine_size_paged import VirtualMachineSizePaged
from .usage_paged import UsagePaged
from .image_paged import ImagePaged
from .virtual_machine_paged import VirtualMachinePaged
from .virtual_machine_scale_set_paged import VirtualMachineScaleSetPaged
from .virtual_machine_scale_set_sku_paged import VirtualMachineScaleSetSkuPaged
from .virtual_machine_scale_set_extension_paged import VirtualMachineScaleSetExtensionPaged
from .virtual_machine_scale_set_vm_paged import VirtualMachineScaleSetVMPaged
from .run_command_document_base_paged import RunCommandDocumentBasePaged
from .compute_management_client_enums import (
StatusLevelTypes,
OperatingSystemTypes,
VirtualMachineSizeTypes,
CachingTypes,
DiskCreateOptionTypes,
StorageAccountTypes,
PassNames,
ComponentNames,
SettingNames,
ProtocolTypes,
ResourceIdentityType,
MaintenanceOperationResultCodeTypes,
UpgradeMode,
OperatingSystemStateTypes,
IPVersion,
VirtualMachinePriorityTypes,
VirtualMachineScaleSetSkuScaleType,
RollingUpgradeStatusCode,
RollingUpgradeActionType,
InstanceViewTypes,
)
# Explicit public API of this (autorest-generated) models package: every
# model class, paged collection, and enum re-exported above.
__all__ = [
    'InstanceViewStatus',
    'SubResource',
    'Sku',
    'AvailabilitySet',
    'VirtualMachineSize',
    'VirtualMachineExtensionImage',
    'VirtualMachineImageResource',
    'VirtualMachineExtensionInstanceView',
    'VirtualMachineExtension',
    'PurchasePlan',
    'OSDiskImage',
    'DataDiskImage',
    'VirtualMachineImage',
    'UsageName',
    'Usage',
    'VirtualMachineCaptureParameters',
    'VirtualMachineCaptureResult',
    'Plan',
    'HardwareProfile',
    'ImageReference',
    'KeyVaultSecretReference',
    'KeyVaultKeyReference',
    'DiskEncryptionSettings',
    'VirtualHardDisk',
    'ManagedDiskParameters',
    'OSDisk',
    'DataDisk',
    'StorageProfile',
    'AdditionalUnattendContent',
    'WinRMListener',
    'WinRMConfiguration',
    'WindowsConfiguration',
    'SshPublicKey',
    'SshConfiguration',
    'LinuxConfiguration',
    'VaultCertificate',
    'VaultSecretGroup',
    'OSProfile',
    'NetworkInterfaceReference',
    'NetworkProfile',
    'BootDiagnostics',
    'DiagnosticsProfile',
    'VirtualMachineExtensionHandlerInstanceView',
    'VirtualMachineAgentInstanceView',
    'DiskInstanceView',
    'BootDiagnosticsInstanceView',
    'VirtualMachineIdentity',
    'MaintenanceRedeployStatus',
    'VirtualMachineInstanceView',
    'VirtualMachine',
    'RollingUpgradePolicy',
    'UpgradePolicy',
    'ImageOSDisk',
    'ImageDataDisk',
    'ImageStorageProfile',
    'Image',
    'VirtualMachineScaleSetIdentity',
    'VirtualMachineScaleSetOSProfile',
    'VirtualMachineScaleSetUpdateOSProfile',
    'VirtualMachineScaleSetManagedDiskParameters',
    'VirtualMachineScaleSetOSDisk',
    'VirtualMachineScaleSetUpdateOSDisk',
    'VirtualMachineScaleSetDataDisk',
    'VirtualMachineScaleSetStorageProfile',
    'VirtualMachineScaleSetUpdateStorageProfile',
    'ApiEntityReference',
    'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings',
    'VirtualMachineScaleSetPublicIPAddressConfiguration',
    'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration',
    'VirtualMachineScaleSetIPConfiguration',
    'VirtualMachineScaleSetUpdateIPConfiguration',
    'VirtualMachineScaleSetNetworkConfigurationDnsSettings',
    'VirtualMachineScaleSetNetworkConfiguration',
    'VirtualMachineScaleSetUpdateNetworkConfiguration',
    'VirtualMachineScaleSetNetworkProfile',
    'VirtualMachineScaleSetUpdateNetworkProfile',
    'VirtualMachineScaleSetExtension',
    'VirtualMachineScaleSetExtensionProfile',
    'VirtualMachineScaleSetVMProfile',
    'VirtualMachineScaleSetUpdateVMProfile',
    'VirtualMachineScaleSet',
    'VirtualMachineScaleSetUpdate',
    'VirtualMachineScaleSetVMInstanceIDs',
    'VirtualMachineScaleSetVMInstanceRequiredIDs',
    'VirtualMachineStatusCodeCount',
    'VirtualMachineScaleSetInstanceViewStatusesSummary',
    'VirtualMachineScaleSetVMExtensionsSummary',
    'VirtualMachineScaleSetInstanceView',
    'VirtualMachineScaleSetSkuCapacity',
    'VirtualMachineScaleSetSku',
    'VirtualMachineScaleSetVM',
    'VirtualMachineHealthStatus',
    'VirtualMachineScaleSetVMInstanceView',
    'RollingUpgradeRunningStatus',
    'RollingUpgradeProgressInfo',
    'ApiErrorBase',
    'InnerError',
    'ApiError',
    'RollingUpgradeStatusInfo',
    'ComputeLongRunningOperationProperties',
    'Resource',
    'UpdateResource',
    'SubResourceReadOnly',
    'OperationStatusResponse',
    'RunCommandInputParameter',
    'RunCommandInput',
    'RunCommandParameterDefinition',
    'RunCommandDocumentBase',
    'RunCommandDocument',
    'RunCommandResult',
    'AvailabilitySetPaged',
    'VirtualMachineSizePaged',
    'UsagePaged',
    'ImagePaged',
    'VirtualMachinePaged',
    'VirtualMachineScaleSetPaged',
    'VirtualMachineScaleSetSkuPaged',
    'VirtualMachineScaleSetExtensionPaged',
    'VirtualMachineScaleSetVMPaged',
    'RunCommandDocumentBasePaged',
    'StatusLevelTypes',
    'OperatingSystemTypes',
    'VirtualMachineSizeTypes',
    'CachingTypes',
    'DiskCreateOptionTypes',
    'StorageAccountTypes',
    'PassNames',
    'ComponentNames',
    'SettingNames',
    'ProtocolTypes',
    'ResourceIdentityType',
    'MaintenanceOperationResultCodeTypes',
    'UpgradeMode',
    'OperatingSystemStateTypes',
    'IPVersion',
    'VirtualMachinePriorityTypes',
    'VirtualMachineScaleSetSkuScaleType',
    'RollingUpgradeStatusCode',
    'RollingUpgradeActionType',
    'InstanceViewTypes',
]
| {
"content_hash": "08db645130c3c3102c3d9b8ca98f145a",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 145,
"avg_line_length": 45.49473684210526,
"alnum_prop": 0.827934598179855,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "3d6e82bd216d6bad3efb717c8d9698e0d605637a",
"size": "13440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""SCons.Tool.masm
Tool-specific initialization for the Microsoft Assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
import SCons.Tool
import SCons.Util
# Suffixes treated as plain assembly vs. assembly that needs the C preprocessor.
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
# '.S' only distinguishes preprocessed assembly on case-sensitive filesystems;
# otherwise it collides with '.s' and is treated as plain assembly.
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
    ASPPSuffixes.append('.S')
else:
    ASSuffixes.append('.S')
def generate(env):
    """Add Builders and construction variables for masm to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    # Both suffix groups register the same emitters; only the action
    # differs (plain assembly vs. preprocessed assembly).
    suffix_groups = (
        (ASSuffixes, SCons.Defaults.ASAction),
        (ASPPSuffixes, SCons.Defaults.ASPPAction),
    )
    for suffixes, action in suffix_groups:
        for suffix in suffixes:
            static_obj.add_action(suffix, action)
            shared_obj.add_action(suffix, action)
            static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
            shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
    # Microsoft Macro Assembler invocation and flags.
    env['AS'] = 'ml'
    env['ASFLAGS'] = SCons.Util.CLVar('/nologo')
    env['ASPPFLAGS'] = '$ASFLAGS'
    env['ASCOM'] = '$AS $ASFLAGS /c /Fo$TARGET $SOURCES'
    env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c /Fo$TARGET $SOURCES'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
    """Return a truthy value if the 'ml' assembler can be found in the path."""
    return env.Detect('ml')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "f1d5ab084f380050d341569e7716c889",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 100,
"avg_line_length": 37.66233766233766,
"alnum_prop": 0.7196551724137931,
"repo_name": "timj/scons",
"id": "b9d88cd190cecae01cacecb433554f0855e0dbf4",
"size": "2900",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Tool/masm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import tkinter as tk
from PIL import Image
from PIL import ImageTk
class Program(tk.Tk):
    """Root window that stacks all page frames and raises one at a time."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # Single container; every page occupies the same grid cell.
        holder = tk.Frame(self)
        holder.pack(side='top', fill='both', expand=True)
        holder.grid_rowconfigure(0, weight=1)
        holder.grid_columnconfigure(0, weight=1)
        self.frames = {}
        for page_cls in (LoginPage, StartPage):
            page = page_cls(holder, self)
            self.frames[page_cls] = page
            page.grid(row=0, column=0, sticky="nsew")
        # Start on the login page.
        self.ShowF(LoginPage)

    def ShowF(self, cont):
        """Raise the frame registered for page class *cont* to the top."""
        self.frames[cont].tkraise()
class LoginPage(tk.Frame):
    """Login form: collects username/password, then switches to StartPage."""

    def __init__(self, parent, controller):
        self.controller = controller
        tk.Frame.__init__(self, parent, bg='#a1dbcd')
        # Static labels need no reference after packing.
        tk.Label(self, text='Insira os dados para a validação', bg='#a1dbcd').pack()
        tk.Label(self, text='Usuário', bg='#a1dbcd').pack()
        self.ent = tk.Entry(self)
        self.ent.pack()
        tk.Label(self, text='Senha', bg='#a1dbcd').pack()
        self.ent2 = tk.Entry(self, show='*')
        self.ent2.pack()
        tk.Button(self, text='Validar', bg='#a1dbcd', command=self.Validacao).pack()
        # Status label updated after validation.
        self.lab3 = tk.Label(self, text='', bg='#a1dbcd')
        self.lab3.pack()

    def Validacao(self):
        """Read the credentials, report success and show the start page."""
        user = self.ent.get()
        passw = self.ent2.get()
        print('user: ', user, ' passw ', passw)
        self.lab3['text'] = 'Validação concluída!'
        self.controller.ShowF(StartPage)
class StartPage(tk.Frame):
    """Placeholder landing page shown after a successful login."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        tk.Label(self, text="Start Page").pack(pady=10, padx=10)
        tk.Button(self, text="Button1").pack()
        tk.Button(self, text="Button2").pack()
# Build the root window and enter the Tk event loop (blocks until closed).
app = Program()
app.mainloop()
"content_hash": "40d3d8b4ed89b7f29b725886517161ff",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 87,
"avg_line_length": 33.417910447761194,
"alnum_prop": 0.5757034390352836,
"repo_name": "GeorgeManakanatas/PPDM",
"id": "cfb58345346022545b51b203851ff48fde7ec20f",
"size": "2245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GUI/test2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11748"
},
{
"name": "Python",
"bytes": "75614"
}
],
"symlink_target": ""
} |
import unittest
import os
from yambopy.dbs.savedb import YamboSaveDB
from qepy.lattice import Path
# Reference test-data folder (gw_conv) shipped with the repository.
test_path = os.path.join(os.path.dirname(__file__),'..','..','data','refs','gw_conv')
class TestYamboSaveDB(unittest.TestCase):
    """Smoke-test YamboSaveDB against the reference SAVE database."""

    def test_yambosavedb(self):
        """ test savedb """
        # Open the reference SAVE folder and exercise the basic API.
        save_dir = os.path.join(test_path, 'SAVE')
        savedb = YamboSaveDB.from_db_file(folder=save_dir)
        savedb.get_fermi()
        savedb.write_kpoints()
        str(savedb)

    def tearDown(self):
        # write_kpoints() leaves these files in the working directory.
        os.remove('kpts_full.dat')
        os.remove('kpts.dat')
if __name__ == '__main__':
    # Run the tests when this module is executed directly.
    unittest.main()
| {
"content_hash": "4ef9ba523bbfb9968aacf4148031169d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.6071428571428571,
"repo_name": "alexmoratalla/yambopy",
"id": "1bb888333308696928c6706bbffb6ac556f4eccb",
"size": "728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yambopy/dbs/tests/test_savedb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "887890"
},
{
"name": "Shell",
"bytes": "1296"
}
],
"symlink_target": ""
} |
from celery.task import task, Task, subtask
import datetime, time, pytz
from django.utils import timezone
from models import ReaperConfig, RoomConfig, RoomStatus, ReaperLog
from models import PseudoRoomMotion, PseudoRoomResv
from django.db import transaction
from gcal_v3_util import GCalendar
from motion import call_motion_api
from motion_new import get_occupancy
from mysite.settings import OCC_THRESHOLD
# max vacancy (secs) allowed before treating room / fixture as unoccupied
# OCC_THRESHOLD from settings is expressed in minutes; fall back to
# 300 seconds (5 min) when it is unset or non-positive, otherwise
# convert minutes -> seconds for the comparisons below.
if not OCC_THRESHOLD or OCC_THRESHOLD <=0:
    OCC_THRESHOLD = 300
else:
    OCC_THRESHOLD = OCC_THRESHOLD * 60;
class DebugTask(Task):
    """Celery base task that prints its request context after every run."""
    abstract = True  # base class only; never registered as a runnable task

    def after_return(self, *args, **kwargs):
        # Invoked by celery after the task finishes, whatever the outcome.
        print("Task returned: %r" % (self.request, ))
        #print("Task returned: task: %r args: %r kwargs: %r"
        #    % (self.request.task, self.request.args, self.request.kwargs, ))
@task
def add(x, y):
    """Trivial demo task: return the sum of *x* and *y*."""
    total = x + y
    return total
@task(ignore_result=True, base=DebugTask)
def poller():
    """Periodic entry point: fan out one process_room task per enabled room.

    Per-room pipeline:
        process_room -> motion API -> reservation status -> reap -> notify
                     -> update_room_status
    """
    # Take the first ReaperConfig row (assumed to be the only one).
    # NOTE(review): rc stays None when no config row exists, which would make
    # the attribute accesses below raise -- confirm a row is always present.
    rc = None
    for r in ReaperConfig.objects.all():
        rc = r
        break
    rooms = RoomConfig.objects.filter(motion_enabled=True)
    for rm in rooms:
        print ("loop --> room_id: %d" %(rm.id))
        # Bundle everything a worker needs so the subtask is self-contained.
        room={'room_id':rm.id, 'calendar_id':rm.g_calendar_id, 'name':rm.name, 'location_id':rm.rw_location_id}
        room['rw_api_url']=rc.rw_api_url
        room['rw_api_user']=rc.rw_api_user
        room['rw_api_pwd']=rc.rw_api_pwd
        process_room.delay(room)
    return True
@task(ignore_result=True, base=DebugTask)
def process_room(room):
    """Kick off the occupancy -> reservation -> reap chain for one room.

    *room* is the dict assembled by poller() (ids, name, API credentials).
    """
    print ("process_room --> room_id: %d" %(room['room_id']))
    # Ensure a RoomStatus row exists (committed) before the chain runs.
    create_room_status(room)
    # Chain: fetch occupancy, then reservation state, then the reap decision.
    result = fetch_occ_from_director.delay(room, callback=subtask(fetch_rsv_from_cal,
                                           callback=subtask(reap)))
    return result
@task(ignore_result=True)
def fetch_occ_from_director(room, callback=None):
    """Query the motion/occupancy API for *room* and chain the result onward."""
    print ("fetch_occ_from_director --> room_id: %d" %(room['room_id']))
    occupied_pct, motion_instant = get_current_pct_occupancy(room)
    # Convert the UNIX timestamp into a timezone-aware UTC datetime.
    motion_dt = datetime.datetime.utcfromtimestamp(motion_instant)
    motion_dt = motion_dt.replace(tzinfo=pytz.timezone('utc'))
    if callback:
        subtask(callback).delay(room, motion_dt, occupied_pct)
    return True
@task(ignore_result=True)
def fetch_rsv_from_cal(room, curr_motion_time, occupied_pct, callback=None):
    """Fetch the current reservation state of *room* from Google Calendar.

    Passes (room, reserved?, current event, motion time, occupancy, next
    event) on to *callback* (the reap task) when one is given.
    Returns the reservation status, or None if the calendar API could not
    be initialized.
    """
    # Take the first ReaperConfig row (assumed singleton) for credentials.
    rc = None
    for r in ReaperConfig.objects.all():
        rc = r
        break
    #initialize google calendar api
    gcal = GCalendar(rc.g_consumer_key, rc.g_consumer_secret, rc.g_admin_user_email, rc.g_developer_key)
    if not gcal:
        print("Google Calendar API not initialized")
        return None
    # Current event (if any) plus the next upcoming one.
    (event, next_event) = gcal.getCurrentOrNextEventFromCalendar(room['calendar_id'])
    # Fix: dict.has_key() is removed in Python 3 and unidiomatic in Python 2;
    # the `in` operator is equivalent here.
    rsv_status = bool(event and 'id' in event)
    print ("fetch_rsv_from_cal --> room_id: %d curr_motion_time: %r rsv_status: %d"
           %(room['room_id'], curr_motion_time, rsv_status))
    if callback:
        subtask(callback).delay(room, rsv_status, event, curr_motion_time, occupied_pct, next_event)
    return rsv_status
@task(ignore_result=True)
def reap(room, rsv_status, event, curr_motion_time, occupied_pct, next_event):
    """Decide whether the current reservation of *room* should be reaped.

    A reserved room is reaped (its calendar event deleted) once it has been
    vacant -- or the meeting has been running -- for longer than the
    configured occupancy timeout, whichever window is shorter.  The
    RoomStatus row is updated either way, and a notification task is queued
    when a reap actually happened.
    """
    print ("reap --> room_id: %d curr_motion_time: %r rsv_status: %d occ_pct %f"
           %(room['room_id'], curr_motion_time, rsv_status, occupied_pct))
    current_time = timezone.now()
    print "current time (UTC): %r" %(current_time)
    #get timeout from reaperconfig
    reaper_config = None
    for r in ReaperConfig.objects.all():
        reaper_config = r
        break
    #if api did not return motion time for some reason, then set it to current time
    #to prevent greedy reaps
    if not curr_motion_time:
        curr_motion_time = current_time
    #calculate vacant_secs
    if curr_motion_time:
        vacant_secs = get_pos_time_diff_secs(current_time, curr_motion_time)
    else:
        vacant_secs = None
    # Room counts as occupied unless vacant longer than OCC_THRESHOLD secs.
    if vacant_secs > OCC_THRESHOLD:
        occ_status = False
    else:
        occ_status = True
    #Reaper Logic
    time_to_reap = None
    reaped = False
    avail_until = None
    if rsv_status:
        # occ_motion_timeout is configured in minutes.
        occ_timeout_secs = reaper_config.occ_motion_timeout * 60
        event_start_secs = get_pos_time_diff_secs(current_time,event['start_time'])
        print "occ_timeout_secs: %f vacant_secs: %f sec_fr_event_start: %f" %(occ_timeout_secs, vacant_secs, event_start_secs)
        time_to_reap = None
        reaped = False
        #use the smaller number as the comparator
        if vacant_secs < event_start_secs:
            cmp_secs = vacant_secs
        else:
            cmp_secs = event_start_secs
        # wait till occ_timeout_secs from vacant time or from start of meeting which ever is less
        if (cmp_secs >= occ_timeout_secs):
            print "OK to reap"
            time_to_reap = 0
            #initialize google calendar api
            gcal = GCalendar(reaper_config.g_consumer_key, reaper_config.g_consumer_secret,
                             reaper_config.g_admin_user_email, reaper_config.g_developer_key)
            if not gcal:
                print("Google Calendar API not initialized")
                return None
            # Delete the event and notify attendees; this is the actual reap.
            reaped = gcal.deleteEventFromCalendar(room['calendar_id'], event['id'], send_notification=True)
            occ_status=False #just in case
            rsv_status=False
        else:
            time_to_reap = occ_timeout_secs - cmp_secs
    if time_to_reap:
        print "time_to_reap: %f" %(time_to_reap)
    else:
        print "time_to_reap: None"
    #room is available till next event
    if next_event and 'start_time' in next_event:
        avail_until = next_event['start_time']
    #Update room status
    if rsv_status and not reaped:
        # Still reserved: mirror the event details into RoomStatus.
        booked_until = None
        if event.has_key('booked_until'):
            booked_until = event['booked_until']
        else:
            booked_until = event['end_time']
        update_room_status(room,
                           occupied = occ_status,
                           reserved = rsv_status,
                           last_motion_ts = curr_motion_time,
                           last_checked_ts = current_time,
                           time_to_reap = time_to_reap,
                           vacant_secs = vacant_secs,
                           occupancy_pct = occupied_pct,
                           booked_until = booked_until,
                           avail_until = None,
                           rsv_begin_time = event['start_time'],
                           rsv_end_time = event['end_time'],
                           rsv_owner_email = event['owner_email'],
                           rsv_owner_name = event['owner_name'],
                           event_timezone = event['timezone'],
                           allow_res=False
                           )
    else:
        # No active reservation (or it was just reaped): clear the
        # reservation-related fields and open the room for booking.
        update_room_status(room,
                           occupied = occ_status,
                           reserved = rsv_status,
                           last_motion_ts = curr_motion_time,
                           last_checked_ts = current_time,
                           time_to_reap = None,
                           vacant_secs = None,
                           occupancy_pct = occupied_pct,
                           booked_until = None,
                           avail_until = avail_until,
                           rsv_begin_time = None,
                           rsv_end_time = None,
                           rsv_owner_email = None,
                           rsv_owner_name = None,
                           event_timezone = None,
                           allow_res=True
                           )
    #Notify on cancellation and write to log
    if reaped:
        notify_on_event.subtask().delay(room,event,action='CANCEL_RSV')
@task(ignore_result=True)
def notify_on_event(room, event, action):
    """Handle a reaper notification: currently just logs reap cancellations."""
    print ("notify-->room_id %d event %r" %(room['room_id'], action))
    if action == 'CANCEL_RSV':
        # Persist an audit record of the cancelled reservation.
        log_reap_event(room, event)
    return True
#Utility Methods
def get_current_motion(room, pseudo=False):
    """Return the last-motion time for *room* as a timezone-aware UTC datetime.

    When *pseudo* is True the value comes from the PseudoRoomMotion table
    (test fixture); otherwise the Redwood motion API is queried.
    """
    if pseudo:
        motion = PseudoRoomMotion.objects.get(id=room['room_id'])
        return motion.curr_motion_time
    else:
        # API returns a UNIX timestamp (seconds since the epoch).
        instant = call_motion_api(room['rw_api_url'],room['rw_api_user'],room['rw_api_pwd'],
                                  room['location_id'])
        #create a timezone aware datetime from instant
        dt = datetime.datetime.utcfromtimestamp(instant).replace(tzinfo=pytz.timezone('utc'))
        #print "instant=%d dt=%r"%(instant,dt)
        return dt
#convert from UNIX timestamp (secs. since epoch) to aware datetime
#time.mktime(datetime.datetime.now().timetuple())
#return (datetime.datetime.fromtimestamp(motion.motion_instant))
def get_current_pct_occupancy(room):
    """Return (occupancy percentage, last-motion UNIX timestamp) for *room*."""
    now = timezone.now()
    return get_occupancy(room['rw_api_url'], room['rw_api_user'],
                         room['rw_api_pwd'], room['location_id'],
                         now, OCC_THRESHOLD)
def get_current_room_resv(room_id, gcal):
    """Return (reserved, begin, end, owner_id) from the PseudoRoomResv table.

    NOTE(review): *gcal* is accepted but never used here -- presumably kept
    for signature parity with a calendar-backed variant; confirm callers.
    """
    rr=None
    # Keep the last matching row (filter on primary key yields at most one).
    for r in PseudoRoomResv.objects.filter(id=room_id):
        rr=r
    if rr:
        return (rr.reserved, rr.rsv_begin_time, rr.rsv_end_time, rr.rsv_owner_id)
    else:
        # No fixture row: report the room as unreserved.
        return (False, None, None, None)
@transaction.commit_manually
def create_room_status(room):
    """Create the initial RoomStatus row for *room* if one does not exist.

    Runs under manual transaction control: commits on success, rolls back
    and re-raises on any error.
    """
    #create room status record if one does not exist
    try:
        res = RoomConfig.objects.filter(id=room['room_id'])
        rc=None
        for r in res:
            rc=r
        # Unknown room id: nothing to create.
        # NOTE(review): returning here neither commits nor rolls back the
        # manual transaction -- confirm this is safe under this Django version.
        if not rc:
            return
        res = RoomStatus.objects.filter(room_id=room['room_id'])
        rm = None
        for r in res:
            rm = r
            break
        if not rm:
            # Seed a neutral status row; the poller fills in real values later.
            rm = RoomStatus(room=rc,
                            occupied=False,
                            reserved=False,
                            last_motion_ts=None,
                            last_checked_ts=None,
                            vacant_secs=None,
                            time_to_reap=None,
                            occupancy_pct=None,
                            avail_until=None,
                            booked_until=None,
                            rsv_begin_time=None,
                            rsv_end_time=None,
                            rsv_owner_email=None,
                            rsv_owner_name=None,
                            event_timezone=None,
                            allow_res=True
                            )
            rm.save()
    except:
        transaction.rollback()
        raise
    else:
        transaction.commit()
    return True
@transaction.commit_manually
def update_room_status(room, **kwargs):
    """Update the RoomStatus row for *room* with the supplied field values.

    Only fields named in *kwargs* are written; everything else is left
    untouched.  Replaces the original fifteen copy-pasted
    ``if 'x' in kwargs: rm.x = kwargs['x']`` blocks with one data-driven
    loop over the same field list (identical behaviour).
    Commits on success; rolls back and re-raises on any error.
    """
    # Whitelist of RoomStatus fields this helper is allowed to touch.
    updatable_fields = (
        'occupied', 'reserved', 'last_motion_ts', 'last_checked_ts',
        'vacant_secs', 'time_to_reap', 'rsv_begin_time', 'rsv_end_time',
        'rsv_owner_email', 'rsv_owner_name', 'event_timezone', 'allow_res',
        'occupancy_pct', 'avail_until', 'booked_until',
    )
    rm = None
    for r in RoomStatus.objects.filter(room_id=room['room_id']):
        rm = r
        break
    if rm:
        try:
            for field in updatable_fields:
                if field in kwargs:
                    setattr(rm, field, kwargs[field])
            rm.save()
        except:
            # Bare except kept deliberately: any failure must roll back the
            # manual transaction before propagating.
            transaction.rollback()
            raise
        else:
            transaction.commit()
    return True
def get_pos_time_diff_secs(dt1, dt2):
    """Return the number of seconds by which *dt1* is ahead of *dt2*.

    Both datetimes are assumed timezone-aware and stored in UTC.
    Returns 0 when *dt1* is earlier than *dt2* (negative difference).

    Fix: the previous implementation returned ``diff.seconds``, which is
    only the within-day component of a timedelta -- a room vacant for two
    full days reported 0 seconds.  ``total_seconds()`` includes the days.
    """
    diff = dt1 - dt2
    if diff.days >= 0:
        return diff.total_seconds()
    return 0
@transaction.commit_manually
def log_reap_event(room, event):
    """Persist a ReaperLog row recording that *event* was reaped from *room*.

    Commits on success; rolls back and re-raises on any error.
    """
    try:
        dbLog = ReaperLog()
        # Fix: dict.has_key() is removed in Python 3; `in` is equivalent.
        if event and 'id' in event:
            dbLog.g_event_id = event['id']
            dbLog.rsv_begin_time = event['start_time']
            dbLog.rsv_end_time = event['end_time']
            dbLog.rsv_owner_email = event['owner_email']
            dbLog.rsv_owner_name = event['owner_name']
        dbLog.room_id = room['room_id']
        dbLog.room_calendar_id = room['calendar_id']
        dbLog.room_name = room['name']
        dbLog.save()
    except:
        transaction.rollback()
        raise
    else:
        transaction.commit()
    return True
| {
"content_hash": "f5a90d5e6c46362393272037c3bde276",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 153,
"avg_line_length": 33.71461716937355,
"alnum_prop": 0.5387791617920308,
"repo_name": "redwoodsystems/GoogleCalendar-Connector",
"id": "c75dddd51bfd95f350dc28435370ee5fd11479b2",
"size": "14531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reaper/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "111249"
},
{
"name": "Python",
"bytes": "71296"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import pandas as pd
# Load the Titanic test set; Age is forced to float64 so the column stays
# numeric (it may contain missing values).
test_data = pd.read_csv('../../data/test.csv', dtype={"Age": np.float64})
| {
"content_hash": "cdc783441f973d6dd5c8f40a6e3e6c77",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 25.5,
"alnum_prop": 0.6862745098039216,
"repo_name": "wojtekwalczak/kaggle_titanic",
"id": "8a5b83ebad1431db2848e46c12235c9058cb7787",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "titanic/db/test_loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24286"
}
],
"symlink_target": ""
} |
"""
======================================================================
Point groups (:mod:`sknano.utils.symmetry_groups._point_groups`)
======================================================================
.. currentmodule:: sknano.utils.symmetry_groups._point_groups
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
__all__ = ['PointGroup']
class PointGroup:
    """Placeholder for a point-group symmetry representation.

    NOTE(review): not yet implemented -- the class body is empty.
    """
    pass
| {
"content_hash": "408efe5885b3e34a8f25088760d90697",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 28.058823529411764,
"alnum_prop": 0.5031446540880503,
"repo_name": "androomerrill/scikit-nano",
"id": "3ba7d844daa72193c1951d20b59ee8c29591d9e0",
"size": "501",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sknano/utils/symmetry_groups/_point_groups.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1276156"
},
{
"name": "Shell",
"bytes": "2169"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class l3param(base_resource) :
""" Configuration for Layer 3 related parameter resource. """
def __init__(self) :
    # Backing fields for the l3param properties; empty string / zero means
    # "unset" until populated from a NITRO response or by the caller.
    self._srcnat = ""
    self._icmpgenratethreshold = 0
    self._overridernat = ""
    self._dropdfflag = ""
    self._miproundrobin = ""
    self._externalloopback = ""
    self._tnlpmtuwoconn = ""
    self._usipserverstraypkt = ""
    self._forwardicmpfragments = ""
    self._dropipfragments = ""
    self._acllogtime = 0
    self._icmperrgenerate = ""
@property
def srcnat(self) :
"""Perform NAT if only the source is in the private network.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._srcnat
except Exception as e:
raise e
@srcnat.setter
def srcnat(self, srcnat) :
"""Perform NAT if only the source is in the private network.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._srcnat = srcnat
except Exception as e:
raise e
@property
def icmpgenratethreshold(self) :
"""NS generated ICMP pkts per 10ms rate threshold.<br/>Default value: 100.
"""
try :
return self._icmpgenratethreshold
except Exception as e:
raise e
@icmpgenratethreshold.setter
def icmpgenratethreshold(self, icmpgenratethreshold) :
"""NS generated ICMP pkts per 10ms rate threshold.<br/>Default value: 100
"""
try :
self._icmpgenratethreshold = icmpgenratethreshold
except Exception as e:
raise e
@property
def overridernat(self) :
"""USNIP/USIP settings override RNAT settings for configured
service/virtual server traffic.. .<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._overridernat
except Exception as e:
raise e
@overridernat.setter
def overridernat(self, overridernat) :
"""USNIP/USIP settings override RNAT settings for configured
service/virtual server traffic.. .<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._overridernat = overridernat
except Exception as e:
raise e
@property
def dropdfflag(self) :
"""Enable dropping the IP DF flag.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dropdfflag
except Exception as e:
raise e
@dropdfflag.setter
def dropdfflag(self, dropdfflag) :
"""Enable dropping the IP DF flag.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._dropdfflag = dropdfflag
except Exception as e:
raise e
@property
def miproundrobin(self) :
"""Enable round robin usage of mapped IPs.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._miproundrobin
except Exception as e:
raise e
@miproundrobin.setter
def miproundrobin(self, miproundrobin) :
"""Enable round robin usage of mapped IPs.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._miproundrobin = miproundrobin
except Exception as e:
raise e
@property
def externalloopback(self) :
"""Enable external loopback.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._externalloopback
except Exception as e:
raise e
@externalloopback.setter
def externalloopback(self, externalloopback) :
"""Enable external loopback.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._externalloopback = externalloopback
except Exception as e:
raise e
@property
def tnlpmtuwoconn(self) :
"""Enable external loopback.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._tnlpmtuwoconn
except Exception as e:
raise e
@tnlpmtuwoconn.setter
def tnlpmtuwoconn(self, tnlpmtuwoconn) :
"""Enable external loopback.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._tnlpmtuwoconn = tnlpmtuwoconn
except Exception as e:
raise e
@property
def usipserverstraypkt(self) :
"""Enable detection of stray server side pkts in USIP mode.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._usipserverstraypkt
except Exception as e:
raise e
@usipserverstraypkt.setter
def usipserverstraypkt(self, usipserverstraypkt) :
"""Enable detection of stray server side pkts in USIP mode.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._usipserverstraypkt = usipserverstraypkt
except Exception as e:
raise e
@property
def forwardicmpfragments(self) :
"""Enable forwarding of ICMP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._forwardicmpfragments
except Exception as e:
raise e
@forwardicmpfragments.setter
def forwardicmpfragments(self, forwardicmpfragments) :
"""Enable forwarding of ICMP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._forwardicmpfragments = forwardicmpfragments
except Exception as e:
raise e
@property
def dropipfragments(self) :
"""Enable dropping of IP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dropipfragments
except Exception as e:
raise e
@dropipfragments.setter
def dropipfragments(self, dropipfragments) :
"""Enable dropping of IP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._dropipfragments = dropipfragments
except Exception as e:
raise e
@property
def acllogtime(self) :
"""Parameter to tune acl logging time.<br/>Default value: 5000.
"""
try :
return self._acllogtime
except Exception as e:
raise e
@acllogtime.setter
def acllogtime(self, acllogtime) :
"""Parameter to tune acl logging time.<br/>Default value: 5000
"""
try :
self._acllogtime = acllogtime
except Exception as e:
raise e
@property
def icmperrgenerate(self) :
"""Enable/Disable fragmentation required icmp error generation, before encapsulating a packet with vPath header. This knob is only functional for vPath Environment.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._icmperrgenerate
except Exception as e:
raise e
@icmperrgenerate.setter
def icmperrgenerate(self, icmperrgenerate) :
"""Enable/Disable fragmentation required icmp error generation, before encapsulating a packet with vPath header. This knob is only functional for vPath Environment.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._icmperrgenerate = icmperrgenerate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
    """ converts nitro response into object and returns the object array in case of get request.
    """
    try :
        result = service.payload_formatter.string_to_resource(l3param_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # Error code 444 means the session expired: drop it so the next
            # call re-authenticates.
            if (result.errorcode == 444) :
                service.clear_session(self)
            if result.severity :
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                # No severity supplied: treat any non-zero code as an error.
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.l3param
    except Exception as e :
        raise e
def _get_object_name(self) :
    """ Returns the value of object identifier argument
    """
    # l3param is a global (singleton) resource, so there is no identifier.
    try :
        return None
    except Exception as e :
        raise e
@classmethod
def update(cls, client, resource) :
    """ Use this API to update l3param.

    Copies every configurable field from *resource* onto a fresh l3param
    and issues the update through *client*.
    """
    try :
        if type(resource) is not list :
            updateresource = l3param()
            updateresource.srcnat = resource.srcnat
            updateresource.icmpgenratethreshold = resource.icmpgenratethreshold
            updateresource.overridernat = resource.overridernat
            updateresource.dropdfflag = resource.dropdfflag
            updateresource.miproundrobin = resource.miproundrobin
            updateresource.externalloopback = resource.externalloopback
            updateresource.tnlpmtuwoconn = resource.tnlpmtuwoconn
            updateresource.usipserverstraypkt = resource.usipserverstraypkt
            updateresource.forwardicmpfragments = resource.forwardicmpfragments
            updateresource.dropipfragments = resource.dropipfragments
            updateresource.acllogtime = resource.acllogtime
            updateresource.icmperrgenerate = resource.icmperrgenerate
            return updateresource.update_resource(client)
    except Exception as e :
        raise e
@classmethod
def unset(cls, client, resource, args) :
    """ Use this API to unset the properties of l3param resource.
    Properties that need to be unset are specified in args array.
    """
    try :
        if type(resource) is not list :
            unsetresource = l3param()
            return unsetresource.unset_resource(client, args)
    except Exception as e :
        raise e
@classmethod
def get(cls, client, name="", option_="") :
    """ Use this API to fetch all the l3param resources that are configured on netscaler.

    NOTE(review): when *name* is non-empty the function falls through and
    implicitly returns None -- looks like generated boilerplate; confirm.
    """
    try :
        if not name :
            obj = l3param()
            response = obj.get_resources(client, option_)
        return response
    except Exception as e :
        raise e
# Allowed string constants for the corresponding l3param fields; each class
# mirrors a NITRO ENABLED/DISABLED enumerated option.
class Icmperrgenerate:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Dropipfragments:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Overridernat:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Tnlpmtuwoconn:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Usipserverstraypkt:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Srcnat:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Externalloopback:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Forwardicmpfragments:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Dropdfflag:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"

class Miproundrobin:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class l3param_response(base_response) :
    # Envelope for NITRO API responses carrying l3param resources.
    def __init__(self, length=1) :
        self.l3param = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate *length* empty resources for the payload formatter.
        self.l3param = [l3param() for _ in range(length)]
| {
"content_hash": "2b46537f2b34fff2a79aa387bea6b426",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 234,
"avg_line_length": 28.78904109589041,
"alnum_prop": 0.7215454891511229,
"repo_name": "mahabs/nitro",
"id": "90d1dc483daf4363c800e8bad1315695ebb71e60",
"size": "11122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/network/l3param.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
from collections import defaultdict
class HypGraph(object):
    """Directed graph of decoding hypotheses.

    Node ids encode a word plus its (reversed) history, e.g. ``7-5-0``;
    the synthetic start symbol ``0`` roots every path.
    """

    def __init__(self):
        self.nodes = defaultdict(str)        # node id -> word label
        self.edges = []                      # (parent id, child id) pairs
        self.costs = defaultdict(float)      # node id -> hypothesis cost
        self.word_probs = defaultdict(float) # node id -> word probability

    def get_id(self, word, history):
        """Build the unique node id for *word* following *history*."""
        if history == []:
            return str(word)
        suffix = '-'.join(str(item) for item in reversed(history))
        return '%s-%s' % (word, suffix)

    def get_ids(self, words):
        """Return the node id for every prefix position of *words*."""
        return [self.get_id(w, words[:pos]) for pos, w in enumerate(words)]

    def add(self, word, history, word_prob=None, cost=None):
        """Register *word* (following *history*) as a node plus incoming edge."""
        full_history = [0] + history
        history_ids = self.get_ids(full_history)
        node_id = self.get_id(word, full_history)
        # store
        self.nodes[node_id] = word
        self.edges.append((history_ids[-1], node_id))
        if word_prob is not None:
            self.word_probs[node_id] = word_prob
        if cost is not None:
            self.costs[node_id] = cost
class HypGraphRenderer(object):
    """Render a HypGraph to an image via pygraphviz (Python 2 code)."""

    def __init__(self, hyp_graph):
        # Borrow the graph's containers directly (shared, not copied).
        self.nodes = hyp_graph.nodes
        self.edges = hyp_graph.edges
        self.costs = hyp_graph.costs
        self.word_probs = hyp_graph.word_probs
        # constants
        self.BOS_SYMBOLS = ['0']
        self.EOS_SYMBOLS = ['<eos>']

    def _escape_label(self, label):
        # Escape characters that graphviz "record" labels treat as markup.
        replacements = {
            '<': '\<',
            '>': '\>',
        }
        for original, replacement in replacements.iteritems():
            label = label.replace(original, replacement)
        return label

    def _render(self, costs=False, word_probs=False, highlight_best=False):
        """Build the pygraphviz AGraph in self.graph (does not draw yet)."""
        from pygraphviz import AGraph
        graph = AGraph(directed=True)
        for node_id, node_label in self.nodes.iteritems():
            attributes = self._node_attr(node_id, costs=costs, word_probs=word_probs)
            graph.add_node(node_id, **attributes)
        for (parent_node_id, child_node_id) in self.edges:
            graph.add_edge(parent_node_id, child_node_id)
        self.graph = graph
        if highlight_best:
            self._highlight_best()

    def _node_attr(self, node_id, costs=False, word_probs=False):
        """Return graphviz attributes for one node (record shape when
        probabilities and/or costs are to be displayed)."""
        word = self.nodes[node_id].decode('utf-8')
        cost = self.costs[node_id]
        prob = self.word_probs[node_id]
        attr = {}
        if costs and word_probs:
            attr['shape'] = "record"
            attr['label'] = "{{%s|%.3f}|%.3f}" % (word, prob, cost)
        elif costs:
            attr['shape'] = "record"
            attr['label'] = "{{%s}|%.3f}" % (word, cost)
        elif word_probs:
            attr['shape'] = "record"
            attr['label'] = "{{%s|%.3f}}" % (word, prob)
        else:
            attr['label'] = word
        attr['label'] = self._escape_label(attr['label'])
        return attr

    def _highlight_best(self):
        """Colour the lowest-cost complete hypothesis (EOS leaf to root)."""
        best_hyp_bg_color = '#CDE9EC'
        best_hyp_cost = None
        best_hyp_leaf_node_id = None
        # Lowest-cost EOS node is the end of the best hypothesis.
        for node_id, label in self.nodes.iteritems():
            if label in self.EOS_SYMBOLS:
                if best_hyp_cost == None or self.costs[node_id] < best_hyp_cost:
                    best_hyp_leaf_node_id = node_id
                    best_hyp_cost = self.costs[node_id]
        if best_hyp_leaf_node_id:
            best_hyp_leaf_node = self.graph.get_node(best_hyp_leaf_node_id)
            # Walk parent links back to the root, colouring each node.
            current_node = best_hyp_leaf_node
            while current_node != []:
                current_node.attr['style'] = 'filled'
                current_node.attr['fillcolor'] = best_hyp_bg_color
                try:
                    current_node = self.graph.predecessors(current_node)[0]
                except IndexError:
                    break

    def wordify(self, word_dict):
        """
        Replace node labels (usually integers) with words, subwords, or
        characters.
        """
        for node_id, label in self.nodes.iteritems():
            self.nodes[node_id] = word_dict[label]

    def save_png(self, filepath, detailed=False, highlight_best=False):
        """
        Renders the graph as PNG image.

        @param filepath the taget file
        @param detailed whether to include word probabilities and
                        hypothesis costs.
        @param highlight_best whether to highlight the best hypothesis.
        """
        costs = True if detailed else False
        word_probs = True if detailed else False
        self._render(costs=costs, word_probs=word_probs, highlight_best=highlight_best)
        self.graph.draw(filepath, prog="dot")
| {
"content_hash": "9c56daedf82d0cd0f8f62fe97f0f333d",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 97,
"avg_line_length": 32.4296875,
"alnum_prop": 0.6357504215851602,
"repo_name": "shuoyangd/nematus",
"id": "d0dffad9b28e15e2450c8a18918ef4b2493772c7",
"size": "4198",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nematus/hypgraph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "17034"
},
{
"name": "JavaScript",
"bytes": "17029"
},
{
"name": "NewLisp",
"bytes": "1582"
},
{
"name": "PHP",
"bytes": "10635"
},
{
"name": "Perl",
"bytes": "28465"
},
{
"name": "Python",
"bytes": "224410"
},
{
"name": "Ruby",
"bytes": "1649"
},
{
"name": "Shell",
"bytes": "9914"
},
{
"name": "Slash",
"bytes": "356"
},
{
"name": "Smalltalk",
"bytes": "1892"
},
{
"name": "SystemVerilog",
"bytes": "184"
}
],
"symlink_target": ""
} |
import logging
import pyauto_functional
import chromeos_network # pyauto_functional must come before chromeos_network
class ChromeosWifi(chromeos_network.PyNetworkUITest):
    """Tests for ChromeOS wifi."""

    def testNetworkInfoAndScan(self):
        """Get basic info on networks."""
        # NetworkScan will also call GetNetworkInfo and return the results.
        result = self.NetworkScan()
        self.assertTrue(result)
        logging.debug(result)

    def testGetProxySettings(self):
        """Print some information about proxy settings."""
        result = self.GetProxySettingsOnChromeOS()
        self.assertTrue(result)
        logging.debug(result)

    def testConnectToHiddenWiFiNonExistent(self):
        """Connecting to a non-existent network should fail.

        Assume network 'ThisIsANonExistentNetwork' is not a valid ssid within
        the vicinity of where this test is run.
        """
        ssid = 'ThisIsANonExistentNetwork'
        # A truthy error value signals the expected connection failure.
        error = self.ConnectToHiddenWifiNetwork(ssid, 'SECURITY_NONE')
        self.assertTrue(error, msg='Device connected to a non-existent '
                        'network "%s".' % ssid)
if __name__ == '__main__':
pyauto_functional.Main()
| {
"content_hash": "c95e0aaf4486ab92d8c87c70f12b32b2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.7027491408934707,
"repo_name": "Crystalnix/house-of-life-chromium",
"id": "e757ebfbeef543d3cce9de3ddf5697f4b2b4b591",
"size": "1349",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chrome/test/functional/chromeos_wifi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "3418"
},
{
"name": "C",
"bytes": "88445923"
},
{
"name": "C#",
"bytes": "73756"
},
{
"name": "C++",
"bytes": "77228136"
},
{
"name": "Emacs Lisp",
"bytes": "6648"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "Java",
"bytes": "11354"
},
{
"name": "JavaScript",
"bytes": "6191433"
},
{
"name": "Objective-C",
"bytes": "4023654"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "92217"
},
{
"name": "Python",
"bytes": "5604932"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "1234672"
},
{
"name": "Tcl",
"bytes": "200213"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from . import get_data_home
from .tools import download_with_progress_bar
DATA_URL = ('http://lambda.gsfc.nasa.gov/data/map/dr4/'
'skymaps/7yr/raw/wmap_band_imap_r9_7yr_W_v4.fits')
MASK_URL = ('http://lambda.gsfc.nasa.gov/data/map/dr4/'
'ancillary/masks/wmap_temperature_analysis_mask_r9_7yr_v4.fits')
def fetch_wmap_temperatures(masked=False, data_home=None,
                            download_if_missing=True):
    """Loader for WMAP temperature map data

    Parameters
    ----------
    masked : optional, default=False
        If True, then return the foreground-masked healpix array of data
        If False, then return the raw temperature array

    data_home : optional, default=None
        Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data'.

    download_if_missing : optional, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : np.ndarray or np.ma.MaskedArray
        record array containing (masked) temperature data
    """
    # because of a bug in healpy, pylab must be imported before healpy is
    # or else a segmentation fault can result.
    import healpy as hp

    data_home = get_data_home(data_home)

    data_file = os.path.join(data_home, os.path.basename(DATA_URL))
    mask_file = os.path.join(data_home, os.path.basename(MASK_URL))

    if not os.path.exists(data_file):
        if not download_if_missing:
            raise IOError('data not present on disk. '
                          'set download_if_missing=True to download')
        data_buffer = download_with_progress_bar(DATA_URL)
        # Fix: close the file deterministically; the original relied on the
        # garbage collector to close open(...).write(...) handles.
        with open(data_file, 'wb') as f:
            f.write(data_buffer)

    data = hp.read_map(data_file)

    if masked:
        if not os.path.exists(mask_file):
            if not download_if_missing:
                raise IOError('mask data not present on disk. '
                              'set download_if_missing=True to download')
            mask_buffer = download_with_progress_bar(MASK_URL)
            with open(mask_file, 'wb') as f:
                f.write(mask_buffer)

        mask = hp.read_map(mask_file)

        data = hp.ma(data)
        data.mask = np.logical_not(mask)  # WMAP mask has 0=bad. We need 1=bad

    return data
| {
"content_hash": "e8d9b3f355765f611e44fb61e11f39e7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 36.8,
"alnum_prop": 0.6362876254180602,
"repo_name": "astroML/astroML",
"id": "e21583778ca2258d846fffa26659b567c30090f2",
"size": "2392",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astroML/datasets/wmap_temperatures.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "649"
},
{
"name": "Python",
"bytes": "336942"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from tweepy.models import ModelFactory
from tweepy.utils import import_simplejson
from tweepy.error import TweepError
class Parser(object):
    """Abstract interface for tweepy API response parsers."""

    def parse(self, method, payload):
        """Turn a raw response payload into result data.

        Implementations return either the parsed result, or a
        (result, cursors) tuple when pagination cursors are present.
        """
        raise NotImplementedError

    def parse_error(self, payload):
        """Extract (error_msg, error_code) from an error payload.

        Implementations may raise if the payload cannot be interpreted,
        in which case the caller falls back to a default error message.
        """
        raise NotImplementedError
class RawParser(Parser):
    """Pass-through parser: hands payloads back completely untouched."""

    def __init__(self):
        pass

    def parse(self, method, payload):
        # No decoding at all — the caller receives the raw bytes/text.
        return payload

    def parse_error(self, payload):
        # Errors are likewise returned verbatim.
        return payload
class JSONParser(Parser):
    """Parser that decodes JSON payloads, surfacing pagination cursors."""

    payload_format = 'json'

    def __init__(self):
        # json_lib is simplejson when available, stdlib json otherwise.
        self.json_lib = import_simplejson()

    def parse(self, method, payload):
        """Decode *payload*; return the object, plus cursors when paginated."""
        try:
            decoded = self.json_lib.loads(payload)
        except Exception as e:
            raise TweepError('Failed to parse JSON payload: %s' % e)

        # Cursor endpoints carry both previous/next cursors in the body.
        if 'cursor' in method.session.params and isinstance(decoded, dict) \
                and 'previous_cursor' in decoded \
                and 'next_cursor' in decoded:
            return decoded, (decoded['previous_cursor'],
                             decoded['next_cursor'])
        return decoded

    def parse_error(self, payload):
        """Return (reason, api_code) extracted from an error payload."""
        error_object = self.json_lib.loads(payload)

        if 'error' in error_object:
            return error_object['error'], error_object.get('code')

        # Newer error format: a list of {message, code} objects.
        reason = error_object['errors']
        codes = [err.get('code') for err in reason if err.get('code')]
        return reason, (codes[0] if len(codes) == 1 else codes)
class ModelParser(JSONParser):
    """JSON parser that additionally instantiates tweepy model objects."""

    def __init__(self, model_factory=None):
        JSONParser.__init__(self)
        self.model_factory = model_factory or ModelFactory

    def parse(self, method, payload):
        """Decode *payload* and wrap it in the model for method.payload_type."""
        try:
            if method.payload_type is None:
                return
            model = getattr(self.model_factory, method.payload_type)
        except AttributeError:
            raise TweepError('No model for this payload type: '
                             '%s' % method.payload_type)

        decoded = JSONParser.parse(self, method, payload)
        cursors = None
        if isinstance(decoded, tuple):
            decoded, cursors = decoded

        if method.payload_list:
            parsed = model.parse_list(method.api, decoded)
        else:
            parsed = model.parse(method.api, decoded)

        return (parsed, cursors) if cursors else parsed
| {
"content_hash": "25aede723b88e4133591a0e013ece34a",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 28.80952380952381,
"alnum_prop": 0.583801652892562,
"repo_name": "gemmaan/moviesenal",
"id": "046cf50994e10ac7695a8585d6059706b03109f0",
"size": "3101",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tweepy/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1264"
},
{
"name": "C",
"bytes": "488967"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "3387200"
},
{
"name": "Tcl",
"bytes": "1237789"
}
],
"symlink_target": ""
} |
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepanciecs
# from the third edition until this gets reviewed.)
from collections import defaultdict
import urllib.request
import re
# ______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    # Each right-hand side "B C | D E" becomes a list of token lists;
    # str.split() with no argument already discards surrounding whitespace.
    return {lhs: [alternative.split() for alternative in rhs.split('|')]
            for (lhs, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Art = "the | a | an")
    {'Art': ['the', 'a', 'an']}
    """
    # Split each alternatives string on '|' and trim the padding spaces.
    return {lhs: [word.strip() for word in rhs.split('|')]
            for (lhs, rhs) in rules.items()}
class Grammar:
    """A grammar: rewrite rules plus a lexicon of words per category."""

    def __init__(self, name, rules, lexicon):
        """A grammar has a set of rules and a lexicon."""
        self.name = name
        self.rules = rules
        self.lexicon = lexicon
        # Invert the lexicon: word -> list of categories it belongs to.
        self.categories = defaultdict(list)
        for category, words in lexicon.items():
            for word in words:
                self.categories[word].append(category)

    def rewrites_for(self, cat):
        """Return a sequence of possible rhs's that cat can be rewritten as."""
        return self.rules.get(cat, ())

    def isa(self, word, cat):
        """Return True iff word is of category cat"""
        return cat in self.categories[word]

    def __repr__(self):
        return '<Grammar {}>'.format(self.name)
# Example grammar E_0 from AIMA: a small fragment of English for the
# wumpus world, used by the Chart parser's doctest below.
E0 = Grammar('E0',
             Rules(  # Grammar for E_0 [Figure 22.4]
                 S='NP VP | S Conjunction S',
                 NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',  # noqa
                 VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
                 PP='Preposition NP',
                 RelClause='That VP'),

             Lexicon(  # Lexicon for E_0 [Figure 22.3]
                 Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",  # noqa
                 Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",  # noqa
                 Adjective="right | left | east | south | back | smelly",
                 Adverb="here | there | nearby | ahead | right | left | east | south | back",  # noqa
                 Pronoun="me | you | I | it",
                 Name="John | Mary | Boston | Aristotle",
                 Article="the | a | an",
                 Preposition="to | in | on | near",
                 Conjunction="and | or | but",
                 Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
                 That="that"
             ))
# Minimal grammars exercised by the test-suite and by generate_random().
E_ = Grammar('E_',  # Trivial Grammar and lexicon for testing
             Rules(
                 S='NP VP',
                 NP='Art N | Pronoun',
                 VP='V NP'),

             Lexicon(
                 Art='the | a',
                 N='man | woman | table | shoelace | saw',
                 Pronoun='I | you | it',
                 V='saw | liked | feel'
             ))

E_NP_ = Grammar('E_NP_',  # another trivial grammar for testing
                Rules(NP='Adj NP | N'),
                Lexicon(Adj='happy | handsome | hairy',
                        N='man'))
def generate_random(grammar=E_, s='S'):
    """Replace each token in s by a random entry in grammar (recursively).
    This is useful for testing a grammar, e.g. generate_random(E_)"""
    import random

    def expand(token, out):
        # Rule symbols recurse; lexicon symbols pick a random word;
        # anything else is emitted literally.
        if token in grammar.rules:
            for part in random.choice(grammar.rules[token]):
                expand(part, out)
        elif token in grammar.lexicon:
            out.append(random.choice(grammar.lexicon[token]))
        else:
            out.append(token)

    words = []
    for token in s.split():
        expand(token, words)
    return ' '.join(words)
# ______________________________________________________________________________
# Chart Parsing
class Chart:
    """Class for parsing sentences using a chart data structure. [Figure 22.7]
    >>> chart = Chart(E0);
    >>> len(chart.parses('the stench is in 2 2'))
    1
    """

    def __init__(self, grammar, trace=False):
        """A datastructure for parsing a string; and methods to do the parse.
        self.chart[i] holds the edges that end just before the i'th word.
        Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
        self.grammar = grammar
        self.trace = trace  # when True, print each edge as it is added

    def parses(self, words, S='S'):
        """Return a list of parses; words can be a list or string."""
        if isinstance(words, str):
            words = words.split()
        self.parse(words, S)
        # Return all the parses that span the whole input
        # 'span the whole input' => begin at 0, end at len(words)
        return [[i, j, S, found, []]
                for (i, j, lhs, found, expects) in self.chart[len(words)]
                # assert j == len(words)
                if i == 0 and lhs == S and expects == []]

    def parse(self, words, S='S'):
        """Parse a list of words; according to the grammar.
        Leave results in the chart."""
        self.chart = [[] for i in range(len(words)+1)]
        # Seed with a dummy edge 'S_' that expects the start symbol S.
        self.add_edge([0, 0, 'S_', [], [S]])
        for i in range(len(words)):
            self.scanner(i, words[i])
        return self.chart

    def add_edge(self, edge):
        "Add edge to chart, and see if it extends or predicts another edge."
        start, end, lhs, found, expects = edge
        if edge not in self.chart[end]:
            self.chart[end].append(edge)
            if self.trace:
                print('Chart: added {}'.format(edge))
            # A complete edge (nothing left to expect) may extend other edges;
            # an incomplete one predicts rules for its next expected symbol.
            if not expects:
                self.extender(edge)
            else:
                self.predictor(edge)

    def scanner(self, j, word):
        "For each edge expecting a word of this category here, extend the edge."  # noqa
        for (i, j, A, alpha, Bb) in self.chart[j]:
            if Bb and self.grammar.isa(word, Bb[0]):
                self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])

    def predictor(self, edge):
        "Add to chart any rules for B that could help extend this edge."
        (i, j, A, alpha, Bb) = edge
        B = Bb[0]
        if B in self.grammar.rules:
            for rhs in self.grammar.rewrites_for(B):
                self.add_edge([j, j, B, [], rhs])

    def extender(self, edge):
        "See what edges can be extended by this edge."
        (j, k, B, _, _) = edge
        # Any edge ending at j that expects B can consume this complete edge.
        for (i, j, A, alpha, B1b) in self.chart[j]:
            if B1b and B == B1b[0]:
                self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# ______________________________________________________________________________
# CYK Parsing
def CYK_parse(words, grammar):
    "[Figure 23.5]"
    # Probabilistic CKY chart: P[X, start, length] is the best probability
    # found for category X spanning words[start:start+length].
    # NOTE(review): relies on grammar.categories yielding (category, prob)
    # pairs and on a grammar.cnf_rules() method — the XXX comments below mark
    # that the Grammar class above does not provide these yet.
    # We use 0-based indexing instead of the book's 1-based.
    N = len(words)
    P = defaultdict(float)
    # Insert lexical rules for each word.
    for (i, word) in enumerate(words):
        for (X, p) in grammar.categories[word]:  # XXX grammar.categories needs changing, above
            P[X, i, 1] = p
    # Combine first and second parts of right-hand sides of rules,
    # from short to long.
    for length in range(2, N+1):
        for start in range(N-length+1):
            for len1 in range(1, length):  # N.B. the book incorrectly has N instead of length
                len2 = length - len1
                for (X, Y, Z, p) in grammar.cnf_rules():  # XXX grammar needs this method
                    P[X, start, length] = max(P[X, start, length],
                                              P[Y, start, len1] * P[Z, start+len1, len2] * p)
    return P
# ______________________________________________________________________________
# Page Ranking
# First entry in list is the base URL, and then following are relative URL pages
# NOTE(review): "Betrand Russell" and "Continental Philosophy" contain spaces
# (and a likely typo for "Bertrand"), so URLs built from them will not resolve
# to real Wikipedia pages — confirm whether that is intentional test data.
examplePagesSet = ["https://en.wikipedia.org/wiki/", "Aesthetics", "Analytic_philosophy",
                   "Ancient_Greek", "Aristotle", "Astrology", "Atheism", "Baruch_Spinoza",
                   "Belief", "Betrand Russell", "Confucius", "Consciousness",
                   "Continental Philosophy", "Dialectic", "Eastern_Philosophy",
                   "Epistemology", "Ethics", "Existentialism", "Friedrich_Nietzsche",
                   "Idealism", "Immanuel_Kant", "List_of_political_philosophers", "Logic",
                   "Metaphysics", "Philosophers", "Philosophy", "Philosophy_of_mind", "Physics",
                   "Plato", "Political_philosophy", "Pythagoras", "Rationalism",
                   "Social_philosophy", "Socrates", "Subjectivity", "Theology",
                   "Truth", "Western_philosophy"]
def loadPageHTML(addressList):
    """Download HTML page content for every URL address passed as argument"""
    contentDict = {}
    for addr in addressList:
        with urllib.request.urlopen(addr) as response:
            raw_html = response.read().decode('utf-8')
        # Strip raw html of unnessecary content. Basically everything that
        # isn't link or text.
        contentDict[addr] = stripRawHTML(raw_html)
    return contentDict
def initPages(addressList):
    """Create a dictionary of pages from a list of URL addresses"""
    # One fresh Page object per address, keyed by that address.
    return {addr: Page(addr) for addr in addressList}
def stripRawHTML(raw_html):
    """Remove the <head> section of the HTML which contains links to stylesheets etc.,
    and remove all other unnecessary HTML"""
    # TODO: Strip more out of the raw html
    head_section = re.compile(r"<head>.*?</head>", re.DOTALL)
    return head_section.sub("", raw_html)  # remove <head> section
def determineInlinks(page):
    """Given a set of pages that have their outlinks determined, we can fill
    out a page's inlinks by looking through all other page's outlinks"""
    # A page never counts as linking to itself.
    return [addr for addr, indexPage in pagesIndex.items()
            if page.address != indexPage.address
            and page.address in indexPage.outlinks]
def findOutlinks(page, handleURLs=None):
    """Search a page's HTML content for URL links to other pages"""
    links = re.findall(r'href=[\'"]?([^\'" >]+)', pagesContent[page.address])
    # Optional post-processing hook, e.g. onlyWikipediaURLS below.
    return handleURLs(links) if handleURLs else links
def onlyWikipediaURLS(urls):
    """Some example HTML page data is from wikipedia. This function converts
    relative wikipedia links to full wikipedia URLs"""
    # Keep only relative /wiki/ links and prepend the site root.
    return ["https://en.wikipedia.org" + url
            for url in urls if url.startswith('/wiki/')]
# ______________________________________________________________________________
# HITS Helper Functions
def expand_pages(pages):
    """From Textbook: adds in every page that links to or is linked from one of
    the relevant pages."""
    expanded = {}
    for addr, page in pages.items():
        expanded.setdefault(addr, page)
        # Pull in neighbours in both link directions from the global index.
        for linked in page.inlinks:
            if linked not in expanded:
                expanded[linked] = pagesIndex[linked]
        for linked in page.outlinks:
            if linked not in expanded:
                expanded[linked] = pagesIndex[linked]
    return expanded
def relevant_pages(query):
    """Relevant pages are pages that contain the query in its entirety.

    Returns a dict of address -> Page for every page in the global pagesIndex
    whose cached content (pagesContent) contains *query*, case-insensitively.
    """
    # Fix: removed a leftover debug print of the whole pagesContent dict.
    # Hoist the query normalisation out of the loop.
    needle = query.lower()
    relevant = {}
    for addr, page in pagesIndex.items():
        if needle in pagesContent[addr].lower():
            relevant[addr] = page
    return relevant
def normalize(pages):
    """From the pseudocode: Normalize divides each page's score by the sum of
    the squares of all pages' scores (separately for both the authority and hubs scores).
    """
    hub_total = sum(page.hub ** 2 for page in pages.values())
    auth_total = sum(page.authority ** 2 for page in pages.values())
    for page in pages.values():
        page.hub /= hub_total
        page.authority /= auth_total
class ConvergenceDetector(object):
    """If the hub and authority values of the pages are no longer changing, we have
    reached a convergence and further iterations will have no effect. This detects convergence
    so that we can stop the HITS algorithm as early as possible."""

    def __init__(self):
        # Recent per-page score snapshots (lists of lists, newest last);
        # None until detect() has run at least once.
        self.hub_history = None
        self.auth_history = None

    def __call__(self):
        # Allow the instance itself to be called like a function.
        return self.detect()

    def detect(self):
        """Return True when the average per-page change in both hub and
        authority scores (vs. the previous snapshot) drops below 0.01.

        NOTE: reads the module-level pagesIndex global.
        """
        curr_hubs = [page.hub for addr, page in pagesIndex.items()]
        curr_auths = [page.authority for addr, page in pagesIndex.items()]

        if self.hub_history is None:
            # First call: nothing to compare against yet.
            self.hub_history, self.auth_history = [], []
        else:
            diffsHub = [abs(x-y) for x, y in zip(curr_hubs, self.hub_history[-1])]
            diffsAuth = [abs(x-y) for x, y in zip(curr_auths, self.auth_history[-1])]
            aveDeltaHub = sum(diffsHub)/float(len(pagesIndex))
            aveDeltaAuth = sum(diffsAuth)/float(len(pagesIndex))
            if aveDeltaHub < 0.01 and aveDeltaAuth < 0.01:  # may need tweaking
                return True

        if len(self.hub_history) > 2:  # prevent list from getting long
            del self.hub_history[0]
            del self.auth_history[0]
        # Store copies of the current snapshots for the next comparison.
        self.hub_history.append([x for x in curr_hubs])
        self.auth_history.append([x for x in curr_auths])
        return False
def getInlinks(page):
    """Return the Page objects for every address that links to *page*."""
    # Lazily compute and cache the inlink addresses on first use.
    if not page.inlinks:
        page.inlinks = determineInlinks(page)
    wanted = page.inlinks
    return [candidate for addr, candidate in pagesIndex.items() if addr in wanted]
def getOutlinks(page):
    """Return the Page objects for every address that *page* links to."""
    # Lazily compute and cache the outlink addresses on first use.
    if not page.outlinks:
        page.outlinks = findOutlinks(page)
    wanted = page.outlinks
    return [candidate for addr, candidate in pagesIndex.items() if addr in wanted]
# ______________________________________________________________________________
# HITS Algorithm
class Page(object):
    """A web page in the HITS computation: its address, link lists and scores."""

    def __init__(self, address, hub=0, authority=0, inlinks=None, outlinks=None):
        self.address = address      # URL (or relative location) identifying the page
        self.hub = hub              # HITS hub score
        self.authority = authority  # HITS authority score
        self.inlinks = inlinks      # addresses linking here (None = not yet computed)
        self.outlinks = outlinks    # addresses this page links to (None = not yet computed)
pagesContent = {}  # maps Page relative or absolute URL/location to page's HTML content
pagesIndex = {}  # maps URL/location to its Page object; shared by the HITS helpers
convergence = ConvergenceDetector()  # assign function to variable to mimic pseudocode's syntax
def HITS(query):
    """The HITS algorithm for computing hubs and authorities with respect to a query.

    Returns the expanded dict of address -> Page with .hub and .authority
    scores filled in.
    """
    pages = expand_pages(relevant_pages(query))  # in order to 'map' faithfully to pseudocode we
    # won't pass the list of pages as an argument
    # BUG FIX: `pages` is a dict, so iterating it yields address *strings*;
    # setting attributes on those raised AttributeError. Iterate the Pages.
    for p in pages.values():
        p.authority = 1
        p.hub = 1
    while True:  # repeat until... convergence
        for p in pages.values():
            p.authority = sum(x.hub for x in getInlinks(p))  # p.authority ← ∑i Inlinki(p).Hub
            p.hub = sum(x.authority for x in getOutlinks(p))  # p.hub ← ∑i Outlinki(p).Authority
        normalize(pages)
        if convergence():
            break
    return pages
| {
"content_hash": "a09a2cb25c57512605367be5d8955aac",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 114,
"avg_line_length": 38.221105527638194,
"alnum_prop": 0.562056271364712,
"repo_name": "sofmonk/aima-python",
"id": "bf0b6a6aa8809c58143af3a146684119ab04a235",
"size": "15220",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch",
"path": "nlp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9816"
},
{
"name": "Jupyter Notebook",
"bytes": "1545410"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "363169"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Project import: the single source of truth for the package version.
from contact_form import VERSION

REQUIREMENTS = (
    'django>=1.11,<3',
)

TEST_REQUIREMENTS = (
    'mock',
    'django-debug-toolbar',
)


def _read_long_description():
    # Fix: use a context manager so the README handle is closed promptly
    # (the bare open(...).read() leaked the file object).
    with open('README.rst', 'r') as f:
        return f.read()


setup(
    name="django-contact-form-gv",
    version=VERSION,
    author="Aaron Madison",
    description="Django Contact Form using class based views.",
    long_description=_read_long_description(),
    url="https://github.com/madisona/django-contact-form",
    packages=find_packages(exclude=["example*"]),
    include_package_data=True,
    install_requires=REQUIREMENTS,
    tests_require=TEST_REQUIREMENTS,
    test_suite='runtests.runtests',
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Framework :: Django",
        "Framework :: Django :: 1.11",
        "Framework :: Django :: 2.0",
        "Framework :: Django :: 2.1",
        "Framework :: Django :: 2.2",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
    ],
)
| {
"content_hash": "a5beb58074a12cbc729b395abc9ece5d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 32.62222222222222,
"alnum_prop": 0.6151226158038147,
"repo_name": "madisona/django-contact-form",
"id": "00f29bc9ec2eafff69aa2bf7b63b036adbda43c9",
"size": "1468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "914"
},
{
"name": "Python",
"bytes": "21813"
}
],
"symlink_target": ""
} |
from . import test_account_followup

# Test suites collected for this addon's test runner.
checks = [
    test_account_followup,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | {
"content_hash": "48f7c303a646fbd5257c96bac12f7a83",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 20.428571428571427,
"alnum_prop": 0.7552447552447552,
"repo_name": "diogocs1/comps",
"id": "cf0ea3acbe63fd7e6460ee85059676f44a68ebf7",
"size": "1128",
"binary": false,
"copies": "173",
"ref": "refs/heads/master",
"path": "web/addons/account_followup/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
"""
Implement the cmath module functions.
"""
import cmath
import math
from numba.core.imputils import Registry, impl_ret_untracked
from numba.core import types, cgutils
from numba.core.typing import signature
from numba.cpython import builtins, mathimpl
from numba.core.extending import overload
# Registry collecting all cmath lowering implementations in this module;
# `lower` is the decorator used to register them against cmath functions.
registry = Registry('cmathimpl')
lower = registry.lower
def is_nan(builder, z):
    """Return an i1 that is true when either component of complex *z* is NaN
    (an unordered fcmp is true iff at least one operand is NaN)."""
    return builder.fcmp_unordered('uno', z.real, z.imag)
def is_inf(builder, z):
    """Return an i1 that is true when either component of complex *z* is infinite."""
    return builder.or_(mathimpl.is_inf(builder, z.real),
                       mathimpl.is_inf(builder, z.imag))
def is_finite(builder, z):
    """Return an i1 that is true when both components of complex *z* are finite."""
    return builder.and_(mathimpl.is_finite(builder, z.real),
                        mathimpl.is_finite(builder, z.imag))
@lower(cmath.isnan, types.Complex)
def isnan_float_impl(context, builder, sig, args):
    """Lower cmath.isnan(z) for complex types."""
    [typ] = sig.args
    [value] = args
    # Unpack the raw value into a proxy exposing .real/.imag IR values.
    z = context.make_complex(builder, typ, value=value)
    res = is_nan(builder, z)
    return impl_ret_untracked(context, builder, sig.return_type, res)
@lower(cmath.isinf, types.Complex)
def isinf_float_impl(context, builder, sig, args):
    """Lower cmath.isinf(z) for complex types."""
    [typ] = sig.args
    [value] = args
    z = context.make_complex(builder, typ, value=value)
    res = is_inf(builder, z)
    return impl_ret_untracked(context, builder, sig.return_type, res)
@lower(cmath.isfinite, types.Complex)
def isfinite_float_impl(context, builder, sig, args):
    """Lower cmath.isfinite(z) for complex types."""
    [typ] = sig.args
    [value] = args
    z = context.make_complex(builder, typ, value=value)
    res = is_finite(builder, z)
    return impl_ret_untracked(context, builder, sig.return_type, res)
@overload(cmath.rect)
def impl_cmath_rect(r, phi):
    """Overload cmath.rect(r, phi) for float arguments: polar -> complex."""
    if all([isinstance(typ, types.Float) for typ in [r, phi]]):
        def impl(r, phi):
            """Return r * (cos(phi) + sin(phi)*1j), with C99-style special cases."""
            if not math.isfinite(phi):
                if not r:
                    # cmath.rect(0, phi={inf, nan}) = 0
                    return abs(r)
                if math.isinf(r):
                    # cmath.rect(inf, phi={inf, nan}) = inf + j phi
                    return complex(r, phi)
            real = math.cos(phi)
            imag = math.sin(phi)
            if real == 0. and math.isinf(r):
                # 0 * inf would return NaN, we want to keep 0 but xor the sign
                real /= r
            else:
                real *= r
            if imag == 0. and math.isinf(r):
                # ditto
                imag /= r
            else:
                imag *= r
            return complex(real, imag)
        return impl
def intrinsic_complex_unary(inner_func):
    """Wrap a pure-Python function of (x, y, x_is_finite, y_is_finite) into a
    lowering function for a unary complex operation."""
    def wrapper(context, builder, sig, args):
        [typ] = sig.args
        [value] = args
        z = context.make_complex(builder, typ, value=value)
        x = z.real
        y = z.imag
        # Same as above: math.isfinite() is unavailable on 2.x so we precompute
        # its value and pass it to the pure Python implementation.
        x_is_finite = mathimpl.is_finite(builder, x)
        y_is_finite = mathimpl.is_finite(builder, y)
        # inner_func takes two floats and two booleans.
        inner_sig = signature(sig.return_type,
                              *(typ.underlying_float,) * 2 + (types.boolean,) * 2)
        res = context.compile_internal(builder, inner_func, inner_sig,
                                       (x, y, x_is_finite, y_is_finite))
        return impl_ret_untracked(context, builder, sig, res)
    return wrapper
# Float special values used by the pure-Python implementations below.
NAN = float('nan')
INF = float('inf')
@lower(cmath.exp, types.Complex)
@intrinsic_complex_unary
def exp_impl(x, y, x_is_finite, y_is_finite):
    """cmath.exp(x + y j)"""
    if x_is_finite:
        if y_is_finite:
            # Ordinary case: exp(x) * (cos(y) + sin(y) j)
            c = math.cos(y)
            s = math.sin(y)
            r = math.exp(x)
            return complex(r * c, r * s)
        else:
            # finite magnitude, non-finite angle: result is undefined
            return complex(NAN, NAN)
    elif math.isnan(x):
        if y:
            return complex(x, x)  # nan + j nan
        else:
            return complex(x, y)  # nan + 0j
    elif x > 0.0:
        # x == +inf
        if y_is_finite:
            real = math.cos(y)
            imag = math.sin(y)
            # Avoid NaNs if math.cos(y) or math.sin(y) == 0
            # (e.g. cmath.exp(inf + 0j) == inf + 0j)
            if real != 0:
                real *= x
            if imag != 0:
                imag *= x
            return complex(real, imag)
        else:
            return complex(x, NAN)
    else:
        # x == -inf: exp(-inf) damps both components towards zero
        if y_is_finite:
            r = math.exp(x)
            c = math.cos(y)
            s = math.sin(y)
            return complex(r * c, r * s)
        else:
            r = 0
            return complex(r, r)
@lower(cmath.log, types.Complex)
@intrinsic_complex_unary
def log_impl(x, y, x_is_finite, y_is_finite):
    """cmath.log(x + y j)"""
    # log|z| + i*arg(z); hypot and atan2 handle signed zeros and infinities.
    a = math.log(math.hypot(x, y))
    b = math.atan2(y, x)
    return complex(a, b)
@lower(cmath.log, types.Complex, types.Complex)
def log_base_impl(context, builder, sig, args):
    """Lower the two-argument form cmath.log(z, base) as log(z) / log(base)."""
    def _log_any_base(z, base):
        return cmath.log(z) / cmath.log(base)

    res = context.compile_internal(builder, _log_any_base, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@overload(cmath.log10)
def impl_cmath_log10(z):
    """Overload cmath.log10(z) for complex types."""
    if not isinstance(z, types.Complex):
        return
    # Natural-log of 10, used to rescale cmath.log.
    LN_10 = 2.302585092994045684

    def log10_impl(z):
        """cmath.log10(z)"""
        z = cmath.log(z)
        # This formula gives better results on +/-inf than cmath.log(z, 10)
        # See http://bugs.python.org/issue22544
        return complex(z.real / LN_10, z.imag / LN_10)

    return log10_impl
@overload(cmath.phase)
def phase_impl(x):
    """Overload cmath.phase(z): the argument (angle) of a complex number."""
    if isinstance(x, types.Complex):
        def impl(x):
            return math.atan2(x.imag, x.real)
        return impl
@overload(cmath.polar)
def polar_impl(x):
    """Overload cmath.polar(z): return (modulus, phase)."""
    if isinstance(x, types.Complex):
        def impl(x):
            re, im = x.real, x.imag
            # hypot gives the modulus, atan2 the argument.
            return math.hypot(re, im), math.atan2(im, re)
        return impl
@lower(cmath.sqrt, types.Complex)
def sqrt_impl(context, builder, sig, args):
    """Lower cmath.sqrt(z) using NumPy's npy_csqrt algorithm."""
    # We risk spurious overflow for components >= FLT_MAX / (1 + sqrt(2)).
    SQRT2 = 1.414213562373095048801688724209698079E0
    ONE_PLUS_SQRT2 = (1. + SQRT2)
    theargflt = sig.args[0].underlying_float
    # Get a type specific maximum value so scaling for overflow is based on that
    MAX = mathimpl.DBL_MAX if theargflt.bitwidth == 64 else mathimpl.FLT_MAX
    # THRES will be double precision, should not impact typing as it's just
    # used for comparison, there *may* be a few values near THRES which
    # deviate from e.g. NumPy due to rounding that occurs in the computation
    # of this value in the case of a 32bit argument.
    THRES = MAX / ONE_PLUS_SQRT2

    def sqrt_impl(z):
        """cmath.sqrt(z)"""
        # This is NumPy's algorithm, see npy_csqrt() in npy_math_complex.c.src
        a = z.real
        b = z.imag
        if a == 0.0 and b == 0.0:
            return complex(abs(b), b)
        if math.isinf(b):
            return complex(abs(b), b)
        if math.isnan(a):
            return complex(a, a)
        if math.isinf(a):
            if a < 0.0:
                return complex(abs(b - b), math.copysign(a, b))
            else:
                return complex(a, math.copysign(b - b, b))
        # The remaining special case (b is NaN) is handled just fine by
        # the normal code path below.
        # Scale to avoid overflow
        if abs(a) >= THRES or abs(b) >= THRES:
            a *= 0.25
            b *= 0.25
            scale = True
        else:
            scale = False
        # Algorithm 312, CACM vol 10, Oct 1967
        if a >= 0:
            t = math.sqrt((a + math.hypot(a, b)) * 0.5)
            real = t
            imag = b / (2 * t)
        else:
            t = math.sqrt((-a + math.hypot(a, b)) * 0.5)
            real = abs(b) / (2 * t)
            imag = math.copysign(t, b)
        # Rescale: sqrt(z/4) * 2 == sqrt(z), so only the real part doubles
        # after the quarter-scaling above.
        if scale:
            return complex(real * 2, imag)
        else:
            return complex(real, imag)

    res = context.compile_internal(builder, sqrt_impl, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@lower(cmath.cos, types.Complex)
def cos_impl(context, builder, sig, args):
    """Lower cmath.cos via the identity cos(z) = cosh(i*z)."""
    def _cos(z):
        return cmath.cosh(complex(-z.imag, z.real))

    res = context.compile_internal(builder, _cos, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@overload(cmath.cosh)
def impl_cmath_cosh(z):
    """Overload cmath.cosh(z) for complex types."""
    if not isinstance(z, types.Complex):
        return

    def cosh_impl(z):
        """cmath.cosh(z)"""
        x = z.real
        y = z.imag
        if math.isinf(x):
            if math.isnan(y):
                # x = +inf, y = NaN => cmath.cosh(x + y j) = inf + Nan * j
                real = abs(x)
                imag = y
            elif y == 0.0:
                # x = +inf, y = 0 => cmath.cosh(x + y j) = inf + 0j
                real = abs(x)
                imag = y
            else:
                real = math.copysign(x, math.cos(y))
                imag = math.copysign(x, math.sin(y))
                if x < 0.0:
                    # x = -inf => negate imaginary part of result
                    imag = -imag
            return complex(real, imag)
        # Finite real part: textbook expansion of cosh over C.
        return complex(math.cos(y) * math.cosh(x),
                       math.sin(y) * math.sinh(x))

    return cosh_impl
@lower(cmath.sin, types.Complex)
def sin_impl(context, builder, sig, args):
    """Lower cmath.sin via the identity sin(z) = -i * sinh(i*z)."""
    def _sin(z):
        rotated = cmath.sinh(complex(-z.imag, z.real))
        return complex(rotated.imag, -rotated.real)

    res = context.compile_internal(builder, _sin, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@overload(cmath.sinh)
def impl_cmath_sinh(z):
    """Overload cmath.sinh(z) for complex types."""
    if not isinstance(z, types.Complex):
        return

    def sinh_impl(z):
        """cmath.sinh(z)"""
        x = z.real
        y = z.imag
        if math.isinf(x):
            if math.isnan(y):
                # x = +/-inf, y = NaN => cmath.sinh(x + y j) = x + NaN * j
                real = x
                imag = y
            else:
                real = math.cos(y)
                imag = math.sin(y)
                # Multiply only non-zero components to avoid 0 * inf = NaN.
                if real != 0.:
                    real *= x
                if imag != 0.:
                    imag *= abs(x)
            return complex(real, imag)
        # Finite real part: textbook expansion of sinh over C.
        return complex(math.cos(y) * math.sinh(x),
                       math.sin(y) * math.cosh(x))

    return sinh_impl
@lower(cmath.tan, types.Complex)
def tan_impl(context, builder, sig, args):
    """Lower cmath.tan via the identity tan(z) = -i * tanh(i*z)."""
    def _tan(z):
        rotated = cmath.tanh(complex(-z.imag, z.real))
        return complex(rotated.imag, -rotated.real)

    res = context.compile_internal(builder, _tan, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@overload(cmath.tanh)
def impl_cmath_tanh(z):
    """Overload cmath.tanh(z) for complex types."""
    if not isinstance(z, types.Complex):
        return

    def tanh_impl(z):
        """cmath.tanh(z)"""
        x = z.real
        y = z.imag
        if math.isinf(x):
            # tanh saturates to +/-1 on the real axis.
            real = math.copysign(1., x)
            if math.isinf(y):
                imag = 0.
            else:
                imag = math.copysign(0., math.sin(2. * y))
            return complex(real, imag)
        # This is CPython's algorithm (see c_tanh() in cmathmodule.c).
        # XXX how to force float constants into single precision?
        tx = math.tanh(x)
        ty = math.tan(y)
        cx = 1. / math.cosh(x)
        txty = tx * ty
        denom = 1. + txty * txty
        return complex(
            tx * (1. + ty * ty) / denom,
            ((ty / denom) * cx) * cx)

    return tanh_impl
@lower(cmath.acos, types.Complex)
def acos_impl(context, builder, sig, args):
    """Lower cmath.acos(z) using CPython's c_acos algorithm."""
    LN_4 = math.log(4)
    THRES = mathimpl.FLT_MAX / 4

    def acos_impl(z):
        """cmath.acos(z)"""
        # CPython's algorithm (see c_acos() in cmathmodule.c)
        if abs(z.real) > THRES or abs(z.imag) > THRES:
            # Avoid unnecessary overflow for large arguments
            # (also handles infinities gracefully)
            real = math.atan2(abs(z.imag), z.real)
            imag = math.copysign(
                math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4,
                -z.imag)
            return complex(real, imag)
        else:
            s1 = cmath.sqrt(complex(1. - z.real, -z.imag))
            s2 = cmath.sqrt(complex(1. + z.real, z.imag))
            real = 2. * math.atan2(s1.real, s2.real)
            imag = math.asinh(s2.real * s1.imag - s2.imag * s1.real)
            return complex(real, imag)

    res = context.compile_internal(builder, acos_impl, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@overload(cmath.acosh)
def impl_cmath_acosh(z):
    """Overload cmath.acosh(z) for complex types."""
    if not isinstance(z, types.Complex):
        return
    LN_4 = math.log(4)
    THRES = mathimpl.FLT_MAX / 4

    def acosh_impl(z):
        """cmath.acosh(z)"""
        # CPython's algorithm (see c_acosh() in cmathmodule.c)
        if abs(z.real) > THRES or abs(z.imag) > THRES:
            # Avoid unnecessary overflow for large arguments
            # (also handles infinities gracefully)
            real = math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4
            imag = math.atan2(z.imag, z.real)
            return complex(real, imag)
        else:
            s1 = cmath.sqrt(complex(z.real - 1., z.imag))
            s2 = cmath.sqrt(complex(z.real + 1., z.imag))
            real = math.asinh(s1.real * s2.real + s1.imag * s2.imag)
            imag = 2. * math.atan2(s1.imag, s2.real)
            return complex(real, imag)
        # Condensed formula (NumPy)
        #return cmath.log(z + cmath.sqrt(z + 1.) * cmath.sqrt(z - 1.))

    return acosh_impl
@lower(cmath.asinh, types.Complex)
def asinh_impl(context, builder, sig, args):
    """Lower cmath.asinh(z) using CPython's c_asinh algorithm."""
    LN_4 = math.log(4)
    THRES = mathimpl.FLT_MAX / 4

    def asinh_impl(z):
        """cmath.asinh(z)"""
        # CPython's algorithm (see c_asinh() in cmathmodule.c)
        if abs(z.real) > THRES or abs(z.imag) > THRES:
            # Large arguments: log-form avoids overflow in the sqrt path.
            real = math.copysign(
                math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4,
                z.real)
            imag = math.atan2(z.imag, abs(z.real))
            return complex(real, imag)
        else:
            s1 = cmath.sqrt(complex(1. + z.imag, -z.real))
            s2 = cmath.sqrt(complex(1. - z.imag, z.real))
            real = math.asinh(s1.real * s2.imag - s2.real * s1.imag)
            imag = math.atan2(z.imag, s1.real * s2.real - s1.imag * s2.imag)
            return complex(real, imag)

    res = context.compile_internal(builder, asinh_impl, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
@lower(cmath.asin, types.Complex)
def asin_impl(context, builder, sig, args):
    """Lower ``cmath.asin`` by reducing it to ``cmath.asinh``."""
    def codegen(z):
        """cmath.asin(z) = -j * cmath.asinh(z j)"""
        # Multiply z by j (rotate), take asinh, then multiply by -j.
        rotated = cmath.asinh(complex(-z.imag, z.real))
        return complex(rotated.imag, -rotated.real)
    result = context.compile_internal(builder, codegen, sig, args)
    return impl_ret_untracked(context, builder, sig, result)
@lower(cmath.atan, types.Complex)
def atan_impl(context, builder, sig, args):
    """Lower ``cmath.atan`` by reducing it to ``cmath.atanh``."""
    def codegen(z):
        """cmath.atan(z) = -j * cmath.atanh(z j)"""
        rotated = cmath.atanh(complex(-z.imag, z.real))
        # XXX odd special case inherited from the original implementation:
        # inf real with nan imag does not negate the real part.
        if math.isinf(z.real) and math.isnan(z.imag):
            return complex(rotated.imag, rotated.real)
        return complex(rotated.imag, -rotated.real)
    result = context.compile_internal(builder, codegen, sig, args)
    return impl_ret_untracked(context, builder, sig, result)
@lower(cmath.atanh, types.Complex)
def atanh_impl(context, builder, sig, args):
    """Lower ``cmath.atanh`` for complex arguments by compiling a
    pure-Python kernel with numba's internal compiler."""
    LN_4 = math.log(4)  # NOTE(review): unused in this kernel; kept for parity with siblings.
    THRES_LARGE = math.sqrt(mathimpl.FLT_MAX / 4)
    THRES_SMALL = math.sqrt(mathimpl.FLT_MIN)
    PI_12 = math.pi / 2

    def atanh_impl(z):
        """cmath.atanh(z)"""
        # CPython's algorithm (see c_atanh() in cmathmodule.c)
        if z.real < 0.:
            # Reduce to case where z.real >= 0., using atanh(z) = -atanh(-z).
            negate = True
            z = -z
        else:
            negate = False
        ay = abs(z.imag)
        if math.isnan(z.real) or z.real > THRES_LARGE or ay > THRES_LARGE:
            # Large-magnitude / non-finite path.
            if math.isinf(z.imag):
                real = math.copysign(0., z.real)
            elif math.isinf(z.real):
                real = 0.
            else:
                # may be safe from overflow, depending on hypot's implementation...
                h = math.hypot(z.real * 0.5, z.imag * 0.5)
                real = z.real/4./h/h
            imag = -math.copysign(PI_12, -z.imag)
        elif z.real == 1. and ay < THRES_SMALL:
            # C99 standard says: atanh(1+/-0.) should be inf +/- 0j
            if ay == 0.:
                real = INF
                imag = z.imag
            else:
                real = -math.log(math.sqrt(ay) /
                                 math.sqrt(math.hypot(ay, 2.)))
                imag = math.copysign(math.atan2(2., -ay) / 2, z.imag)
        else:
            # General path.
            sqay = ay * ay
            zr1 = 1 - z.real
            real = math.log1p(4. * z.real / (zr1 * zr1 + sqay)) * 0.25
            imag = -math.atan2(-2. * z.imag,
                               zr1 * (1 + z.real) - sqay) * 0.5
        if math.isnan(z.imag):
            imag = NAN
        if negate:
            return complex(-real, -imag)
        else:
            return complex(real, imag)
    res = context.compile_internal(builder, atanh_impl, sig, args)
    return impl_ret_untracked(context, builder, sig, res)
| {
"content_hash": "21edfd33342ba77f52f007dbd0dce9f8",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 83,
"avg_line_length": 32.48523985239852,
"alnum_prop": 0.5363207815073551,
"repo_name": "numba/numba",
"id": "4305e9d45dde68b5f32b56e462bd5b064bfaed18",
"size": "17607",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "numba/cpython/cmathimpl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3522"
},
{
"name": "C",
"bytes": "574888"
},
{
"name": "C++",
"bytes": "166526"
},
{
"name": "Cuda",
"bytes": "2063"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "9400448"
},
{
"name": "Shell",
"bytes": "13621"
}
],
"symlink_target": ""
} |
"""
Test for softmax_regression.ipynb
"""
import os
from pylearn2.testing.skip import skip_if_no_data
from pylearn2.config import yaml_parse
def test():
    """Train the softmax-regression tutorial model on a tiny data slice.

    Loads the dataset/model/algorithm/train YAML fragments from the
    tutorial directory, shrinks the dataset and batch size so the run is
    fast, executes one training loop, and removes the pickles the run
    leaves behind.
    """
    skip_if_no_data()
    dirname = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
    with open(os.path.join(dirname, 'sr_dataset.yaml'), 'r') as f:
        dataset = f.read()
    hyper_params = {'train_stop': 50}
    dataset = dataset % (hyper_params)
    with open(os.path.join(dirname, 'sr_model.yaml'), 'r') as f:
        model = f.read()
    with open(os.path.join(dirname, 'sr_algorithm.yaml'), 'r') as f:
        algorithm = f.read()
    hyper_params = {'batch_size': 10,
                    'valid_stop': 50050}
    algorithm = algorithm % (hyper_params)
    with open(os.path.join(dirname, 'sr_train.yaml'), 'r') as f:
        train = f.read()
    save_path = os.path.dirname(os.path.realpath(__file__))
    # locals() supplies dataset/model/algorithm/save_path to the train YAML.
    train = train % locals()
    train = yaml_parse.load(train)
    train.main_loop()
    # Best-effort cleanup; the pickles may be absent if saving was skipped.
    # Was a bare `except: pass`, which also swallowed KeyboardInterrupt and
    # SystemExit — only filesystem errors are expected here.
    try:
        os.remove("{}/softmax_regression.pkl".format(save_path))
        os.remove("{}/softmax_regression_best.pkl".format(save_path))
    except OSError:
        pass
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    test()
| {
"content_hash": "79e1b6dcbf8ceaafc221ad0f7fbb8d54",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 24.8125,
"alnum_prop": 0.5936188077246012,
"repo_name": "KennethPierce/pylearnk",
"id": "a14ce0b609595713beca48d4060ac87e9db71034",
"size": "1191",
"binary": false,
"copies": "3",
"ref": "refs/heads/fixNogil/master",
"path": "pylearn2/scripts/tutorials/softmax_regression/tests/test_softmaxreg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "53316"
},
{
"name": "C++",
"bytes": "46935"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1266727"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3410626"
},
{
"name": "Shell",
"bytes": "4195"
}
],
"symlink_target": ""
} |
import argparse
import collections
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pysam
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
    """Parse a delimiter-separated text file into a 2-D string array.

    Lines beginning with '#' are treated as comments and skipped; every
    field is kept as a string.
    """
    with open(file, 'r') as handle:
        return np.genfromtxt(handle, skip_header=0, delimiter=delim,
                             comments='#', dtype=str)
def make_argparser():
    """Build the command-line parser for the family-size distribution tool.

    Returns
    -------
    argparse.ArgumentParser
        Parser exposing the input tabular/BAM/BED files and the names of
        the PDF and tabular output files.
    """
    arg_parser = argparse.ArgumentParser(
        description='Family Size Distribution of tags which were aligned to regions of the reference genome')
    arg_parser.add_argument(
        '--inputFile',
        help='Tabular File with three columns: ab or ba, tag and family size.')
    arg_parser.add_argument('--inputName1')
    arg_parser.add_argument('--bamFile', help='BAM file with aligned reads.')
    arg_parser.add_argument(
        '--rangesFile', default=None,
        help='BED file with chromosome, start and stop positions.')
    arg_parser.add_argument(
        '--output_pdf', default="data.pdf", type=str,
        help='Name of the pdf and tabular file.')
    arg_parser.add_argument(
        '--output_tabular', default="data.tabular", type=str,
        help='Name of the pdf and tabular file.')
    return arg_parser
def compare_read_families_refGenome(argv):
    """Plot and tabulate family-size distributions of tags aligned to a
    reference genome.

    Reads a tabular file (family size, tag sequence, ab/ba strand), a BAM
    file of aligned reads, and optionally a BED file of regions.  Produces
    a stacked histogram PDF and a tabular summary per region/reference.
    Family sizes above 20 are clipped into a single ">20" bin.
    """
    parser = make_argparser()
    args = parser.parse_args(argv[1:])
    firstFile = args.inputFile
    name1 = args.inputName1
    name1 = name1.split(".tabular")[0]
    bamFile = args.bamFile
    rangesFile = args.rangesFile
    title_file = args.output_pdf
    title_file2 = args.output_tabular
    sep = "\t"
    with open(title_file2, "w") as output_file, PdfPages(title_file) as pdf:
        data_array = readFileReferenceFree(firstFile, "\t")
        pysam.index(bamFile)
        bam = pysam.AlignmentFile(bamFile, "rb")
        # Maps region label (or reference name) -> list of tags seen there.
        qname_dict = collections.OrderedDict()
        if rangesFile is not None:
            # Restrict the analysis to the regions given in the BED file.
            with open(rangesFile, 'r') as regs:
                range_array = np.genfromtxt(regs, skip_header=0, delimiter='\t', comments='#', dtype=str)
            if range_array.ndim == 0:
                print("Error: file has 0 lines")
                exit(2)
            if range_array.ndim == 1:
                # Single region: normalize scalars into one-element lists.
                chrList = range_array[0]
                start_posList = range_array[1].astype(int)
                stop_posList = range_array[2].astype(int)
                chrList = [chrList.tolist()]
                start_posList = [start_posList.tolist()]
                stop_posList = [stop_posList.tolist()]
            else:
                chrList = range_array[:, 0]
                start_posList = range_array[:, 1].astype(int)
                stop_posList = range_array[:, 2].astype(int)
            if len(start_posList) != len(stop_posList):
                print("start_positions and end_positions do not have the same length")
                exit(3)
            chrList = np.array(chrList)
            start_posList = np.array(start_posList).astype(int)
            stop_posList = np.array(stop_posList).astype(int)
            for chr, start_pos, stop_pos in zip(chrList, start_posList, stop_posList):
                chr_start_stop = "{}_{}_{}".format(chr, start_pos, stop_pos)
                qname_dict[chr_start_stop] = []
                for read in bam.fetch(chr, start_pos, stop_pos):
                    if not read.is_unmapped:
                        # Tag is the part of the query name before the first '_'.
                        if re.search('_', read.query_name):
                            tags = re.split('_', read.query_name)[0]
                        else:
                            tags = read.query_name
                        qname_dict[chr_start_stop].append(tags)
        else:
            # No BED file: group tags by the reference sequence they map to.
            for read in bam.fetch():
                if not read.is_unmapped:
                    if re.search(r'_', read.query_name):
                        tags = re.split('_', read.query_name)[0]
                    else:
                        tags = read.query_name
                    if read.reference_name not in qname_dict:
                        qname_dict[read.reference_name] = [tags]
                    else:
                        qname_dict[read.reference_name].append(tags)
        # Split family sizes by strand (ab/ba) keyed on tag sequence.
        seq = np.array(data_array[:, 1])
        tags = np.array(data_array[:, 2])
        quant = np.array(data_array[:, 0]).astype(int)
        group = np.array(list(qname_dict.keys()))
        all_ab = seq[np.where(tags == "ab")[0]]
        all_ba = seq[np.where(tags == "ba")[0]]
        quant_ab = quant[np.where(tags == "ab")[0]]
        quant_ba = quant[np.where(tags == "ba")[0]]
        seqDic_ab = dict(zip(all_ab, quant_ab))
        seqDic_ba = dict(zip(all_ba, quant_ba))
        lst_ab = []
        lst_ba = []
        quantAfterRegion = []
        length_regions = 0
        for i in group:
            lst_ab_r = []
            lst_ba_r = []
            seq_mut = qname_dict[i]
            if rangesFile is None:
                # Count each tag only once per reference when no BED is given.
                seq_mut, seqMut_index = np.unique(np.array(seq_mut), return_index=True)
            length_regions = length_regions + len(seq_mut) * 2
            for r in seq_mut:
                count_ab = seqDic_ab.get(r)
                count_ba = seqDic_ba.get(r)
                lst_ab_r.append(count_ab)
                lst_ab.append(count_ab)
                lst_ba_r.append(count_ba)
                lst_ba.append(count_ba)
            dataAB = np.array(lst_ab_r)
            dataBA = np.array(lst_ba_r)
            # Clip family sizes > 20 into the ">20" bin (plotted at 22).
            bigFamilies = np.where(dataAB > 20)[0]
            dataAB[bigFamilies] = 22
            bigFamilies = np.where(dataBA > 20)[0]
            dataBA[bigFamilies] = 22
            quantAll = np.concatenate((dataAB, dataBA))
            quantAfterRegion.append(quantAll)
        quant_ab = np.array(lst_ab)
        quant_ba = np.array(lst_ba)
        maximumX = np.amax(np.concatenate(quantAfterRegion))
        minimumX = np.amin(np.concatenate(quantAfterRegion))
        # PLOT
        plt.rc('figure', figsize=(11.69, 8.27))  # A4 format
        plt.rcParams['axes.facecolor'] = "E0E0E0"  # grey background color
        plt.rcParams['xtick.labelsize'] = 14
        plt.rcParams['ytick.labelsize'] = 14
        plt.rcParams['patch.edgecolor'] = "black"
        fig = plt.figure()
        plt.subplots_adjust(bottom=0.3)
        colors = ["#6E6E6E", "#0431B4", "#5FB404", "#B40431", "#F4FA58", "#DF7401", "#81DAF5"]
        col = []
        for i in range(0, len(group)):
            col.append(colors[i])
        counts = plt.hist(quantAfterRegion, bins=range(minimumX, maximumX + 1), stacked=False, label=group,
                          align="left", alpha=1, color=col, edgecolor="black", linewidth=1)
        ticks = np.arange(minimumX - 1, maximumX, 1)
        ticks1 = [str(_) for _ in ticks]
        ticks1[len(ticks1) - 1] = ">20"
        plt.xticks(np.array(ticks), ticks1)
        count = np.bincount([int(_) for _ in quant_ab])  # original counts
        legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
        plt.text(0.15, 0.085, legend, size=11, transform=plt.gcf().transFigure)
        legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}".format(max(map(int, quant_ab)), count[len(count) - 1], float(count[len(count) - 1]) / sum(count), sum(np.array(data_array[:, 0]).astype(int)))
        plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
        count2 = np.bincount([int(_) for _ in quant_ba])  # original counts
        legend = "BA\n{}\n{}\n{:.5f}" \
            .format(max(map(int, quant_ba)), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
        plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
        plt.text(0.55, 0.2125, "total nr. of tags:", size=11, transform=plt.gcf().transFigure)
        plt.text(0.8, 0.2125, "{:,} ({:,})".format(length_regions, length_regions / 2), size=11,
                 transform=plt.gcf().transFigure)
        legend4 = "* In the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n"
        plt.text(0.1, 0.01, legend4, size=11, transform=plt.gcf().transFigure)
        space = 0
        for i, count in zip(group, quantAfterRegion):
            plt.text(0.55, 0.15 - space, "{}:\n".format(i), size=11, transform=plt.gcf().transFigure)
            plt.text(0.8, 0.15 - space, "{:,}\n".format(len(count) / 2), size=11, transform=plt.gcf().transFigure)
            space = space + 0.02
        plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
        plt.xlabel("Family size", fontsize=14)
        plt.ylabel("Absolute Frequency", fontsize=14)
        plt.grid(b=True, which="major", color="#424242", linestyle=":")
        plt.margins(0.01, None)
        pdf.savefig(fig, bbox_inch="tight")
        plt.close()
        # Tabular output mirrors the figure's summary numbers.
        output_file.write("Dataset:{}{}\n".format(sep, name1))
        output_file.write("{}AB{}BA\n".format(sep, sep))
        output_file.write("max. family size:{}{}{}{}\n".format(sep, max(map(int, quant_ab)), sep, max(map(int, quant_ba))))
        output_file.write("absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
        output_file.write("relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep, float(count2[len(count2) - 1]) / sum(count2)))
        output_file.write("total nr. of reads{}{}\n".format(sep, sum(np.array(data_array[:, 0]).astype(int))))
        output_file.write("total nr. of tags{}{} ({})\n".format(sep, length_regions, length_regions / 2))
        output_file.write("\n\nValues from family size distribution\n")
        output_file.write("{}".format(sep))
        for i in group:
            output_file.write("{}{}".format(i, sep))
        output_file.write("\n")
        j = 0
        for fs in counts[1][0:len(counts[1]) - 1]:
            if fs == 21:
                fs = ">20"
            else:
                fs = "={}".format(fs)
            output_file.write("FS{}{}".format(fs, sep))
            # counts[0] is 1-D for a single group, 2-D otherwise.
            if len(group) == 1:
                output_file.write("{}{}".format(int(counts[0][j]), sep))
            else:
                for n in range(len(group)):
                    output_file.write("{}{}".format(int(counts[0][n][j]), sep))
            output_file.write("\n")
            j += 1
        output_file.write("sum{}".format(sep))
        if len(group) == 1:
            output_file.write("{}{}".format(int(sum(counts[0])), sep))
        else:
            for i in counts[0]:
                output_file.write("{}{}".format(int(sum(i)), sep))
        output_file.write("\n")
        output_file.write("\n\nIn the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n")
        output_file.write("Region{}total nr. of tags per region\n".format(sep))
        for i, count in zip(group, quantAfterRegion):
            output_file.write("{}{}{}\n".format(i, sep, len(count) / 2))
    print("Files successfully created!")
# Command-line entry point.
if __name__ == '__main__':
    sys.exit(compare_read_families_refGenome(sys.argv))
| {
"content_hash": "cf3ec6d0eafb817f73b70ef580162d43",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 188,
"avg_line_length": 43.81818181818182,
"alnum_prop": 0.5616994407360635,
"repo_name": "mblue9/tools-iuc",
"id": "12215651e4d3f57cb8116ad1d086e6fcfc5eba9f",
"size": "11924",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/fsd/fsd_regions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4220"
},
{
"name": "HTML",
"bytes": "11538824"
},
{
"name": "Mako",
"bytes": "2116"
},
{
"name": "Max",
"bytes": "140358"
},
{
"name": "OpenEdge ABL",
"bytes": "1960016"
},
{
"name": "Pep8",
"bytes": "87474"
},
{
"name": "Perl",
"bytes": "60722"
},
{
"name": "Python",
"bytes": "714519"
},
{
"name": "R",
"bytes": "240948"
},
{
"name": "Rebol",
"bytes": "1225"
},
{
"name": "Roff",
"bytes": "3011"
},
{
"name": "Shell",
"bytes": "83592"
},
{
"name": "UnrealScript",
"bytes": "660637"
},
{
"name": "eC",
"bytes": "24"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import glob
import os
import xml.dom.minidom as DOM
from textwrap import dedent
import coverage
from mock import patch
from pants.backend.python.tasks.pytest_run import PytestRun
from pants.base.exceptions import TestFailedTaskError
from pants.util.contextutil import pushd
from pants.util.timeout import TimeoutReached
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonTestBuilderTestBase(PythonTaskTestBase):
    """Shared harness for driving the PytestRun task against test targets."""

    @classmethod
    def task_type(cls):
        return PytestRun

    def run_tests(self, targets, **options):
        """Execute the pytest task over `targets`; `options` override the defaults."""
        merged_options = dict(
            {
                'colors': False,
                'level': 'info',  # When debugging a test failure it may be helpful to set this to 'debug'.
            },
            **options)
        self.set_options(**merged_options)
        task = self.create_task(self.context(target_roots=targets))
        with pushd(self.build_root):
            task.execute()

    def run_failing_tests(self, targets, failed_targets, **options):
        """Execute the task, asserting it fails with exactly `failed_targets`."""
        with self.assertRaises(TestFailedTaskError) as caught:
            self.run_tests(targets=targets, **options)
        self.assertEqual(set(failed_targets), set(caught.exception.failed_targets))
class PythonTestBuilderTestEmpty(PythonTestBuilderTestBase):
    """Sanity check: running the pytest task with no targets succeeds."""

    def test_empty(self):
        self.run_tests(targets=[])
class PythonTestBuilderTest(PythonTestBuilderTestBase):
    """End-to-end tests of the PytestRun task: pass/fail reporting, timeouts,
    junit-xml output, coverage options and test sharding.

    ``setUp`` builds a tiny workspace: a ``lib:core`` library with two
    functions, plus several ``python_tests`` targets exercising it.
    """

    def setUp(self):
        super(PythonTestBuilderTest, self).setUp()
        # The "# line N" markers are relied on by the coverage assertions below.
        self.create_file(
            'lib/core.py',
            dedent("""
                def one(): # line 1
                    return 1 # line 2
                    # line 3
                    # line 4
                def two(): # line 5
                    return 2 # line 6
            """).strip())
        self.add_to_build_file(
            'lib',
            dedent("""
                python_library(
                    name='core',
                    sources=[
                        'core.py'
                    ]
                )
            """))
        self.create_file(
            'tests/test_core_green.py',
            dedent("""
                import unittest2 as unittest
                import core
                class CoreGreenTest(unittest.TestCase):
                    def test_one(self):
                        self.assertEqual(1, core.one())
            """))
        self.create_file(
            'tests/test_core_red.py',
            dedent("""
                import core
                def test_two():
                    assert 1 == core.two()
            """))
        self.create_file(
            'tests/test_core_red_in_class.py',
            dedent("""
                import unittest2 as unittest
                import core
                class CoreRedClassTest(unittest.TestCase):
                    def test_one_in_class(self):
                        self.assertEqual(1, core.two())
            """))
        self.create_file(
            'tests/test_core_sleep.py',
            dedent("""
                import core
                def test_three():
                    assert 1 == core.one()
            """))
        self.add_to_build_file(
            'tests',
            dedent("""
                python_tests(
                    name='green',
                    sources=[
                        'test_core_green.py'
                    ],
                    dependencies=[
                        'lib:core'
                    ],
                    coverage=[
                        'core'
                    ]
                )
                python_tests(
                    name='red',
                    sources=[
                        'test_core_red.py',
                    ],
                    dependencies=[
                        'lib:core'
                    ],
                    coverage=[
                        'core'
                    ]
                )
                python_tests(
                    name='red_in_class',
                    sources=[
                        'test_core_red_in_class.py',
                    ],
                    dependencies=[
                        'lib:core'
                    ],
                    coverage=[
                        'core'
                    ]
                )
                python_tests(
                    name='sleep_no_timeout',
                    sources=[
                        'test_core_sleep.py',
                    ],
                    timeout = 0,
                    dependencies=[
                        'lib:core'
                    ],
                    coverage=[
                        'core'
                    ]
                )
                python_tests(
                    name='sleep_timeout',
                    sources=[
                        'test_core_sleep.py',
                    ],
                    timeout = 1,
                    dependencies=[
                        'lib:core'
                    ],
                    coverage=[
                        'core'
                    ]
                )
                python_tests(
                    name='all',
                    sources=[
                        'test_core_green.py',
                        'test_core_red.py',
                    ],
                    dependencies=[
                        'lib:core'
                    ]
                )
                python_tests(
                    name='all-with-coverage',
                    sources=[
                        'test_core_green.py',
                        'test_core_red.py'
                    ],
                    dependencies=[
                        'lib:core'
                    ],
                    coverage=[
                        'core'
                    ]
                )
            """))
        self.green = self.target('tests:green')
        self.red = self.target('tests:red')
        self.red_in_class = self.target('tests:red_in_class')
        self.sleep_no_timeout = self.target('tests:sleep_no_timeout')
        self.sleep_timeout = self.target('tests:sleep_timeout')
        self.all = self.target('tests:all')
        self.all_with_coverage = self.target('tests:all-with-coverage')

    def test_green(self):
        self.run_tests(targets=[self.green])

    def test_red(self):
        self.run_failing_tests(targets=[self.red], failed_targets=[self.red])

    def test_red_test_in_class(self):
        # for test in a class, the failure line is in the following format
        # F testprojects/tests/python/pants/constants_only/test_fail.py::TestClassName::test_boom
        self.run_failing_tests(targets=[self.red_in_class], failed_targets=[self.red_in_class])

    def test_mixed(self):
        self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])

    def test_one_timeout(self):
        """When we have two targets, any of them doesn't have a timeout, and we have no default, then no timeout is set."""
        with patch('pants.backend.core.tasks.test_task_mixin.Timeout') as mock_timeout:
            self.run_tests(targets=[self.sleep_no_timeout, self.sleep_timeout])
            mock_timeout.assert_called_with(None)

    def test_timeout(self):
        """Check that a failed timeout returns the right results."""
        with patch('pants.backend.core.tasks.test_task_mixin.Timeout') as mock_timeout:
            mock_timeout().__exit__.side_effect = TimeoutReached(1)
            self.run_failing_tests(targets=[self.sleep_timeout],
                                   failed_targets=[self.sleep_timeout])
            mock_timeout.assert_called_with(1)

    def test_junit_xml_option(self):
        # We expect xml of the following form:
        # <testsuite errors=[Ne] failures=[Nf] skips=[Ns] tests=[Nt] ...>
        #   <testcase classname="..." name="..." .../>
        #   <testcase classname="..." name="..." ...>
        #     <failure ...>...</failure>
        #   </testcase>
        # </testsuite>
        report_basedir = os.path.join(self.build_root, 'dist', 'junit_option')
        self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                               junit_xml_dir=report_basedir)
        files = glob.glob(os.path.join(report_basedir, '*.xml'))
        self.assertEqual(1, len(files), 'Expected 1 file, found: {}'.format(files))
        junit_xml = files[0]
        root = DOM.parse(junit_xml).documentElement
        self.assertEqual(2, len(root.childNodes))
        self.assertEqual(2, int(root.getAttribute('tests')))
        self.assertEqual(1, int(root.getAttribute('failures')))
        self.assertEqual(0, int(root.getAttribute('errors')))
        self.assertEqual(0, int(root.getAttribute('skips')))
        children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
        self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
        self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
        self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)

    def coverage_data_file(self):
        """Path of the .coverage data file the task writes into the build root."""
        return os.path.join(self.build_root, '.coverage')

    def load_coverage_data(self, path):
        """Return (all_statements, not_run_statements) for `path` from the coverage data."""
        data_file = self.coverage_data_file()
        self.assertTrue(os.path.isfile(data_file))
        coverage_data = coverage.coverage(data_file=data_file)
        coverage_data.load()
        _, all_statements, not_run_statements, _ = coverage_data.analysis(path)
        return all_statements, not_run_statements

    def test_coverage_simple_option(self):
        # TODO(John Sirois): Consider eliminating support for "simple" coverage or at least formalizing
        # the coverage option value that turns this on to "1" or "all" or "simple" = anything formal.
        simple_coverage_kwargs = {'coverage': '1'}
        self.assertFalse(os.path.isfile(self.coverage_data_file()))
        covered_file = os.path.join(self.build_root, 'lib', 'core.py')
        self.run_tests(targets=[self.green], **simple_coverage_kwargs)
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([6], not_run_statements)
        self.run_failing_tests(targets=[self.red], failed_targets=[self.red], **simple_coverage_kwargs)
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([2], not_run_statements)
        self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                               **simple_coverage_kwargs)
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([], not_run_statements)
        # The all target has no coverage attribute and the code under test does not follow the
        # auto-discover pattern so we should get no coverage.
        self.run_failing_tests(targets=[self.all], failed_targets=[self.all], **simple_coverage_kwargs)
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([1, 2, 5, 6], not_run_statements)
        self.run_failing_tests(targets=[self.all_with_coverage],
                               failed_targets=[self.all_with_coverage],
                               **simple_coverage_kwargs)
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([], not_run_statements)

    def test_coverage_modules_dne_option(self):
        self.assertFalse(os.path.isfile(self.coverage_data_file()))
        covered_file = os.path.join(self.build_root, 'lib', 'core.py')
        # modules: should trump .coverage
        self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                               coverage='modules:does_not_exist,nor_does_this')
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([1, 2, 5, 6], not_run_statements)

    def test_coverage_modules_option(self):
        self.assertFalse(os.path.isfile(self.coverage_data_file()))
        covered_file = os.path.join(self.build_root, 'lib', 'core.py')
        self.run_failing_tests(targets=[self.all], failed_targets=[self.all], coverage='modules:core')
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([], not_run_statements)

    def test_coverage_paths_dne_option(self):
        self.assertFalse(os.path.isfile(self.coverage_data_file()))
        covered_file = os.path.join(self.build_root, 'lib', 'core.py')
        # paths: should trump .coverage
        self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                               coverage='paths:does_not_exist/,nor_does_this/')
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([1, 2, 5, 6], not_run_statements)

    def test_coverage_paths_option(self):
        self.assertFalse(os.path.isfile(self.coverage_data_file()))
        covered_file = os.path.join(self.build_root, 'lib', 'core.py')
        self.run_failing_tests(targets=[self.all], failed_targets=[self.all], coverage='paths:core.py')
        all_statements, not_run_statements = self.load_coverage_data(covered_file)
        self.assertEqual([1, 2, 5, 6], all_statements)
        self.assertEqual([], not_run_statements)

    def test_sharding(self):
        self.run_failing_tests(targets=[self.red, self.green], failed_targets=[self.red], shard='0/2')
        self.run_tests(targets=[self.red, self.green], shard='1/2')

    def test_sharding_single(self):
        self.run_failing_tests(targets=[self.red], failed_targets=[self.red], shard='0/1')

    def test_sharding_invalid_shard_too_small(self):
        with self.assertRaises(PytestRun.InvalidShardSpecification):
            self.run_tests(targets=[self.green], shard='-1/1')

    def test_sharding_invalid_shard_too_big(self):
        with self.assertRaises(PytestRun.InvalidShardSpecification):
            self.run_tests(targets=[self.green], shard='1/1')

    def test_sharding_invalid_shard_bad_format(self):
        with self.assertRaises(PytestRun.InvalidShardSpecification):
            self.run_tests(targets=[self.green], shard='1')
        with self.assertRaises(PytestRun.InvalidShardSpecification):
            self.run_tests(targets=[self.green], shard='1/2/3')
        with self.assertRaises(PytestRun.InvalidShardSpecification):
            self.run_tests(targets=[self.green], shard='1/a')
| {
"content_hash": "ec2109f54bff94c74ebf9917e6c8b176",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 119,
"avg_line_length": 35.58485639686684,
"alnum_prop": 0.6094357619781349,
"repo_name": "slyphon/pants",
"id": "fae836469436e77e362738035c0e20b38d62fd04",
"size": "13776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/python/tasks/test_pytest_run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "70362"
},
{
"name": "Java",
"bytes": "309840"
},
{
"name": "JavaScript",
"bytes": "28545"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4020643"
},
{
"name": "Scala",
"bytes": "85437"
},
{
"name": "Shell",
"bytes": "49550"
},
{
"name": "Thrift",
"bytes": "2858"
}
],
"symlink_target": ""
} |
import datetime
import os
from typing import Any, Dict, List
import orjson
from zerver.data_import.import_util import SubscriberHandler, ZerverFieldsT, build_recipients
from zerver.data_import.rocketchat import (
build_custom_emoji,
build_reactions,
categorize_channels_and_map_with_id,
convert_channel_data,
convert_huddle_data,
convert_stream_subscription_data,
do_convert_data,
map_receiver_id_to_recipient_id,
map_upload_id_to_upload_data,
map_user_id_to_user,
map_username_to_user_id,
process_message_attachment,
process_users,
rocketchat_data_to_dict,
separate_channel_private_and_livechat_messages,
truncate_name,
)
from zerver.data_import.sequencer import IdMapper
from zerver.data_import.user_handler import UserHandler
from zerver.lib.emoji import name_to_codepoint
from zerver.lib.import_realm import do_import_realm
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Message, Reaction, Recipient, UserProfile, get_realm, get_user
class RocketChatImporter(ZulipTestCase):
def test_rocketchat_data_to_dict(self) -> None:
fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
self.assert_length(rocketchat_data, 7)
self.assert_length(rocketchat_data["user"], 6)
self.assertEqual(rocketchat_data["user"][2]["username"], "harry.potter")
self.assert_length(rocketchat_data["user"][2]["__rooms"], 10)
self.assert_length(rocketchat_data["room"], 16)
self.assertEqual(rocketchat_data["room"][0]["_id"], "GENERAL")
self.assertEqual(rocketchat_data["room"][0]["name"], "general")
self.assert_length(rocketchat_data["message"], 87)
self.assertEqual(rocketchat_data["message"][1]["msg"], "Hey everyone, how's it going??")
self.assertEqual(rocketchat_data["message"][1]["rid"], "GENERAL")
self.assertEqual(rocketchat_data["message"][1]["u"]["username"], "priyansh3133")
self.assert_length(rocketchat_data["custom_emoji"]["emoji"], 3)
self.assertEqual(rocketchat_data["custom_emoji"]["emoji"][0]["name"], "tick")
self.assert_length(rocketchat_data["upload"]["upload"], 4)
self.assertEqual(rocketchat_data["upload"]["upload"][0]["name"], "harry-ron.jpg")
def test_map_user_id_to_user(self) -> None:
fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
self.assert_length(rocketchat_data["user"], 6)
self.assert_length(user_id_to_user_map, 6)
self.assertEqual(
user_id_to_user_map[rocketchat_data["user"][0]["_id"]], rocketchat_data["user"][0]
)
def test_map_username_to_user_id(self) -> None:
fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
username_to_user_id_map = map_username_to_user_id(user_id_to_user_map)
self.assert_length(rocketchat_data["user"], 6)
self.assert_length(username_to_user_id_map, 6)
self.assertEqual(
username_to_user_id_map[rocketchat_data["user"][0]["username"]],
rocketchat_data["user"][0]["_id"],
)
def test_process_users(self) -> None:
fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
realm_id = 3
domain_name = "zulip.com"
user_handler = UserHandler()
user_id_mapper = IdMapper()
process_users(
user_id_to_user_map=user_id_to_user_map,
realm_id=realm_id,
domain_name=domain_name,
user_handler=user_handler,
user_id_mapper=user_id_mapper,
)
self.assert_length(user_handler.get_all_users(), 6)
self.assertTrue(user_id_mapper.has(rocketchat_data["user"][0]["_id"]))
self.assertTrue(user_id_mapper.has(rocketchat_data["user"][4]["_id"]))
user_id = user_id_mapper.get(rocketchat_data["user"][0]["_id"])
user = user_handler.get_user(user_id)
self.assertEqual(user["full_name"], rocketchat_data["user"][0]["name"])
self.assertEqual(user["avatar_source"], "G")
self.assertEqual(user["delivery_email"], "rocket.cat-bot@zulip.com")
self.assertEqual(user["email"], "rocket.cat-bot@zulip.com")
self.assertEqual(user["full_name"], "Rocket.Cat")
self.assertEqual(user["id"], 1)
self.assertEqual(user["is_active"], False)
self.assertEqual(user["is_mirror_dummy"], False)
self.assertEqual(user["is_bot"], True)
self.assertEqual(user["bot_type"], 1)
self.assertEqual(user["bot_owner"], 2)
self.assertEqual(user["role"], UserProfile.ROLE_MEMBER)
self.assertEqual(user["realm"], realm_id)
self.assertEqual(user["short_name"], "rocket.cat")
self.assertEqual(user["timezone"], "UTC")
user_id = user_id_mapper.get(rocketchat_data["user"][2]["_id"])
user = user_handler.get_user(user_id)
self.assertEqual(user["full_name"], rocketchat_data["user"][2]["name"])
self.assertEqual(user["avatar_source"], "G")
self.assertEqual(user["delivery_email"], "harrypotter@email.com")
self.assertEqual(user["email"], "harrypotter@email.com")
self.assertEqual(user["full_name"], "Harry Potter")
self.assertEqual(user["id"], 3)
self.assertEqual(user["is_active"], True)
self.assertEqual(user["is_mirror_dummy"], False)
self.assertEqual(user["is_bot"], False)
self.assertEqual(user["bot_type"], None)
self.assertEqual(user["bot_owner"], None)
self.assertEqual(user["role"], UserProfile.ROLE_REALM_OWNER)
self.assertEqual(user["realm"], realm_id)
self.assertEqual(user["short_name"], "harry.potter")
self.assertEqual(user["timezone"], "UTC")
# Test `is_mirror_dummy` set for users of type `unknown`
rocketchat_data["user"].append(
{
"_id": "s0m34ndmID",
"createdAt": datetime.datetime(2019, 11, 6, 0, 38, 42, 796000),
"type": "unknown",
"roles": ["unknown"],
"name": "Unknown user",
"username": "unknown",
}
)
user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
process_users(
user_id_to_user_map=user_id_to_user_map,
realm_id=realm_id,
domain_name=domain_name,
user_handler=user_handler,
user_id_mapper=user_id_mapper,
)
self.assert_length(user_handler.get_all_users(), 7)
self.assertTrue(user_id_mapper.has(rocketchat_data["user"][6]["_id"]))
user_id = user_id_mapper.get(rocketchat_data["user"][6]["_id"])
user = user_handler.get_user(user_id)
self.assertEqual(user["id"], 7)
self.assertEqual(user["is_active"], False)
self.assertEqual(user["is_mirror_dummy"], True)
self.assertEqual(user["is_bot"], False)
    def test_categorize_channels_and_map_with_id(self) -> None:
        """Verify that the fixture's Rocket.Chat rooms are sorted into the six
        category maps (plain rooms, teams, discussions, direct channels,
        huddles, livechat), each keyed by the Rocket.Chat room/team ID."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        # One output map per Rocket.Chat channel category; the function under
        # test fills these in place.
        room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
        team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
        dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
        direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
        huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
        livechat_id_to_livechat_map: Dict[str, Dict[str, Any]] = {}
        with self.assertLogs(level="INFO"):
            categorize_channels_and_map_with_id(
                channel_data=rocketchat_data["room"],
                room_id_to_room_map=room_id_to_room_map,
                team_id_to_team_map=team_id_to_team_map,
                dsc_id_to_dsc_map=dsc_id_to_dsc_map,
                direct_id_to_direct_map=direct_id_to_direct_map,
                huddle_id_to_huddle_map=huddle_id_to_huddle_map,
                livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            )
        # Per-category counts for the 16 fixture rooms.
        self.assert_length(rocketchat_data["room"], 16)
        # Teams are a subset of rooms.
        self.assert_length(room_id_to_room_map, 6)
        self.assert_length(team_id_to_team_map, 1)
        self.assert_length(dsc_id_to_dsc_map, 5)
        self.assert_length(direct_id_to_direct_map, 2)
        self.assert_length(huddle_id_to_huddle_map, 1)
        self.assert_length(livechat_id_to_livechat_map, 2)
        # Spot-check one representative entry per map: keyed by ID, value is
        # the original room dict.
        room_id = rocketchat_data["room"][0]["_id"]
        self.assertIn(room_id, room_id_to_room_map)
        self.assertEqual(room_id_to_room_map[room_id], rocketchat_data["room"][0])
        # Teams are keyed by "teamId", not "_id".
        team_id = rocketchat_data["room"][3]["teamId"]
        self.assertIn(team_id, team_id_to_team_map)
        self.assertEqual(team_id_to_team_map[team_id], rocketchat_data["room"][3])
        dsc_id = rocketchat_data["room"][7]["_id"]
        self.assertIn(dsc_id, dsc_id_to_dsc_map)
        self.assertEqual(dsc_id_to_dsc_map[dsc_id], rocketchat_data["room"][7])
        direct_id = rocketchat_data["room"][4]["_id"]
        self.assertIn(direct_id, direct_id_to_direct_map)
        self.assertEqual(direct_id_to_direct_map[direct_id], rocketchat_data["room"][4])
        huddle_id = rocketchat_data["room"][12]["_id"]
        self.assertIn(huddle_id, huddle_id_to_huddle_map)
        self.assertEqual(huddle_id_to_huddle_map[huddle_id], rocketchat_data["room"][12])
        livechat_id = rocketchat_data["room"][14]["_id"]
        self.assertIn(livechat_id, livechat_id_to_livechat_map)
        self.assertEqual(livechat_id_to_livechat_map[livechat_id], rocketchat_data["room"][14])
    def test_convert_channel_data(self) -> None:
        """Verify that Rocket.Chat rooms and teams are converted to Zulip
        streams with the expected name, privacy, description, post policy,
        and realm."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        realm_id = 3
        stream_id_mapper = IdMapper()
        room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
        team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
        dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
        direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
        huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
        livechat_id_to_livechat_map: Dict[str, Dict[str, Any]] = {}
        # Categorize the fixture rooms first; convert_channel_data consumes
        # the room and team maps produced here.
        with self.assertLogs(level="INFO"):
            categorize_channels_and_map_with_id(
                channel_data=rocketchat_data["room"],
                room_id_to_room_map=room_id_to_room_map,
                team_id_to_team_map=team_id_to_team_map,
                dsc_id_to_dsc_map=dsc_id_to_dsc_map,
                direct_id_to_direct_map=direct_id_to_direct_map,
                huddle_id_to_huddle_map=huddle_id_to_huddle_map,
                livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            )
        zerver_stream = convert_channel_data(
            room_id_to_room_map=room_id_to_room_map,
            team_id_to_team_map=team_id_to_team_map,
            stream_id_mapper=stream_id_mapper,
            realm_id=realm_id,
        )
        # Only rooms are converted to streams.
        self.assert_length(room_id_to_room_map, 6)
        self.assert_length(zerver_stream, 6)
        # Normal public stream
        self.assertEqual(zerver_stream[0]["name"], "general")
        self.assertEqual(zerver_stream[0]["invite_only"], False)
        self.assertEqual(zerver_stream[0]["description"], "This is a general channel.")
        self.assertEqual(zerver_stream[0]["rendered_description"], "")
        self.assertEqual(zerver_stream[0]["stream_post_policy"], 1)
        self.assertEqual(zerver_stream[0]["realm"], realm_id)
        # Private stream
        self.assertEqual(zerver_stream[1]["name"], "random")
        self.assertEqual(zerver_stream[1]["invite_only"], True)
        self.assertEqual(zerver_stream[1]["description"], "")
        self.assertEqual(zerver_stream[1]["rendered_description"], "")
        self.assertEqual(zerver_stream[1]["stream_post_policy"], 1)
        self.assertEqual(zerver_stream[1]["realm"], realm_id)
        # Team main: the team's primary channel gets a "[TEAM]" name prefix
        # and is private.
        self.assertEqual(zerver_stream[3]["name"], "[TEAM] team-harry-potter")
        self.assertEqual(zerver_stream[3]["invite_only"], True)
        self.assertEqual(
            zerver_stream[3]["description"], "Welcome to the official Harry Potter team."
        )
        self.assertEqual(zerver_stream[3]["rendered_description"], "")
        self.assertEqual(zerver_stream[3]["stream_post_policy"], 1)
        self.assertEqual(zerver_stream[3]["realm"], realm_id)
        # Team channel: a secondary channel of a team gets the team noted in
        # its description instead.
        self.assertEqual(zerver_stream[5]["name"], "thp-channel-2")
        self.assertEqual(zerver_stream[5]["invite_only"], False)
        self.assertEqual(zerver_stream[5]["description"], "[Team team-harry-potter channel]. ")
        self.assertEqual(zerver_stream[5]["rendered_description"], "")
        self.assertEqual(zerver_stream[5]["stream_post_policy"], 1)
        self.assertEqual(zerver_stream[5]["realm"], realm_id)
    def test_convert_stream_subscription_data(self) -> None:
        """Verify stream subscriber sets are computed per stream, and that a
        channel with no users is imported as a deactivated stream with no
        subscribers."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        realm_id = 3
        domain_name = "zulip.com"
        user_handler = UserHandler()
        subscriber_handler = SubscriberHandler()
        user_id_mapper = IdMapper()
        stream_id_mapper = IdMapper()
        # Process users first so user IDs exist for subscription mapping.
        user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
        process_users(
            user_id_to_user_map=user_id_to_user_map,
            realm_id=realm_id,
            domain_name=domain_name,
            user_handler=user_handler,
            user_id_mapper=user_id_mapper,
        )
        room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
        team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
        dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
        direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
        huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
        livechat_id_to_livechat_map: Dict[str, Dict[str, Any]] = {}
        with self.assertLogs(level="INFO"):
            categorize_channels_and_map_with_id(
                channel_data=rocketchat_data["room"],
                room_id_to_room_map=room_id_to_room_map,
                team_id_to_team_map=team_id_to_team_map,
                dsc_id_to_dsc_map=dsc_id_to_dsc_map,
                direct_id_to_direct_map=direct_id_to_direct_map,
                huddle_id_to_huddle_map=huddle_id_to_huddle_map,
                livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            )
        zerver_stream = convert_channel_data(
            room_id_to_room_map=room_id_to_room_map,
            team_id_to_team_map=team_id_to_team_map,
            stream_id_mapper=stream_id_mapper,
            realm_id=realm_id,
        )
        convert_stream_subscription_data(
            user_id_to_user_map=user_id_to_user_map,
            dsc_id_to_dsc_map=dsc_id_to_dsc_map,
            zerver_stream=zerver_stream,
            stream_id_mapper=stream_id_mapper,
            user_id_mapper=user_id_mapper,
            subscriber_handler=subscriber_handler,
        )
        # Resolve the Zulip user IDs of the fixture users for the
        # subscriber-set assertions below.
        priyansh_id = user_id_mapper.get(rocketchat_data["user"][1]["_id"])
        harry_id = user_id_mapper.get(rocketchat_data["user"][2]["_id"])
        hermione_id = user_id_mapper.get(rocketchat_data["user"][3]["_id"])
        ron_id = user_id_mapper.get(rocketchat_data["user"][4]["_id"])
        voldemort_id = user_id_mapper.get(rocketchat_data["user"][5]["_id"])
        self.assertEqual(
            subscriber_handler.get_users(stream_id=zerver_stream[0]["id"]),
            {priyansh_id, harry_id, ron_id, hermione_id, voldemort_id},
        )
        self.assertEqual(
            subscriber_handler.get_users(stream_id=zerver_stream[1]["id"]), {priyansh_id, harry_id}
        )
        self.assertEqual(
            subscriber_handler.get_users(stream_id=zerver_stream[2]["id"]), {harry_id, hermione_id}
        )
        self.assertEqual(
            subscriber_handler.get_users(stream_id=zerver_stream[3]["id"]),
            {harry_id, ron_id, hermione_id},
        )
        self.assertEqual(subscriber_handler.get_users(stream_id=zerver_stream[4]["id"]), {harry_id})
        self.assertEqual(subscriber_handler.get_users(stream_id=zerver_stream[5]["id"]), {harry_id})
        # Add a new channel with no user.
        no_user_channel: Dict[str, Any] = {
            "_id": "rand0mID",
            "ts": datetime.datetime(2021, 7, 15, 10, 58, 23, 647000),
            "t": "c",
            "name": "no-user-channel",
        }
        room_id_to_room_map[no_user_channel["_id"]] = no_user_channel
        # Re-run conversion with the user-less channel included.
        zerver_stream = convert_channel_data(
            room_id_to_room_map=room_id_to_room_map,
            team_id_to_team_map=team_id_to_team_map,
            stream_id_mapper=stream_id_mapper,
            realm_id=realm_id,
        )
        convert_stream_subscription_data(
            user_id_to_user_map=user_id_to_user_map,
            dsc_id_to_dsc_map=dsc_id_to_dsc_map,
            zerver_stream=zerver_stream,
            stream_id_mapper=stream_id_mapper,
            user_id_mapper=user_id_mapper,
            subscriber_handler=subscriber_handler,
        )
        # The user-less stream ends up with no subscribers and deactivated.
        self.assert_length(subscriber_handler.get_users(stream_id=zerver_stream[6]["id"]), 0)
        self.assertTrue(zerver_stream[6]["deactivated"])
    def test_convert_huddle_data(self) -> None:
        """Verify that the fixture's single Rocket.Chat huddle (group direct
        message) is converted and its member user IDs are recorded with the
        subscriber handler."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        realm_id = 3
        domain_name = "zulip.com"
        user_handler = UserHandler()
        subscriber_handler = SubscriberHandler()
        user_id_mapper = IdMapper()
        huddle_id_mapper = IdMapper()
        # Users must be processed first so huddle membership can be mapped to
        # Zulip user IDs.
        user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
        process_users(
            user_id_to_user_map=user_id_to_user_map,
            realm_id=realm_id,
            domain_name=domain_name,
            user_handler=user_handler,
            user_id_mapper=user_id_mapper,
        )
        room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
        team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
        dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
        direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
        huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
        livechat_id_to_livechat_map: Dict[str, Dict[str, Any]] = {}
        with self.assertLogs(level="INFO"):
            categorize_channels_and_map_with_id(
                channel_data=rocketchat_data["room"],
                room_id_to_room_map=room_id_to_room_map,
                team_id_to_team_map=team_id_to_team_map,
                dsc_id_to_dsc_map=dsc_id_to_dsc_map,
                direct_id_to_direct_map=direct_id_to_direct_map,
                huddle_id_to_huddle_map=huddle_id_to_huddle_map,
                livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            )
        zerver_huddle = convert_huddle_data(
            huddle_id_to_huddle_map=huddle_id_to_huddle_map,
            huddle_id_mapper=huddle_id_mapper,
            user_id_mapper=user_id_mapper,
            subscriber_handler=subscriber_handler,
        )
        self.assert_length(zerver_huddle, 1)
        # Room index 12 is the fixture's huddle (see the categorization test).
        rc_huddle_id = rocketchat_data["room"][12]["_id"]
        self.assertTrue(huddle_id_mapper.has(rc_huddle_id))
        huddle_id = huddle_id_mapper.get(rc_huddle_id)
        self.assertEqual(subscriber_handler.get_users(huddle_id=huddle_id), {3, 4, 5})
    def test_write_emoticon_data(self) -> None:
        """Verify custom emoji conversion: RealmEmoji rows are built (aliases
        share a file), and emoji files plus a records.json manifest are
        written to the output directory."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        output_dir = self.make_import_output_dir("rocketchat")
        with self.assertLogs(level="INFO"):
            zerver_realmemoji = build_custom_emoji(
                realm_id=3,
                custom_emoji_data=rocketchat_data["custom_emoji"],
                output_dir=output_dir,
            )
        self.assert_length(zerver_realmemoji, 5)
        self.assertEqual(zerver_realmemoji[0]["name"], "tick")
        self.assertEqual(zerver_realmemoji[0]["file_name"], "tick.png")
        self.assertEqual(zerver_realmemoji[0]["realm"], 3)
        self.assertEqual(zerver_realmemoji[0]["deactivated"], False)
        # "check" is an alias of "tick" and reuses its image file.
        self.assertEqual(zerver_realmemoji[1]["name"], "check")
        self.assertEqual(zerver_realmemoji[1]["file_name"], "tick.png")
        self.assertEqual(zerver_realmemoji[1]["realm"], 3)
        self.assertEqual(zerver_realmemoji[1]["deactivated"], False)
        self.assertEqual(zerver_realmemoji[2]["name"], "zulip")
        self.assertEqual(zerver_realmemoji[2]["file_name"], "zulip.png")
        self.assertEqual(zerver_realmemoji[2]["realm"], 3)
        self.assertEqual(zerver_realmemoji[2]["deactivated"], False)
        # The on-disk records manifest should mirror the RealmEmoji rows and
        # point at files that actually exist.
        records_file = os.path.join(output_dir, "emoji", "records.json")
        with open(records_file, "rb") as f:
            records_json = orjson.loads(f.read())
        self.assertEqual(records_json[0]["name"], "tick")
        self.assertEqual(records_json[0]["file_name"], "tick.png")
        self.assertEqual(records_json[0]["realm_id"], 3)
        self.assertEqual(records_json[1]["name"], "check")
        self.assertEqual(records_json[1]["file_name"], "tick.png")
        self.assertEqual(records_json[1]["realm_id"], 3)
        self.assertTrue(os.path.isfile(records_json[0]["path"]))
        self.assertEqual(records_json[2]["name"], "zulip")
        self.assertEqual(records_json[2]["file_name"], "zulip.png")
        self.assertEqual(records_json[2]["realm_id"], 3)
        self.assertTrue(os.path.isfile(records_json[2]["path"]))
    def test_map_receiver_id_to_recipient_id(self) -> None:
        """Verify that Recipient rows built for users, streams, and huddles
        are mapped back to recipient IDs in the three per-type lookup
        dictionaries."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        realm_id = 3
        domain_name = "zulip.com"
        user_handler = UserHandler()
        subscriber_handler = SubscriberHandler()
        user_id_mapper = IdMapper()
        stream_id_mapper = IdMapper()
        huddle_id_mapper = IdMapper()
        # Build users, streams, and huddles — the three recipient sources.
        user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
        process_users(
            user_id_to_user_map=user_id_to_user_map,
            realm_id=realm_id,
            domain_name=domain_name,
            user_handler=user_handler,
            user_id_mapper=user_id_mapper,
        )
        room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
        team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
        dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
        direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
        huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
        livechat_id_to_livechat_map: Dict[str, Dict[str, Any]] = {}
        with self.assertLogs(level="INFO"):
            categorize_channels_and_map_with_id(
                channel_data=rocketchat_data["room"],
                room_id_to_room_map=room_id_to_room_map,
                team_id_to_team_map=team_id_to_team_map,
                dsc_id_to_dsc_map=dsc_id_to_dsc_map,
                direct_id_to_direct_map=direct_id_to_direct_map,
                huddle_id_to_huddle_map=huddle_id_to_huddle_map,
                livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            )
        zerver_stream = convert_channel_data(
            room_id_to_room_map=room_id_to_room_map,
            team_id_to_team_map=team_id_to_team_map,
            stream_id_mapper=stream_id_mapper,
            realm_id=realm_id,
        )
        zerver_huddle = convert_huddle_data(
            huddle_id_to_huddle_map=huddle_id_to_huddle_map,
            huddle_id_mapper=huddle_id_mapper,
            user_id_mapper=user_id_mapper,
            subscriber_handler=subscriber_handler,
        )
        all_users = user_handler.get_all_users()
        zerver_recipient = build_recipients(
            zerver_userprofile=all_users,
            zerver_stream=zerver_stream,
            zerver_huddle=zerver_huddle,
        )
        stream_id_to_recipient_id: Dict[int, int] = {}
        user_id_to_recipient_id: Dict[int, int] = {}
        huddle_id_to_recipient_id: Dict[int, int] = {}
        map_receiver_id_to_recipient_id(
            zerver_recipient=zerver_recipient,
            stream_id_to_recipient_id=stream_id_to_recipient_id,
            user_id_to_recipient_id=user_id_to_recipient_id,
            huddle_id_to_recipient_id=huddle_id_to_recipient_id,
        )
        # 6 for streams and 6 for users.
        self.assert_length(zerver_recipient, 13)
        self.assert_length(stream_id_to_recipient_id, 6)
        self.assert_length(user_id_to_recipient_id, 6)
        self.assert_length(huddle_id_to_recipient_id, 1)
        # First user recipients are built, followed by stream recipients in `build_recipients`.
        self.assertEqual(
            user_id_to_recipient_id[zerver_recipient[0]["type_id"]], zerver_recipient[0]["id"]
        )
        self.assertEqual(
            user_id_to_recipient_id[zerver_recipient[1]["type_id"]], zerver_recipient[1]["id"]
        )
        self.assertEqual(
            stream_id_to_recipient_id[zerver_recipient[6]["type_id"]], zerver_recipient[6]["id"]
        )
        self.assertEqual(
            stream_id_to_recipient_id[zerver_recipient[7]["type_id"]], zerver_recipient[7]["id"]
        )
        # The single huddle recipient is the last one built.
        self.assertEqual(
            huddle_id_to_recipient_id[zerver_recipient[12]["type_id"]], zerver_recipient[12]["id"]
        )
    def test_separate_channel_private_and_livechat_messages(self) -> None:
        """Verify that fixture messages are partitioned into channel, private
        (direct + huddle), and livechat lists, and that a message without a
        room ID ("rid") is dropped entirely."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
        team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
        dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
        direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
        huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
        livechat_id_to_livechat_map: Dict[str, Dict[str, Any]] = {}
        # Categorize rooms first; message separation routes by the maps built
        # here.
        with self.assertLogs(level="INFO"):
            categorize_channels_and_map_with_id(
                channel_data=rocketchat_data["room"],
                room_id_to_room_map=room_id_to_room_map,
                team_id_to_team_map=team_id_to_team_map,
                dsc_id_to_dsc_map=dsc_id_to_dsc_map,
                direct_id_to_direct_map=direct_id_to_direct_map,
                huddle_id_to_huddle_map=huddle_id_to_huddle_map,
                livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            )
        channel_messages: List[Dict[str, Any]] = []
        private_messages: List[Dict[str, Any]] = []
        livechat_messages: List[Dict[str, Any]] = []
        separate_channel_private_and_livechat_messages(
            messages=rocketchat_data["message"],
            dsc_id_to_dsc_map=dsc_id_to_dsc_map,
            direct_id_to_direct_map=direct_id_to_direct_map,
            huddle_id_to_huddle_map=huddle_id_to_huddle_map,
            livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            channel_messages=channel_messages,
            private_messages=private_messages,
            livechat_messages=livechat_messages,
        )
        # 87 fixture messages split 68/11/8 across the three buckets.
        self.assert_length(rocketchat_data["message"], 87)
        self.assert_length(channel_messages, 68)
        self.assert_length(private_messages, 11)
        self.assert_length(livechat_messages, 8)
        self.assertIn(rocketchat_data["message"][0], channel_messages)
        self.assertIn(rocketchat_data["message"][1], channel_messages)
        self.assertIn(rocketchat_data["message"][4], channel_messages)
        self.assertIn(rocketchat_data["message"][11], private_messages)
        self.assertIn(rocketchat_data["message"][12], private_messages)
        self.assertIn(rocketchat_data["message"][50], private_messages)  # Huddle message
        self.assertIn(rocketchat_data["message"][79], livechat_messages)
        self.assertIn(rocketchat_data["message"][83], livechat_messages)
        self.assertIn(rocketchat_data["message"][86], livechat_messages)
        # Message in a Discussion originating from a direct channel
        self.assertIn(rocketchat_data["message"][70], private_messages)
        self.assertIn(rocketchat_data["message"][70]["rid"], direct_id_to_direct_map)
        # Add a message with no `rid`
        rocketchat_data["message"].append(
            {
                "_id": "p4v37myxc6yLZ8AHh",
                "t": "livechat_navigation_history",
                "ts": datetime.datetime(2019, 11, 6, 0, 38, 42, 796000),
                "msg": " - applewebdata://9124F033-BFEF-43C5-9215-DA369E4DA22D",
                "u": {"_id": "rocket.cat", "username": "cat"},
                "groupable": False,
                "unread": True,
                "navigation": {
                    "page": {
                        "change": "url",
                        "title": "",
                        "location": {"href": "applewebdata://9124F033-BFEF-43C5-9215-DA369E4DA22D"},
                    },
                    "token": "ebxuypgh0updo6klkobzhp",
                },
                "expireAt": 1575592722794.0,
                "_hidden": True,
                "_updatedAt": datetime.datetime(2019, 11, 6, 0, 38, 42, 796000),
            }
        )
        # Re-run with the rid-less message included; it should be ignored.
        channel_messages = []
        private_messages = []
        livechat_messages = []
        separate_channel_private_and_livechat_messages(
            messages=rocketchat_data["message"],
            dsc_id_to_dsc_map=dsc_id_to_dsc_map,
            direct_id_to_direct_map=direct_id_to_direct_map,
            huddle_id_to_huddle_map=huddle_id_to_huddle_map,
            livechat_id_to_livechat_map=livechat_id_to_livechat_map,
            channel_messages=channel_messages,
            private_messages=private_messages,
            livechat_messages=livechat_messages,
        )
        # No new message added to channel, private or livechat messages
        self.assert_length(channel_messages, 68)
        self.assert_length(private_messages, 11)
        self.assert_length(livechat_messages, 8)
def test_map_upload_id_to_upload_data(self) -> None:
fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
upload_id_to_upload_data_map = map_upload_id_to_upload_data(rocketchat_data["upload"])
self.assert_length(rocketchat_data["upload"]["upload"], 4)
self.assert_length(upload_id_to_upload_data_map, 4)
upload_id = rocketchat_data["upload"]["upload"][0]["_id"]
upload_name = rocketchat_data["upload"]["upload"][0]["name"]
self.assertEqual(upload_id_to_upload_data_map[upload_id]["name"], upload_name)
self.assert_length(upload_id_to_upload_data_map[upload_id]["chunk"], 1)
    def test_build_reactions(self) -> None:
        """Verify reaction conversion: Unicode emoji map to codepoints, realm
        emoji map to RealmEmoji IDs, and unknown names are dropped."""
        fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
        rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
        output_dir = self.make_import_output_dir("rocketchat")
        # Build the realm emoji first, so "check"/"zulip"/"harry-ron" resolve
        # as REALM_EMOJI reactions.
        with self.assertLogs(level="INFO"):
            zerver_realmemoji = build_custom_emoji(
                realm_id=3,
                custom_emoji_data=rocketchat_data["custom_emoji"],
                output_dir=output_dir,
            )
        total_reactions: List[ZerverFieldsT] = []
        reactions = [
            {"name": "grin", "user_id": 3},
            {"name": "grinning", "user_id": 3},
            {"name": "innocent", "user_id": 2},
            {"name": "star_struck", "user_id": 4},
            {"name": "heart", "user_id": 3},
            {"name": "rocket", "user_id": 4},
            {"name": "check", "user_id": 2},
            {"name": "zulip", "user_id": 3},
            {"name": "harry-ron", "user_id": 4},
        ]
        build_reactions(
            total_reactions=total_reactions,
            reactions=reactions,
            message_id=3,
            zerver_realmemoji=zerver_realmemoji,
        )
        # :grin: and :star_struck: are not present in Zulip's default
        # emoji set, or in Reaction.UNICODE_EMOJI reaction type.
        self.assert_length(total_reactions, 7)
        # Expected codepoints for the Unicode emoji reactions.
        grinning_emoji_code = name_to_codepoint["grinning"]
        innocent_emoji_code = name_to_codepoint["innocent"]
        heart_emoji_code = name_to_codepoint["heart"]
        rocket_emoji_code = name_to_codepoint["rocket"]
        # Expected emoji codes (RealmEmoji IDs) for the realm emoji reactions.
        realmemoji_code = {}
        for emoji in zerver_realmemoji:
            realmemoji_code[emoji["name"]] = emoji["id"]
        self.assertEqual(
            self.get_set(total_reactions, "reaction_type"),
            {Reaction.UNICODE_EMOJI, Reaction.REALM_EMOJI},
        )
        self.assertEqual(
            self.get_set(total_reactions, "emoji_name"),
            {"grinning", "innocent", "heart", "rocket", "check", "zulip", "harry-ron"},
        )
        self.assertEqual(
            self.get_set(total_reactions, "emoji_code"),
            {
                grinning_emoji_code,
                innocent_emoji_code,
                heart_emoji_code,
                rocket_emoji_code,
                realmemoji_code["check"],
                realmemoji_code["zulip"],
                realmemoji_code["harry-ron"],
            },
        )
        self.assertEqual(self.get_set(total_reactions, "user_profile"), {2, 3, 4})
        # Each reaction row gets a unique ID; all reference the same message.
        self.assert_length(self.get_set(total_reactions, "id"), 7)
        self.assert_length(self.get_set(total_reactions, "message"), 1)
def test_process_message_attachment(self) -> None:
fixture_dir_name = self.fixture_file_name("", "rocketchat_fixtures")
rocketchat_data = rocketchat_data_to_dict(fixture_dir_name)
output_dir = self.make_import_output_dir("mattermost")
user_id_to_user_map = map_user_id_to_user(rocketchat_data["user"])
realm_id = 3
domain_name = "zulip.com"
user_handler = UserHandler()
user_id_mapper = IdMapper()
process_users(
user_id_to_user_map=user_id_to_user_map,
realm_id=realm_id,
domain_name=domain_name,
user_handler=user_handler,
user_id_mapper=user_id_mapper,
)
zerver_attachments: List[ZerverFieldsT] = []
uploads_list: List[ZerverFieldsT] = []
upload_id_to_upload_data_map = map_upload_id_to_upload_data(rocketchat_data["upload"])
message_with_attachment = rocketchat_data["message"][55]
process_message_attachment(
upload=message_with_attachment["file"],
realm_id=3,
message_id=1,
user_id=3,
user_handler=user_handler,
zerver_attachment=zerver_attachments,
uploads_list=uploads_list,
upload_id_to_upload_data_map=upload_id_to_upload_data_map,
output_dir=output_dir,
)
self.assert_length(zerver_attachments, 1)
self.assertEqual(zerver_attachments[0]["file_name"], "harry-ron.jpg")
self.assertEqual(zerver_attachments[0]["owner"], 3)
self.assertEqual(
user_handler.get_user(zerver_attachments[0]["owner"])["email"], "harrypotter@email.com"
)
# TODO: Assert this for False after fixing the file permissions in PMs
self.assertTrue(zerver_attachments[0]["is_realm_public"])
self.assert_length(uploads_list, 1)
self.assertEqual(uploads_list[0]["user_profile_email"], "harrypotter@email.com")
attachment_out_path = os.path.join(output_dir, "uploads", zerver_attachments[0]["path_id"])
self.assertTrue(os.path.exists(attachment_out_path))
self.assertTrue(os.path.isfile(attachment_out_path))
def read_file(self, team_output_dir: str, output_file: str) -> Any:
full_path = os.path.join(team_output_dir, output_file)
with open(full_path, "rb") as f:
return orjson.loads(f.read())
    def test_do_convert_data(self) -> None:
        """End-to-end test: convert the Rocket.Chat fixture with
        do_convert_data, check the exported realm/message JSON, then import
        it into a real realm and verify users and messages."""
        rocketchat_data_dir = self.fixture_file_name("", "rocketchat_fixtures")
        output_dir = self.make_import_output_dir("rocketchat")
        with self.assertLogs(level="INFO") as info_log, self.settings(
            EXTERNAL_HOST="zulip.example.com"
        ):
            # We need to mock EXTERNAL_HOST to be a valid domain because rocketchat's importer
            # uses it to generate email addresses for users without an email specified.
            do_convert_data(
                rocketchat_data_dir=rocketchat_data_dir,
                output_dir=output_dir,
            )
        # The conversion's log output is pinned exactly.
        self.assertEqual(
            info_log.output,
            [
                "INFO:root:Huddle channel found. UIDs: ['LdBZ7kPxtKESyHPEe', 'M2sXGqoQRJQwQoXY2', 'os6N2Xg2JkNMCSW9Z'] -> hash 752a5854d2b6eec337fe81f0066a5dd72c3f0639",
                "INFO:root:Starting to process custom emoji",
                "INFO:root:Done processing emoji",
                "INFO:root:skipping direct messages discussion mention: Discussion with Hermione",
                "INFO:root:Start making tarball",
                "INFO:root:Done making tarball",
            ],
        )
        # Expected export-tree layout.
        self.assertEqual(os.path.exists(os.path.join(output_dir, "avatars")), True)
        self.assertEqual(os.path.exists(os.path.join(output_dir, "emoji")), True)
        self.assertEqual(os.path.exists(os.path.join(output_dir, "uploads")), True)
        self.assertEqual(os.path.exists(os.path.join(output_dir, "attachment.json")), True)
        # Inspect the exported realm data.
        realm = self.read_file(output_dir, "realm.json")
        self.assertEqual(
            "Organization imported from Rocket.Chat!", realm["zerver_realm"][0]["description"]
        )
        exported_user_ids = self.get_set(realm["zerver_userprofile"], "id")
        self.assert_length(exported_user_ids, 6)
        exported_user_full_names = self.get_set(realm["zerver_userprofile"], "full_name")
        self.assertEqual(
            exported_user_full_names,
            {
                "Rocket.Cat",
                "Priyansh Garg",
                "Harry Potter",
                "Hermione Granger",
                "Ron Weasley",
                "Lord Voldemort",
            },
        )
        exported_user_emails = self.get_set(realm["zerver_userprofile"], "email")
        self.assertEqual(
            exported_user_emails,
            {
                # The bot has no email in the fixture, so one is generated
                # from EXTERNAL_HOST.
                "rocket.cat-bot@zulip.example.com",
                "priyansh3133@email.com",
                "harrypotter@email.com",
                "hermionegranger@email.com",
                "ronweasley@email.com",
                "lordvoldemort@email.com",
            },
        )
        self.assert_length(realm["zerver_stream"], 6)
        exported_stream_names = self.get_set(realm["zerver_stream"], "name")
        self.assertEqual(
            exported_stream_names,
            {
                "general",
                "random",
                "gryffindor-common-room",
                "[TEAM] team-harry-potter",
                "heya",
                "thp-channel-2",
            },
        )
        self.assertEqual(
            self.get_set(realm["zerver_stream"], "realm"), {realm["zerver_realm"][0]["id"]}
        )
        self.assertEqual(self.get_set(realm["zerver_stream"], "deactivated"), {False})
        self.assert_length(realm["zerver_defaultstream"], 0)
        exported_recipient_ids = self.get_set(realm["zerver_recipient"], "id")
        self.assert_length(exported_recipient_ids, 13)
        # Recipient types: 1=personal, 2=stream, 3=huddle.
        exported_recipient_types = self.get_set(realm["zerver_recipient"], "type")
        self.assertEqual(exported_recipient_types, {1, 2, 3})
        exported_subscription_userprofile = self.get_set(
            realm["zerver_subscription"], "user_profile"
        )
        self.assert_length(exported_subscription_userprofile, 6)
        exported_subscription_recipients = self.get_set(realm["zerver_subscription"], "recipient")
        self.assert_length(exported_subscription_recipients, 13)
        # Inspect the exported message data.
        messages = self.read_file(output_dir, "messages-000001.json")
        exported_messages_id = self.get_set(messages["zerver_message"], "id")
        self.assertIn(messages["zerver_message"][0]["sender"], exported_user_ids)
        self.assertIn(messages["zerver_message"][0]["recipient"], exported_recipient_ids)
        self.assertIn(
            messages["zerver_message"][0]["content"], "Hey everyone, how's it going??\n\n"
        )
        exported_usermessage_userprofiles = self.get_set(
            messages["zerver_usermessage"], "user_profile"
        )
        # Rocket.Cat is not subscribed to any recipient (stream/PMs) with messages.
        self.assert_length(exported_usermessage_userprofiles, 5)
        exported_usermessage_messages = self.get_set(messages["zerver_usermessage"], "message")
        self.assertEqual(exported_usermessage_messages, exported_messages_id)
        # Now actually import the export into a new realm.
        with self.assertLogs(level="INFO"):
            do_import_realm(
                import_dir=output_dir,
                subdomain="hogwarts",
            )
        realm = get_realm("hogwarts")
        self.assertFalse(get_user("rocket.cat-bot@zulip.example.com", realm).is_mirror_dummy)
        self.assertTrue(get_user("rocket.cat-bot@zulip.example.com", realm).is_bot)
        self.assertFalse(get_user("harrypotter@email.com", realm).is_mirror_dummy)
        self.assertFalse(get_user("harrypotter@email.com", realm).is_bot)
        self.assertFalse(get_user("ronweasley@email.com", realm).is_mirror_dummy)
        self.assertFalse(get_user("ronweasley@email.com", realm).is_bot)
        self.assertFalse(get_user("hermionegranger@email.com", realm).is_mirror_dummy)
        self.assertFalse(get_user("hermionegranger@email.com", realm).is_bot)
        messages = Message.objects.filter(sender__realm=realm)
        # All imported messages should have been rendered to HTML.
        for message in messages:
            self.assertIsNotNone(message.rendered_content)
        # After removing user_joined, added_user, discussion_created, etc.
        # messages. (Total messages were 66.)
        self.assert_length(messages, 43)
        stream_messages = messages.filter(recipient__type=Recipient.STREAM).order_by("date_sent")
        stream_recipients = stream_messages.values_list("recipient", flat=True)
        self.assert_length(stream_messages, 35)
        self.assert_length(set(stream_recipients), 5)
        self.assertEqual(stream_messages[0].sender.email, "priyansh3133@email.com")
        self.assertEqual(stream_messages[0].content, "Hey everyone, how's it going??")
        # Stream message with an image attachment.
        self.assertEqual(stream_messages[23].sender.email, "harrypotter@email.com")
        self.assertRegex(
            stream_messages[23].content,
            "Just a random pic!\n\n\\[harry-ron.jpg\\]\\(.*\\)",
        )
        self.assertTrue(stream_messages[23].has_attachment)
        self.assertTrue(stream_messages[23].has_image)
        self.assertTrue(stream_messages[23].has_link)
        huddle_messages = messages.filter(recipient__type=Recipient.HUDDLE).order_by("date_sent")
        huddle_recipients = huddle_messages.values_list("recipient", flat=True)
        self.assert_length(huddle_messages, 4)
        self.assert_length(set(huddle_recipients), 1)
        self.assertEqual(huddle_messages[0].sender.email, "hermionegranger@email.com")
        self.assertEqual(huddle_messages[0].content, "Hey people!")
        # Huddle message with a non-image (PDF) attachment.
        self.assertEqual(huddle_messages[2].sender.email, "harrypotter@email.com")
        self.assertRegex(
            huddle_messages[2].content,
            "This year's curriculum is out.\n\n\\[Hogwarts Curriculum.pdf\\]\\(.*\\)",
        )
        self.assertTrue(huddle_messages[2].has_attachment)
        self.assertFalse(huddle_messages[2].has_image)
        self.assertTrue(huddle_messages[2].has_link)
        personal_messages = messages.filter(recipient__type=Recipient.PERSONAL).order_by(
            "date_sent"
        )
        personal_recipients = personal_messages.values_list("recipient", flat=True)
        self.assert_length(personal_messages, 4)
        self.assert_length(set(personal_recipients), 2)
        self.assertEqual(personal_messages[0].sender.email, "harrypotter@email.com")
        self.assertEqual(
            personal_messages[0].content,
            "Hey @**Hermione Granger** :grin:, how's everything going?",
        )
        self.verify_emoji_code_foreign_keys()
def test_truncate_name(self) -> None:
self.assertEqual("foobar", truncate_name("foobar", 42, 60))
self.assertEqual("1234567890 [42]", truncate_name("12345678901234567890", 42, 15))
| {
"content_hash": "9e7f4ed9ed43dc9dfe4ae95de0694180",
"timestamp": "",
"source": "github",
"line_count": 1046,
"max_line_length": 169,
"avg_line_length": 43.44933078393881,
"alnum_prop": 0.6006424925189228,
"repo_name": "andersk/zulip",
"id": "2447f089b85202ff45f756b71ba622002920ddf5",
"size": "45448",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "zerver/tests/test_rocketchat_importer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "490256"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "749848"
},
{
"name": "Handlebars",
"bytes": "377098"
},
{
"name": "JavaScript",
"bytes": "4006373"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10168530"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284837"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``Addition`` model and attach it to
    ``Category``, ``Menu`` and ``Product`` through new ``extras`` M2M tables.

    Auto-generated by South; the frozen ``models`` dict below is a snapshot
    of the app's ORM state at the time this migration was created and should
    not be edited by hand.
    """
    def forwards(self, orm):
        """Apply: create the Addition table and the three extras join tables."""
        # Adding model 'Addition'
        db.create_table(u'reserved_addition', (
            (u'namedobject_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['reserved.NamedObject'], unique=True, primary_key=True)),
            ('min_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('max_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal(u'reserved', ['Addition'])
        # Adding M2M table for field extras on 'Category'
        m2m_table_name = db.shorten_name(u'reserved_category_extras')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('category', models.ForeignKey(orm[u'reserved.category'], null=False)),
            ('addition', models.ForeignKey(orm[u'reserved.addition'], null=False))
        ))
        db.create_unique(m2m_table_name, ['category_id', 'addition_id'])
        # Adding M2M table for field extras on 'Menu'
        m2m_table_name = db.shorten_name(u'reserved_menu_extras')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('menu', models.ForeignKey(orm[u'reserved.menu'], null=False)),
            ('addition', models.ForeignKey(orm[u'reserved.addition'], null=False))
        ))
        db.create_unique(m2m_table_name, ['menu_id', 'addition_id'])
        # Adding M2M table for field extras on 'Product'
        m2m_table_name = db.shorten_name(u'reserved_product_extras')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('product', models.ForeignKey(orm[u'reserved.product'], null=False)),
            ('addition', models.ForeignKey(orm[u'reserved.addition'], null=False))
        ))
        db.create_unique(m2m_table_name, ['product_id', 'addition_id'])
    def backwards(self, orm):
        """Revert: drop the Addition table and the three extras join tables."""
        # Deleting model 'Addition'
        db.delete_table(u'reserved_addition')
        # Removing M2M table for field extras on 'Category'
        db.delete_table(db.shorten_name(u'reserved_category_extras'))
        # Removing M2M table for field extras on 'Menu'
        db.delete_table(db.shorten_name(u'reserved_menu_extras'))
        # Removing M2M table for field extras on 'Product'
        db.delete_table(db.shorten_name(u'reserved_product_extras'))
    # Frozen ORM state (auto-generated snapshot used by South to build the
    # fake `orm` object passed to forwards()/backwards()). Do not edit.
    models = {
        u'account.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['account.UserProfile']"}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'middle_names': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'easy_maps.address': {
            'Meta': {'object_name': 'Address'},
            'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'computed_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'geocode_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'reserved.addition': {
            'Meta': {'object_name': 'Addition', '_ormbases': [u'reserved.NamedObject']},
            'max_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'min_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'namedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['reserved.NamedObject']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'reserved.booking': {
            'Meta': {'object_name': 'Booking'},
            'arrival': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.Company']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'customers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Customer']", 'null': 'True', 'blank': 'True'}),
            'depature': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'initial'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'reserved.category': {
            'Meta': {'object_name': 'Category', '_ormbases': [u'reserved.NamedObject']},
            'extras': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Addition']", 'null': 'True', 'blank': 'True'}),
            u'namedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['reserved.NamedObject']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.Category']", 'null': 'True', 'blank': 'True'})
        },
        u'reserved.company': {
            'Meta': {'object_name': 'Company'},
            'addresses': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['reserved.Location']", 'symmetrical': 'False'}),
            'contact': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['account.UserProfile']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'telephone': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['reserved.Telephone']", 'symmetrical': 'False'})
        },
        u'reserved.customer': {
            'Meta': {'object_name': 'Customer'},
            'address': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['reserved.Location']", 'symmetrical': 'False'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'reserved.event': {
            'Meta': {'object_name': 'Event'},
            'bookings': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Booking']", 'null': 'True', 'blank': 'True'}),
            'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.Company']", 'null': 'True', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'venues': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['reserved.Venue']", 'symmetrical': 'False'})
        },
        u'reserved.location': {
            'Meta': {'object_name': 'Location', '_ormbases': [u'easy_maps.Address']},
            u'address_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['easy_maps.Address']", 'unique': 'True', 'primary_key': 'True'}),
            'location_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'reserved.menu': {
            'Meta': {'object_name': 'Menu', '_ormbases': [u'reserved.NamedObject']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Category']", 'null': 'True', 'blank': 'True'}),
            'extras': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Addition']", 'null': 'True', 'blank': 'True'}),
            u'namedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['reserved.NamedObject']", 'unique': 'True', 'primary_key': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Product']", 'null': 'True', 'blank': 'True'}),
            'times': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Timing']", 'null': 'True', 'blank': 'True'})
        },
        u'reserved.namedobject': {
            'Meta': {'object_name': 'NamedObject'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'icon_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'reserved.product': {
            'Meta': {'object_name': 'Product', '_ormbases': [u'reserved.NamedObject']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Category']", 'null': 'True', 'blank': 'True'}),
            'extras': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['reserved.Addition']", 'null': 'True', 'blank': 'True'}),
            u'namedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['reserved.NamedObject']", 'unique': 'True', 'primary_key': 'True'}),
            'price': ('django.db.models.fields.IntegerField', [], {})
        },
        u'reserved.telephone': {
            'Meta': {'object_name': 'Telephone'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.TelephoneType']"}),
            'number': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.Customer']"})
        },
        u'reserved.telephonetype': {
            'Meta': {'object_name': 'TelephoneType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'reserved.timing': {
            'Meta': {'object_name': 'Timing', '_ormbases': [u'reserved.NamedObject']},
            'active_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'deactive_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'namedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['reserved.NamedObject']", 'unique': 'True', 'primary_key': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'reserved.venue': {
            'Meta': {'object_name': 'Venue'},
            'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.Location']", 'null': 'True', 'blank': 'True'}),
            'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reserved.Company']"}),
            'contact': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['reserved.Customer']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.UserProfile']", 'null': 'True', 'blank': 'True'})
        }
    }
    # Tell South which apps' frozen models above are complete.
    complete_apps = ['reserved']
"content_hash": "9f37bc977a67c0759981e0b69fa506fa",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 195,
"avg_line_length": 75.94954128440367,
"alnum_prop": 0.5670713293471039,
"repo_name": "Strangemother/coffee",
"id": "63fd3db1e941987db534356383085ef375961edd",
"size": "16581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reserved/migrations/0004_auto__add_addition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "310225"
},
{
"name": "HTML",
"bytes": "532020"
},
{
"name": "JavaScript",
"bytes": "362136"
},
{
"name": "Python",
"bytes": "200655"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``count`` integer column to the contacts ``Group`` model."""
    dependencies = [
        ('contacts', '0003_auto_20160209_1048'),
    ]
    operations = [
        migrations.AddField(
            model_name='group',
            name='count',
            # Nullable, so existing rows need no default or backfill when
            # the column is added.
            field=models.IntegerField(null=True),
        ),
    ]
| {
"content_hash": "facc736c5e8391ce5e8f838eefb438cb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 20.77777777777778,
"alnum_prop": 0.5882352941176471,
"repo_name": "xkmato/casepro",
"id": "50d99e1606be4f087dd5f63f2c5a03326c002260",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "casepro/contacts/migrations/0004_group_count.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3475"
},
{
"name": "CoffeeScript",
"bytes": "220522"
},
{
"name": "HTML",
"bytes": "104527"
},
{
"name": "PLpgSQL",
"bytes": "6012"
},
{
"name": "Python",
"bytes": "878626"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from typing import (
    AbstractSet, Any, AnyStr, Callable, Dict, Iterable, List, Mapping, MutableMapping,
    Optional, Sequence, Set, Text, Tuple, TypeVar, Union
)
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from django.contrib.sessions.models import Session
from zerver.lib.bugdown import (
BugdownRenderingException,
version as bugdown_version
)
from zerver.lib.cache import (
to_dict_cache_key,
to_dict_cache_key_id,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.message import (
access_message,
MessageDict,
message_to_dict,
render_markdown,
)
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, RealmAlias, \
Subscription, Recipient, Message, Attachment, UserMessage, valid_stream_name, \
Client, DefaultStream, UserPresence, Referral, PushDeviceToken, MAX_SUBJECT_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, get_recipient, get_huddle, \
get_user_profile_by_id, PreregistrationUser, get_display_recipient, \
get_realm, bulk_get_recipients, \
email_allowed_for_realm, email_to_username, display_recipient_cache_key, \
get_user_profile_by_email, get_stream_cache_key, \
UserActivityInterval, get_active_user_dicts_in_realm, get_active_streams, \
realm_filters_for_realm, RealmFilter, receives_offline_notifications, \
ScheduledJob, get_owned_bot_dicts, \
get_old_unclaimed_attachments, get_cross_realm_emails, receives_online_notifications, \
Reaction
from zerver.lib.alert_words import alert_words_in_realm
from zerver.lib.avatar import get_avatar_url, avatar_url
from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from importlib import import_module
from django.core.mail import EmailMessage
from django.utils.timezone import now
from confirmation.models import Confirmation
import six
from six.moves import filter
from six.moves import map
from six.moves import range
from six import unichr
# Load the session backend configured by SESSION_ENGINE (db, cache, file, ...);
# used by delete_session() below to remove individual sessions by key.
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.create_user import random_api_key
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from django.utils import timezone
from zerver.lib.create_user import create_user
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, cache_set_many, \
cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.html_diff import highlight_html_differences
from zerver.lib.alert_words import user_alert_words, add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.push_notifications import num_push_devices_for_user, \
send_apple_push_notification, send_android_push_notification
from zerver.lib.notifications import clear_followup_emails_queue
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.request import JsonableError
from zerver.lib.session_user import get_session_user
from zerver.lib.upload import attachment_url_re, attachment_url_to_path_id, \
claim_attachment, delete_message_image
from zerver.lib.str_utils import NonBinaryStr, force_str
from zerver.tornado.event_queue import request_event_queue, get_user_events, send_event
import DNS
import ujson
import time
import traceback
import re
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
import copy
# Used to annotate parameters that accept a sized iterable of text: on
# Python 2 the function works on both str and unicode, but on Python 3 it
# works only on str.
SizedTextIterable = Union[Sequence[Text], AbstractSet[Text]]
# Hex color palette for stream assignment; consumed elsewhere in this module
# when picking a color for a stream subscription (usage not shown in this
# excerpt).
STREAM_ASSIGNMENT_COLORS = [
    "#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
    "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
    "#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
    "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
    "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
    "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
# Store an event in the log for re-importing messages
def log_event(event):
    # type: (MutableMapping[str, Any]) -> None
    """Append *event* as one line of JSON to today's on-disk event log.

    No-op when ``settings.EVENT_LOG_DIR`` is unset.  A ``timestamp`` field
    is added if the caller did not supply one.  Writers on the same host
    serialize through a lock file placed next to the log file.
    """
    if settings.EVENT_LOG_DIR is None:
        return
    if "timestamp" not in event:
        event["timestamp"] = time.time()
    if not os.path.exists(settings.EVENT_LOG_DIR):
        try:
            os.mkdir(settings.EVENT_LOG_DIR)
        except OSError:
            # Another process may have created the directory between the
            # exists() check and mkdir() (common when several workers log
            # concurrently).  Only re-raise if the directory is genuinely
            # still missing, e.g. a permissions failure.
            if not os.path.isdir(settings.EVENT_LOG_DIR):
                raise
    template = os.path.join(settings.EVENT_LOG_DIR,
                            '%s.' + platform.node()
                            + datetime.datetime.now().strftime('.%Y-%m-%d'))
    with lockfile(template % ('lock',)):
        with open(template % ('events',), 'a') as log:
            log.write(force_str(ujson.dumps(event) + u'\n'))
def active_user_ids(realm):
    # type: (Realm) -> List[int]
    """Return the ids of all active users in *realm*."""
    user_dicts = get_active_user_dicts_in_realm(realm)
    return [user_dict['id'] for user_dict in user_dicts]
def can_access_stream_user_ids(stream):
    # type: (Stream) -> Set[int]
    """Return ids of users who may see a stream's attributes (name,
    description, ...): everyone in the realm for a public stream, only
    subscribers for a private one."""
    if not stream.is_public():
        return private_stream_user_ids(stream)
    return set(active_user_ids(stream.realm))
def private_stream_user_ids(stream):
    # type: (Stream) -> Set[int]
    """Return the ids of the active subscribers of a private stream."""
    # TODO: Find similar queries elsewhere and de-duplicate this code.
    active_subs = Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id=stream.id,
        active=True)
    return {row['user_profile_id'] for row in active_subs.values('user_profile_id')}
def bot_owner_userids(user_profile):
    # type: (UserProfile) -> Sequence[int]
    """Return the ids of users who should be told about bot *user_profile*.

    A bot whose default sending or events-register stream is invite-only is
    announced only to its owner; otherwise every active realm user is told.
    """
    sending = user_profile.default_sending_stream
    registering = user_profile.default_events_register_stream
    is_private_bot = ((sending and sending.invite_only) or
                      (registering and registering.invite_only))
    if is_private_bot:
        return (user_profile.bot_owner_id,)  # TODO: change this to list instead of tuple
    return active_user_ids(user_profile.realm)
def realm_user_count(realm):
    # type: (Realm) -> int
    """Count the active human (non-bot) users in *realm*."""
    humans = UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
    return humans.count()
def get_topic_history_for_stream(user_profile, recipient):
    # type: (UserProfile, Recipient) -> List[Tuple[str, int]]
    """Return ``(topic_name, unread_count)`` pairs for a stream, most
    recently active topic first.

    Topics are de-duplicated case-insensitively (the first spelling seen,
    i.e. the most recent one, wins), and the count covers only messages the
    user has not read.
    """
    # We tested the below query on some large prod datasets, and we never
    # saw more than 50ms to execute it, so we think that's acceptable,
    # but we will monitor it, and we may later optimize it further.
    #
    # ("flags" & 1) extracts the "read" bit of the UserMessage flags
    # bitfield; grouping by (topic, read) gives separate read/unread counts
    # per exact topic spelling.
    query = '''
    SELECT topic, read, count(*)
    FROM (
        SELECT
            ("zerver_usermessage"."flags" & 1) as read,
            "zerver_message"."subject" as topic,
            "zerver_message"."id" as message_id
        FROM "zerver_usermessage"
        INNER JOIN "zerver_message" ON (
            "zerver_usermessage"."message_id" = "zerver_message"."id"
        ) WHERE (
            "zerver_usermessage"."user_profile_id" = %s AND
            "zerver_message"."recipient_id" = %s
        ) ORDER BY "zerver_usermessage"."message_id" DESC
    ) messages_for_stream
    GROUP BY topic, read
    ORDER BY max(message_id) desc
    '''
    cursor = connection.cursor()
    cursor.execute(query, [user_profile.id, recipient.id])
    rows = cursor.fetchall()
    cursor.close()
    # lowercased topic -> canonical (first-seen) spelling
    topic_names = dict()  # type: Dict[str, str]
    # canonical topic -> number of unread messages
    topic_counts = dict()  # type: Dict[str, int]
    topics = []
    for row in rows:
        topic_name, read, count = row
        if topic_name.lower() not in topic_names:
            topic_names[topic_name.lower()] = topic_name
        topic_name = topic_names[topic_name.lower()]
        if topic_name not in topic_counts:
            topic_counts[topic_name] = 0
            # Rows arrive newest-first, so topics ends up ordered by recency.
            topics.append(topic_name)
        if not read:
            topic_counts[topic_name] += count
    history = [(topic, topic_counts[topic]) for topic in topics]
    return history
def send_signup_message(sender, signups_stream, user_profile,
                        internal=False, realm=None):
    # type: (UserProfile, Text, UserProfile, bool, Optional[Realm]) -> None
    """Announce a new signup: once on the realm's notifications stream (if
    any, and if this is not the realm's first user) and once on the global
    signups stream."""
    # When this is done using manage.py vs. the web interface
    internal_blurb = " **INTERNAL SIGNUP** " if internal else " "
    user_count = realm_user_count(user_profile.realm)
    # Send notification to realm notifications stream if it exists
    # Don't send notification for the first user in a realm
    notifications_stream = user_profile.realm.notifications_stream
    if notifications_stream is not None and user_count > 1:
        internal_send_message(
            sender,
            "stream",
            notifications_stream.name,
            "New users",
            "%s just signed up for Zulip. Say hello!" % (user_profile.full_name,),
            realm=user_profile.realm)
    internal_send_message(
        sender,
        "stream",
        signups_stream,
        user_profile.realm.domain,
        "%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
            user_profile.full_name,
            user_profile.email,
            internal_blurb,
            user_count,
        ))
def notify_new_user(user_profile, internal=False):
    # type: (UserProfile, bool) -> None
    """Announce *user_profile*'s signup via the new-user bot (if configured)
    and bump the per-realm signup gauge in statsd."""
    if settings.NEW_USER_BOT is not None:
        send_signup_message(settings.NEW_USER_BOT, "signups", user_profile, internal)
    gauge_name = "users.signups.%s" % (user_profile.realm.domain.replace('.', '_'))
    statsd.gauge(gauge_name, 1, delta=True)
def add_new_user_history(user_profile, streams):
    # type: (UserProfile, Iterable[Stream]) -> None
    """Give you the last 100 messages on your public streams, so you have
    something to look at in your home view once you finish the
    tutorial."""
    one_week_ago = now() - datetime.timedelta(weeks=1)
    public_stream_ids = [stream.id for stream in streams if not stream.invite_only]
    recipients = Recipient.objects.filter(type=Recipient.STREAM,
                                          type_id__in=public_stream_ids)
    recent_messages = Message.objects.filter(recipient_id__in=recipients,
                                             pub_date__gt=one_week_ago).order_by("-id")
    message_ids_to_use = list(reversed(recent_messages.values_list('id', flat=True)[0:100]))
    if not message_ids_to_use:
        return
    # Handle the race condition where a message arrives between
    # bulk_add_subscriptions above and the Message query just above
    already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
                                                 user_profile=user_profile).values_list("message_id", flat=True))
    ums_to_create = [UserMessage(user_profile=user_profile, message_id=message_id,
                                 flags=UserMessage.flags.read)
                     for message_id in message_ids_to_use
                     if message_id not in already_ids]
    UserMessage.objects.bulk_create(ums_to_create)
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile, prereg_user=None, newsletter_data=None):
    # type: (UserProfile, Optional[PreregistrationUser], Optional[Dict[str, str]]) -> None
    """Run the post-creation steps for a new (human) account; see the
    bullet list above for what those steps are."""
    mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
    try:
        streams = prereg_user.streams.all()
    except AttributeError:
        # This will catch both the case where prereg_user is None and where it
        # is a MitUser.
        streams = []
    # If the user's invitation didn't explicitly list some streams, we
    # add the default streams
    if len(streams) == 0:
        streams = get_default_subs(user_profile)
    bulk_add_subscriptions(streams, [user_profile])
    add_new_user_history(user_profile, streams)
    # mit_beta_users don't have a referred_by field
    if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None \
            and settings.NOTIFICATION_BOT is not None:
        # This is a cross-realm private message.
        internal_send_message(
            settings.NOTIFICATION_BOT,
            "private",
            prereg_user.referred_by.email,
            user_profile.realm.domain,
            "%s <`%s`> accepted your invitation to join Zulip!" % (
                user_profile.full_name,
                user_profile.email,
            )
        )
    # Mark any other PreregistrationUsers that are STATUS_ACTIVE as
    # inactive so we can keep track of the PreregistrationUser we
    # actually used for analytics
    if prereg_user is not None:
        PreregistrationUser.objects.filter(email__iexact=user_profile.email).exclude(
            id=prereg_user.id).update(status=0)
    else:
        PreregistrationUser.objects.filter(email__iexact=user_profile.email).update(status=0)
    notify_new_user(user_profile)
    if newsletter_data is not None:
        # If the user was created automatically via the API, we may
        # not want to register them for the newsletter
        queue_json_publish(
            "signups",
            {
                'email_address': user_profile.email,
                'merge_fields': {
                    'NAME': user_profile.full_name,
                    'REALM_ID': user_profile.realm_id,
                    'OPTIN_IP': newsletter_data["IP"],
                    'OPTIN_TIME': datetime.datetime.isoformat(now().replace(microsecond=0)),
                },
            },
            lambda event: None)
def notify_created_user(user_profile):
    # type: (UserProfile) -> None
    """Broadcast a realm_user/add event for a freshly created user to all
    active users of the realm."""
    person = dict(email=user_profile.email,
                  user_id=user_profile.id,
                  is_admin=user_profile.is_realm_admin,
                  full_name=user_profile.full_name,
                  is_bot=user_profile.is_bot)
    send_event(dict(type="realm_user", op="add", person=person),
               active_user_ids(user_profile.realm))
def notify_created_bot(user_profile):
    # type: (UserProfile) -> None
    """Broadcast a realm_bot/add event describing the new bot to the users
    who can see it (its owner for private bots, the realm otherwise)."""
    def stream_name(stream):
        # type: (Stream) -> Optional[Text]
        return stream.name if stream else None
    bot = dict(email=user_profile.email,
               user_id=user_profile.id,
               full_name=user_profile.full_name,
               api_key=user_profile.api_key,
               default_sending_stream=stream_name(user_profile.default_sending_stream),
               default_events_register_stream=stream_name(user_profile.default_events_register_stream),
               default_all_public_streams=user_profile.default_all_public_streams,
               avatar_url=avatar_url(user_profile),
               owner=user_profile.bot_owner.email,
               )
    send_event(dict(type="realm_bot", op="add", bot=bot),
               bot_owner_userids(user_profile))
def do_create_user(email, password, realm, full_name, short_name,
                   active=True, bot_type=None, bot_owner=None, tos_version=None,
                   avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
                   default_sending_stream=None, default_events_register_stream=None,
                   default_all_public_streams=None, prereg_user=None,
                   newsletter_data=None):
    # type: (Text, Text, Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, Optional[Stream], Optional[Stream], bool, Optional[PreregistrationUser], Optional[Dict[str, str]]) -> UserProfile
    """Create a user (human or bot), record a user_created event in the
    event log, notify clients, and for humans run the full signup pipeline
    (process_new_human_user).  Returns the new UserProfile.
    """
    # Log enough to re-create the account when re-importing (see log_event).
    event = {'type': 'user_created',
             'timestamp': time.time(),
             'full_name': full_name,
             'short_name': short_name,
             'user': email,
             'domain': realm.domain,
             'bot': bool(bot_type)}
    if bot_type:
        event['bot_owner'] = bot_owner.email
    log_event(event)
    user_profile = create_user(email=email, password=password, realm=realm,
                               full_name=full_name, short_name=short_name,
                               active=active, bot_type=bot_type, bot_owner=bot_owner,
                               tos_version=tos_version, avatar_source=avatar_source,
                               default_sending_stream=default_sending_stream,
                               default_events_register_stream=default_events_register_stream,
                               default_all_public_streams=default_all_public_streams)
    notify_created_user(user_profile)
    if bot_type:
        notify_created_bot(user_profile)
    else:
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data)
    return user_profile
def user_sessions(user_profile):
    # type: (UserProfile) -> List[Session]
    """Return every session currently belonging to *user_profile*."""
    sessions = []
    for session in Session.objects.all():
        if get_session_user(session) == user_profile.id:
            sessions.append(session)
    return sessions
def delete_session(session):
    # type: (Session) -> None
    """Delete *session* from the configured session store, logging that
    browser out."""
    store = session_engine.SessionStore(session.session_key)  # type: ignore # import_module
    store.delete()
def delete_user_sessions(user_profile):
    # type: (UserProfile) -> None
    """Log *user_profile* out everywhere by deleting all their sessions."""
    for session in Session.objects.all():
        if get_session_user(session) != user_profile.id:
            continue
        delete_session(session)
def delete_realm_user_sessions(realm):
    # type: (Realm) -> None
    """Log out every user of *realm* by deleting their live sessions.

    Only unexpired sessions are considered; expired ones are already dead
    and get reaped by normal session cleanup.
    """
    # Set, not list: membership tests below run once per live session.
    realm_user_ids = {user_profile.id for user_profile in
                      UserProfile.objects.filter(realm=realm)}
    # Use the timezone-aware now() (django.utils.timezone, imported above),
    # matching the rest of this module, rather than the naive
    # datetime.datetime.now(): expire_date is stored timezone-aware when
    # USE_TZ is enabled, and comparing naive to aware datetimes is wrong.
    for session in Session.objects.filter(expire_date__gte=now()):
        if get_session_user(session) in realm_user_ids:
            delete_session(session)
def delete_all_user_sessions():
    # type: () -> None
    """Delete every session in the store, logging out all users."""
    for s in Session.objects.all():
        delete_session(s)
def delete_all_deactivated_user_sessions():
    # type: () -> None
    """Delete sessions belonging to deactivated users or to users in
    deactivated realms."""
    for session in Session.objects.all():
        user_profile_id = get_session_user(session)
        if user_profile_id is None:
            continue
        user_profile = get_user_profile_by_id(user_profile_id)
        # Keep the session if the user and their realm are both active.
        if user_profile.is_active and not user_profile.realm.deactivated:
            continue
        logging.info("Deactivating session for deactivated user %s" % (user_profile.email,))
        delete_session(session)
def active_humans_in_realm(realm):
    # type: (Realm) -> Sequence[UserProfile]
    """Return the realm's active human (non-bot) users."""
    return UserProfile.objects.filter(realm=realm, is_bot=False, is_active=True)
def do_set_realm_name(realm, name):
    # type: (Realm, Text) -> None
    """Rename the realm and broadcast the change to its active users."""
    realm.name = name
    realm.save(update_fields=['name'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'name',
        'value': name,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_restricted_to_domain(realm, restricted):
    # type: (Realm, bool) -> None
    """Toggle whether signups are restricted to the realm's domain; notify users."""
    realm.restricted_to_domain = restricted
    realm.save(update_fields=['restricted_to_domain'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'restricted_to_domain',
        'value': restricted,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_invite_required(realm, invite_required):
    # type: (Realm, bool) -> None
    """Toggle whether an invitation is required to join the realm; notify users."""
    realm.invite_required = invite_required
    realm.save(update_fields=['invite_required'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'invite_required',
        'value': invite_required,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_invite_by_admins_only(realm, invite_by_admins_only):
    # type: (Realm, bool) -> None
    """Toggle whether only admins may send invitations; notify active users."""
    realm.invite_by_admins_only = invite_by_admins_only
    realm.save(update_fields=['invite_by_admins_only'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'invite_by_admins_only',
        'value': invite_by_admins_only,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_authentication_methods(realm, authentication_methods):
    # type: (Realm, Dict[str, bool]) -> None
    """Enable/disable the given authentication backends for the realm.

    `authentication_methods` maps backend names to booleans; each is
    written into the realm's authentication-methods bitfield, then the
    full resulting dict is broadcast to active users.
    """
    for backend_name, enabled in list(authentication_methods.items()):
        bit_index = getattr(realm.authentication_methods, backend_name).number
        realm.authentication_methods.set_bit(bit_index, int(enabled))
    realm.save(update_fields=['authentication_methods'])
    payload = {
        'type': 'realm',
        'op': 'update_dict',
        'property': 'default',
        'data': {'authentication_methods': realm.authentication_methods_dict()},
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_create_stream_by_admins_only(realm, create_stream_by_admins_only):
    # type: (Realm, bool) -> None
    """Toggle whether only admins may create streams; notify active users."""
    realm.create_stream_by_admins_only = create_stream_by_admins_only
    realm.save(update_fields=['create_stream_by_admins_only'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'create_stream_by_admins_only',
        'value': create_stream_by_admins_only,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_add_emoji_by_admins_only(realm, add_emoji_by_admins_only):
    # type: (Realm, bool) -> None
    """Toggle whether only admins may add custom emoji; notify active users."""
    realm.add_emoji_by_admins_only = add_emoji_by_admins_only
    realm.save(update_fields=['add_emoji_by_admins_only'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'add_emoji_by_admins_only',
        'value': add_emoji_by_admins_only,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_message_editing(realm, allow_message_editing, message_content_edit_limit_seconds):
    # type: (Realm, bool, int) -> None
    """Update the realm's message-editing policy and broadcast it to active users."""
    realm.allow_message_editing = allow_message_editing
    realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
    realm.save(update_fields=['allow_message_editing', 'message_content_edit_limit_seconds'])
    payload = {
        'type': 'realm',
        'op': 'update_dict',
        'property': 'default',
        'data': {'allow_message_editing': allow_message_editing,
                 'message_content_edit_limit_seconds': message_content_edit_limit_seconds},
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_default_language(realm, default_language):
    # type: (Realm, Text) -> None
    """Set the realm's default UI language and notify active users."""
    if default_language == 'zh_CN':
        # NB: remove this once we upgrade to Django 1.9
        # zh-cn and zh-tw will be replaced by zh-hans and zh-hant in
        # Django 1.9
        default_language = 'zh_HANS'
    realm.default_language = default_language
    realm.save(update_fields=['default_language'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'default_language',
        'value': default_language,
    }
    send_event(payload, active_user_ids(realm))
def do_set_realm_waiting_period_threshold(realm, threshold):
    # type: (Realm, int) -> None
    """Set the new-member waiting-period threshold (days); notify active users."""
    realm.waiting_period_threshold = threshold
    realm.save(update_fields=['waiting_period_threshold'])
    payload = {
        'type': 'realm',
        'op': 'update',
        'property': 'waiting_period_threshold',
        'value': threshold,
    }
    send_event(payload, active_user_ids(realm))
def do_deactivate_realm(realm):
    # type: (Realm) -> None
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be able to
    tell the difference between users that were intentionally deactivated,
    e.g. by a realm admin, and users who can't currently use Zulip because their
    realm has been deactivated.
    """
    if realm.deactivated:
        # Already deactivated; nothing to do.
        return
    realm.deactivated = True
    realm.save(update_fields=["deactivated"])
    # Keep the users active, but kill their sessions so they land on the
    # login screen, where a realm-deactivation notice will greet them.
    for user_profile in active_humans_in_realm(realm):
        delete_user_sessions(user_profile)
def do_reactivate_realm(realm):
    # type: (Realm) -> None
    """Reverse do_deactivate_realm by clearing the realm's deactivated flag."""
    realm.deactivated = False
    realm.save(update_fields=["deactivated"])
def do_deactivate_user(user_profile, log=True, _cascade=True):
    # type: (UserProfile, bool, bool) -> None
    """Deactivate a user: mark inactive, kill sessions, notify clients.

    With `_cascade` (the default), also deactivates any active bots
    owned by this user.
    """
    if not user_profile.is_active:
        return
    user_profile.is_active = False
    user_profile.save(update_fields=["is_active"])
    delete_user_sessions(user_profile)
    if log:
        log_event({'type': 'user_deactivated',
                   'timestamp': time.time(),
                   'user': user_profile.email,
                   'domain': user_profile.realm.domain})
    person = {'email': user_profile.email,
              'user_id': user_profile.id,
              'full_name': user_profile.full_name}
    send_event({'type': 'realm_user', 'op': 'remove', 'person': person},
               active_user_ids(user_profile.realm))
    if user_profile.is_bot:
        bot_info = {'email': user_profile.email,
                    'user_id': user_profile.id,
                    'full_name': user_profile.full_name}
        send_event({'type': 'realm_bot', 'op': 'remove', 'bot': bot_info},
                   bot_owner_userids(user_profile))
    if _cascade:
        owned_bots = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                bot_owner=user_profile)
        for bot_profile in owned_bots:
            do_deactivate_user(bot_profile, _cascade=False)
def do_deactivate_stream(stream, log=True):
    # type: (Stream, bool) -> None
    """Deactivate a stream: unsubscribe everyone, hide it, and free its name.

    The stream is renamed with a "!DEACTIVATED:" prefix (made unique by
    prepending additional "!"s if needed) and marked invite-only so it
    disappears from clients, while the original name becomes available
    for reuse.
    """
    # Remove every subscriber in the realm from the stream first.
    user_profiles = UserProfile.objects.filter(realm=stream.realm)
    for user_profile in user_profiles:
        bulk_remove_subscriptions([user_profile], [stream])
    was_invite_only = stream.invite_only
    stream.deactivated = True
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for _ in range(20):
        existing_deactivated_stream = get_stream(new_name, stream.realm)
        if existing_deactivated_stream:
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or you've hit a rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break
    # If you don't have a unique name at this point, this will fail later in the
    # code path.
    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save()
    # Remove the old stream information from remote cache.
    old_cache_key = get_stream_cache_key(old_name, stream.realm)
    cache_delete(old_cache_key)
    if not was_invite_only:
        # Notify clients of the deletion under the stream's original,
        # pre-deactivation name and privacy setting.
        stream_dict = stream.to_dict()
        stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
        event = dict(type="stream", op="delete",
                     streams=[stream_dict])
        send_event(event, active_user_ids(stream.realm))
def do_change_user_email(user_profile, new_email):
    # type: (UserProfile, Text) -> None
    """Change a user's email address and record the change in the event log."""
    old_email = user_profile.email
    user_profile.email = new_email
    user_profile.save(update_fields=["email"])
    event = {'type': 'user_email_changed',
             'old_email': old_email,
             'new_email': new_email}
    log_event(event)
def compute_irc_user_fullname(email):
    # type: (NonBinaryStr) -> NonBinaryStr
    """Derive a display name for an IRC-mirrored user from their email address."""
    username = email.split("@")[0]
    return username + " (IRC)"
def compute_jabber_user_fullname(email):
    # type: (NonBinaryStr) -> NonBinaryStr
    """Derive a display name for a Jabber/XMPP-mirrored user from their email."""
    username = email.split("@")[0]
    return username + " (XMPP)"
def compute_mit_user_fullname(email):
    # type: (NonBinaryStr) -> NonBinaryStr
    """Try to resolve an MIT email address to the user's real name.

    For plain usernames the name is looked up via a Hesiod DNS TXT
    query; for cross-realm addresses (user|REALM@mit.edu) a normalized
    form of the address itself is returned.  Falls back to the
    lower-cased email on any failure.
    """
    try:
        # Input is either e.g. username@mit.edu or user|CROSSREALM.INVALID@mit.edu
        match_user = re.match(r'^([a-zA-Z0-9_.-]+)(\|.+)?@mit\.edu$', email.lower())
        if match_user and match_user.group(2) is None:
            answer = DNS.dnslookup(
                "%s.passwd.ns.athena.mit.edu" % (match_user.group(1),),
                DNS.Type.TXT)
            # The TXT record is a passwd-style line; field 4 (GECOS) holds
            # the full name, possibly followed by comma-separated extras.
            hesiod_name = force_str(answer[0][0]).split(':')[4].split(',')[0].strip()
            if hesiod_name != "":
                return hesiod_name
        elif match_user:
            return match_user.group(1).lower() + "@" + match_user.group(2).upper()[1:]
    except DNS.Base.ServerError:
        pass
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print("Error getting fullname for %s:" % (email,))
        traceback.print_exc()
    return email.lower()
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
                timeout=3600*24*7)
def create_mirror_user_if_needed(realm, email, email_to_fullname):
    # type: (Realm, Text, Callable[[Text], Text]) -> UserProfile
    """Return the profile for a mirrored user's email, creating an inactive
    dummy account if none exists.  Results are cached for a week, keyed on
    the email address.
    """
    try:
        return get_user_profile_by_email(email)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(email, None, realm,
                               email_to_fullname(email), email_to_username(email),
                               active=False, is_mirror_dummy=True)
        except IntegrityError:
            # Lost a race with a concurrent creation; fetch the winner's row.
            return get_user_profile_by_email(email)
def log_message(message):
    # type: (Message) -> None
    """Append the message to the events log, unless it was sent by a test client."""
    client_name = message.sending_client.name
    if client_name.startswith("test:"):
        return
    log_event(message.to_log_dict())
# Helper function. Defaults here are overridden by those set in do_send_messages
def do_send_message(message, rendered_content=None, no_log=False, stream=None, local_id=None):
    # type: (Union[int, Message], Optional[Text], bool, Optional[Stream], Optional[int]) -> int
    """Send a single message by delegating to do_send_messages; returns its id."""
    wrapped = {'message': message,
               'rendered_content': rendered_content,
               'no_log': no_log,
               'stream': stream,
               'local_id': local_id}
    return do_send_messages([wrapped])[0]
def render_incoming_message(message, content, message_users):
    # type: (Message, Text, Set[UserProfile]) -> Text
    """Render message content to HTML, mapping rendering failures to a
    user-facing JsonableError."""
    realm_alert_words = alert_words_in_realm(message.get_realm())
    try:
        return render_markdown(
            message=message,
            content=content,
            realm_alert_words=realm_alert_words,
            message_users=message_users,
        )
    except BugdownRenderingException:
        raise JsonableError(_('Unable to render message'))
def get_recipient_user_profiles(recipient, sender_id):
    # type: (Recipient, Text) -> List[UserProfile]
    """Return the UserProfiles who should receive a message to `recipient`."""
    if recipient.type == Recipient.PERSONAL:
        recipients = list(set([get_user_profile_by_id(recipient.type_id),
                               get_user_profile_by_id(sender_id)]))
        # For personals, you send out either 1 or 2 copies, for
        # personals to yourself or to someone else, respectively.
        assert((len(recipients) == 1) or (len(recipients) == 2))
        return recipients
    if recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
        # We use select_related()/only() here, while the PERSONAL case above uses
        # get_user_profile_by_id() to get UserProfile objects from cache. Streams will
        # typically have more recipients than PMs, so get_user_profile_by_id() would be
        # a bit more expensive here, given that we need to hit the DB anyway and only
        # care about the email from the user profile.
        fields = [
            'user_profile__id',
            'user_profile__email',
            'user_profile__enable_online_push_notifications',
            'user_profile__is_active',
            'user_profile__realm__domain'
        ]
        subscriptions = Subscription.objects.select_related("user_profile", "user_profile__realm").only(*fields).filter(
            recipient=recipient, active=True)
        return [sub.user_profile for sub in subscriptions]
    raise ValueError('Bad recipient type')
def do_send_messages(messages):
    # type: (Sequence[Optional[MutableMapping[str, Any]]]) -> List[int]
    """Render, save, and deliver a batch of prepared message dicts.

    Each element is the dict produced by check_message / internal_prep_message
    (or None, which is dropped; or an int id for a zephyr-mirror duplicate
    that was already sent).  Returns the ids of all messages; the order of
    the returned ids is NOT guaranteed to match the input order.
    """
    # Filter out messages which didn't pass internal_prep_message properly
    messages = [message for message in messages if message is not None]
    # Filter out zephyr mirror anomalies where the message was already sent
    already_sent_ids = [] # type: List[int]
    new_messages = [] # type: List[MutableMapping[str, Any]]
    for message in messages:
        if isinstance(message['message'], int):
            # check_message returns {'message': id} for mirror duplicates.
            already_sent_ids.append(message['message'])
        else:
            new_messages.append(message)
    messages = new_messages
    # For consistency, changes to the default values for these gets should also be applied
    # to the default args in do_send_message
    for message in messages:
        message['rendered_content'] = message.get('rendered_content', None)
        message['no_log'] = message.get('no_log', False)
        message['stream'] = message.get('stream', None)
        message['local_id'] = message.get('local_id', None)
        message['sender_queue_id'] = message.get('sender_queue_id', None)
    # Log the message to our message log for populate_db to refill
    for message in messages:
        if not message['no_log']:
            log_message(message['message'])
    for message in messages:
        message['recipients'] = get_recipient_user_profiles(message['message'].recipient,
                                                            message['message'].sender_id)
        # Only deliver the message to active user recipients
        message['active_recipients'] = [user_profile for user_profile in message['recipients']
                                        if user_profile.is_active]
    links_for_embed = set() # type: Set[Text]
    # Render our messages.
    for message in messages:
        assert message['message'].rendered_content is None
        rendered_content = render_incoming_message(
            message['message'],
            message['message'].content,
            message_users=message['active_recipients'])
        message['message'].rendered_content = rendered_content
        message['message'].rendered_content_version = bugdown_version
        links_for_embed |= message['message'].links_for_preview
    for message in messages:
        message['message'].update_calculated_fields()
    # Save the message receipts in the database
    user_message_flags = defaultdict(dict) # type: Dict[int, Dict[int, List[str]]]
    with transaction.atomic():
        Message.objects.bulk_create([message['message'] for message in messages])
        ums = [] # type: List[UserMessage]
        for message in messages:
            ums_to_create = [UserMessage(user_profile=user_profile, message=message['message'])
                             for user_profile in message['active_recipients']]
            # These properties on the Message are set via
            # render_markdown by code in the bugdown inline patterns
            wildcard = message['message'].mentions_wildcard
            mentioned_ids = message['message'].mentions_user_ids
            ids_with_alert_words = message['message'].user_ids_with_alert_words
            is_me_message = message['message'].is_me_message
            for um in ums_to_create:
                # Messages a human sends to themselves start out read.
                if um.user_profile.id == message['message'].sender.id and \
                        message['message'].sent_by_human():
                    um.flags |= UserMessage.flags.read
                if wildcard:
                    um.flags |= UserMessage.flags.wildcard_mentioned
                if um.user_profile_id in mentioned_ids:
                    um.flags |= UserMessage.flags.mentioned
                if um.user_profile_id in ids_with_alert_words:
                    um.flags |= UserMessage.flags.has_alert_word
                if is_me_message:
                    um.flags |= UserMessage.flags.is_me_message
                user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
            ums.extend(ums_to_create)
        UserMessage.objects.bulk_create(ums)
        # Claim attachments in message
        for message in messages:
            if Message.content_has_attachment(message['message'].content):
                do_claim_attachments(message['message'])
    for message in messages:
        # Render Markdown etc. here and store (automatically) in
        # remote cache, so that the single-threaded Tornado server
        # doesn't have to.
        user_flags = user_message_flags.get(message['message'].id, {})
        sender = message['message'].sender
        user_presences = get_status_dict(sender)
        presences = {}
        for user_profile in message['active_recipients']:
            if user_profile.email in user_presences:
                presences[user_profile.id] = user_presences[user_profile.email]
        event = dict(
            type = 'message',
            message = message['message'].id,
            message_dict_markdown = message_to_dict(message['message'], apply_markdown=True),
            message_dict_no_markdown = message_to_dict(message['message'], apply_markdown=False),
            presences = presences)
        users = [{'id': user.id,
                  'flags': user_flags.get(user.id, []),
                  'always_push_notify': user.enable_online_push_notifications}
                 for user in message['active_recipients']]
        if message['message'].recipient.type == Recipient.STREAM:
            # Note: This is where authorization for single-stream
            # get_updates happens! We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            if message['stream'] is None:
                message['stream'] = Stream.objects.select_related("realm").get(id=message['message'].recipient.type_id)
            if message['stream'].is_public():
                event['realm_id'] = message['stream'].realm_id
                event['stream_name'] = message['stream'].name
            if message['stream'].invite_only:
                event['invite_only'] = True
        if message['local_id'] is not None:
            event['local_id'] = message['local_id']
        if message['sender_queue_id'] is not None:
            event['sender_queue_id'] = message['sender_queue_id']
        send_event(event, users)
        if settings.INLINE_URL_EMBED_PREVIEW and links_for_embed:
            # Queue a background job to fetch URL previews for embedding.
            event_data = {
                'message_id': message['message'].id,
                'message_content': message['message'].content,
                'urls': links_for_embed}
            queue_json_publish('embed_links', event_data, lambda x: None)
        if (settings.ENABLE_FEEDBACK and
            message['message'].recipient.type == Recipient.PERSONAL and
            settings.FEEDBACK_BOT in [up.email for up in message['recipients']]):
            queue_json_publish(
                    'feedback_messages',
                    message_to_dict(message['message'], apply_markdown=False),
                    lambda x: None
            )
    # Note that this does not preserve the order of message ids
    # returned. In practice, this shouldn't matter, as we only
    # mirror single zephyr messages at a time and don't otherwise
    # intermingle sending zephyr messages with other messages.
    return already_sent_ids + [message['message'].id for message in messages]
def do_add_reaction(user_profile, message, emoji_name):
    # type: (UserProfile, Message, Text) -> None
    """Record an emoji reaction and notify everyone who received the message."""
    Reaction(user_profile=user_profile, message=message, emoji_name=emoji_name).save()
    event = {'type': 'reaction',
             'op': 'add',
             'user': {'user_id': user_profile.id,
                      'email': user_profile.email,
                      'full_name': user_profile.full_name},
             'message_id': message.id,
             'emoji_name': emoji_name}  # type: Dict[str, Any]
    # Update the cached message since new reaction is added.
    update_to_dict_cache([message])
    # Recipients for message update events, including reactions, are
    # everyone who got the original message. This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    ums = UserMessage.objects.filter(message=message.id)
    send_event(event, [um.user_profile_id for um in ums])
def do_remove_reaction(user_profile, message, emoji_name):
    # type: (UserProfile, Message, Text) -> None
    """Delete an emoji reaction and notify everyone who received the message."""
    Reaction.objects.filter(user_profile=user_profile,
                            message=message,
                            emoji_name=emoji_name).delete()
    event = {'type': 'reaction',
             'op': 'remove',
             'user': {'user_id': user_profile.id,
                      'email': user_profile.email,
                      'full_name': user_profile.full_name},
             'message_id': message.id,
             'emoji_name': emoji_name}  # type: Dict[str, Any]
    # Clear the cached message since reaction is removed.
    update_to_dict_cache([message])
    # Recipients for message update events, including reactions, are
    # everyone who got the original message. This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    ums = UserMessage.objects.filter(message=message.id)
    send_event(event, [um.user_profile_id for um in ums])
def do_send_typing_notification(notification):
    # type: (Dict[str, Any]) -> None
    """Broadcast a typing start/stop event to the active recipients."""
    recipient_user_profiles = get_recipient_user_profiles(notification['recipient'],
                                                          notification['sender'].id)
    # Only deliver the notification to active user recipients
    user_ids_to_notify = [profile.id for profile in recipient_user_profiles if profile.is_active]
    sender_dict = {'user_id': notification['sender'].id, 'email': notification['sender'].email}
    # Include a list of recipients in the event body to help identify where the typing is happening
    recipient_dicts = [{'user_id': profile.id, 'email': profile.email} for profile in recipient_user_profiles]
    event = {'type': 'typing',
             'op': notification['op'],
             'sender': sender_dict,
             'recipients': recipient_dicts}
    send_event(event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender, notification_to, operator):
    # type: (UserProfile, Sequence[Text], Text) -> None
    """Validate a typing notification and, if valid, broadcast it."""
    notification = check_typing_notification(sender, notification_to, operator)
    do_send_typing_notification(notification)
# check_typing_notification:
# Returns typing notification ready for sending with do_send_typing_notification on success
# or the error message (string) on error.
def check_typing_notification(sender, notification_to, operator):
    # type: (UserProfile, Sequence[Text], Text) -> Dict[str, Any]
    """Validate a typing notification's recipients and operator.

    Returns the dict consumed by do_send_typing_notification; raises
    JsonableError on bad input.  NOTE(review): a stream recipient raises
    ValueError rather than JsonableError -- confirm callers expect that.
    """
    if len(notification_to) == 0:
        raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
    if operator not in ('start', 'stop'):
        raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
    try:
        recipient = recipient_for_emails(notification_to, False,
                                         sender, sender)
    except ValidationError as e:
        assert isinstance(e.messages[0], six.string_types)
        raise JsonableError(e.messages[0])
    if recipient.type == Recipient.STREAM:
        raise ValueError('Forbidden recipient type')
    return {'sender': sender, 'recipient': recipient, 'op': operator}
def do_create_stream(realm, stream_name):
    # type: (Realm, Text) -> None
    """Create a stream and subscribe every active human in the realm.

    This is used by a management command now, mostly to facilitate
    testing; it doesn't simulate every aspect of creating a subscription
    (e.g. no notification messages are sent to the new subscribers).
    """
    stream = Stream(realm=realm, name=stream_name)
    stream.save()
    Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
    subscribers = UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
    bulk_add_subscriptions([stream], subscribers)
def create_stream_if_needed(realm, stream_name, invite_only=False, stream_description=""):
    # type: (Realm, Text, bool, Text) -> Tuple[Stream, bool]
    """Get or create a stream by (case-insensitive) name.

    Returns (stream, created).  On creation a Recipient row is added,
    and for public streams all active realm users are notified.
    """
    defaults = {'name': stream_name,
                'description': stream_description,
                'invite_only': invite_only}
    (stream, created) = Stream.objects.get_or_create(
        realm=realm, name__iexact=stream_name, defaults=defaults)
    if created:
        Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
        if not invite_only:
            notice = dict(type="stream", op="create",
                          streams=[stream.to_dict()])
            send_event(notice, active_user_ids(realm))
    return stream, created
def create_streams_if_needed(realm, stream_dicts):
    # type: (Realm, List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]
    """Create any missing streams described by stream_dicts.

    Note that stream_dict["name"] is assumed to already be stripped of
    whitespace.  Returns (newly created streams, pre-existing streams).
    """
    added_streams = []  # type: List[Stream]
    existing_streams = []  # type: List[Stream]
    for stream_dict in stream_dicts:
        stream, created = create_stream_if_needed(
            realm,
            stream_dict["name"],
            invite_only=stream_dict.get("invite_only", False),
            stream_description=stream_dict.get("description", ""))
        bucket = added_streams if created else existing_streams
        bucket.append(stream)
    return added_streams, existing_streams
def recipient_for_emails(emails, not_forged_mirror_message,
                         user_profile, sender):
    # type: (Iterable[Text], bool, UserProfile, UserProfile) -> Recipient
    """Resolve a list of email addresses into a PERSONAL or HUDDLE Recipient.

    Raises ValidationError for unknown/deactivated users, recipients
    outside the sender's realm (other than exempt cross-realm bots), or
    a non-forged mirror message whose forwarder (`user_profile`) is not
    among the recipients.
    """
    recipient_profile_ids = set()
    # We exempt cross-realm bots from the check that all the recipients
    # are in the same domain.
    realm_domains = set()
    exempt_emails = get_cross_realm_emails()
    if sender.email not in exempt_emails:
        realm_domains.add(sender.realm.domain)
    for email in emails:
        try:
            # BUG FIX: this loop used to assign to `user_profile`, shadowing
            # the forwarder parameter and making the mirror-authorization
            # check below always pass.
            email_profile = get_user_profile_by_email(email)
        except UserProfile.DoesNotExist:
            raise ValidationError(_("Invalid email '%s'") % (email,))
        if (not email_profile.is_active and not email_profile.is_mirror_dummy) or \
                email_profile.realm.deactivated:
            raise ValidationError(_("'%s' is no longer using Zulip.") % (email,))
        recipient_profile_ids.add(email_profile.id)
        if email not in exempt_emails:
            realm_domains.add(email_profile.realm.domain)
    if not_forged_mirror_message and user_profile.id not in recipient_profile_ids:
        raise ValidationError(_("User not authorized for this query"))
    if len(realm_domains) > 1:
        raise ValidationError(_("You can't send private messages outside of your organization."))
    # If the private message is just between the sender and
    # another person, force it to be a personal internally
    if (len(recipient_profile_ids) == 2
            and sender.id in recipient_profile_ids):
        recipient_profile_ids.remove(sender.id)
    if len(recipient_profile_ids) > 1:
        # Make sure the sender is included in huddle messages
        recipient_profile_ids.add(sender.id)
        huddle = get_huddle(list(recipient_profile_ids))
        return get_recipient(Recipient.HUDDLE, huddle.id)
    else:
        return get_recipient(Recipient.PERSONAL, list(recipient_profile_ids)[0])
def already_sent_mirrored_message_id(message):
    # type: (Message) -> Optional[int]
    """Return the id of an identical, already-mirrored message, if one exists."""
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
        time_window = datetime.timedelta(seconds=10)
    else:
        time_window = datetime.timedelta(seconds=0)
    candidates = Message.objects.filter(
        sender=message.sender,
        recipient=message.recipient,
        content=message.content,
        subject=message.subject,
        sending_client=message.sending_client,
        pub_date__gte=message.pub_date - time_window,
        pub_date__lte=message.pub_date + time_window)
    if candidates.exists():
        return candidates[0].id
    return None
def extract_recipients(s):
    # type: (Union[str, Iterable[Text]]) -> List[Text]
    """Parse a recipients argument into a de-duplicated list of non-empty strings.

    We try to accept multiple incoming formats: a JSON-encoded list, a
    comma-separated string, or an already-parsed iterable.  See
    test_extract_recipients() for examples of what we allow.
    """
    try:
        data = ujson.loads(s) # type: ignore # This function has a super weird union argument.
    except ValueError:
        data = s
    if isinstance(data, six.string_types):
        data = data.split(',') # type: ignore # https://github.com/python/typeshed/pull/138
    if not isinstance(data, list):
        raise ValueError("Invalid data type for recipients")
    # Strip whitespace, then drop empties and duplicates.
    stripped = (recipient.strip() for recipient in data)
    return list(set(recipient for recipient in stripped if recipient))
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender, client, message_type_name, message_to,
                       subject_name, message_content, realm=None, forged=False,
                       forged_timestamp=None, forwarder_user_profile=None, local_id=None,
                       sender_queue_id=None):
    # type: (UserProfile, Client, Text, Sequence[Text], Text, Text, Optional[Realm], bool, Optional[float], Optional[UserProfile], Optional[Text], Optional[Text]) -> int
    """Validate a message via check_message, then send it; returns the new id."""
    prepared = check_message(sender, client, message_type_name, message_to,
                             subject_name, message_content, realm, forged, forged_timestamp,
                             forwarder_user_profile, local_id, sender_queue_id)
    return do_send_messages([prepared])[0]
def check_stream_name(stream_name):
    # type: (Text) -> None
    """Validate a stream name, raising JsonableError if it is unusable.

    Checks, in order: non-empty, within Stream.MAX_NAME_LENGTH, and
    passes valid_stream_name().
    """
    if stream_name == "":
        raise JsonableError(_("Stream can't be empty"))
    if len(stream_name) > Stream.MAX_NAME_LENGTH:
        raise JsonableError(_("Stream name too long"))
    if not valid_stream_name(stream_name):
        raise JsonableError(_("Invalid stream name"))
def send_pm_if_empty_stream(sender, stream, stream_name, realm):
    # type: (UserProfile, Stream, Text, Realm) -> None
    """If a bot sends a message to a stream that doesn't exist or has no
    subscribers, sends a notification to the bot owner (if not a
    cross-realm bot) so that the owner can correct the issue."""
    # Zephyr-mirror and deactivated realms don't get these warnings.
    if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
        return
    # Only bots with a known owner can be warned.
    if not sender.is_bot or sender.bot_owner is None:
        return
    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
    if sender.realm != realm:
        return
    if stream is not None:
        num_subscribers = stream.num_subscribers()
        if num_subscribers > 0:
            # Stream exists and has subscribers; nothing to warn about.
            return
    # We warn the user once every 5 minutes to avoid a flood of
    # PMs on a misconfigured integration, re-using the
    # UserProfile.last_reminder field, which is not used for bots.
    last_reminder = sender.last_reminder
    waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
    if last_reminder and timezone.now() - last_reminder <= waitperiod:
        return
    # error_msg is spliced into the middle of the sentence built below.
    if stream is None:
        error_msg = "that stream does not yet exist. To create it, "
    else:
        # num_subscribers == 0
        error_msg = "there are no subscribers to that stream. To join it, "
    content = ("Hi there! We thought you'd like to know that your bot **%s** just "
               "tried to send a message to stream `%s`, but %s"
               "click the gear in the left-side stream list." %
               (sender.full_name, stream_name, error_msg))
    message = internal_prep_message(settings.NOTIFICATION_BOT, "private",
                                    sender.bot_owner.email, "", content)
    do_send_messages([message])
    # Record when we last warned, for the rate limit above.
    sender.last_reminder = timezone.now()
    sender.save(update_fields=['last_reminder'])
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
def check_message(sender, client, message_type_name, message_to,
                  subject_name, message_content, realm=None, forged=False,
                  forged_timestamp=None, forwarder_user_profile=None, local_id=None,
                  sender_queue_id=None):
    # type: (UserProfile, Client, Text, Sequence[Text], Text, Text, Optional[Realm], bool, Optional[float], Optional[UserProfile], Optional[Text], Optional[Text]) -> Dict[str, Any]
    """Validate an outgoing message and build the dict do_send_messages expects.

    Raises JsonableError for any validation failure (empty content, bad
    recipients, unauthorized stream, etc.).  For zephyr-mirrored
    duplicates, returns {'message': existing_id} instead of a new Message.
    """
    stream = None
    if not message_to and message_type_name == 'stream' and sender.default_sending_stream:
        # Use the users default stream
        message_to = [sender.default_sending_stream.name]
    elif len(message_to) == 0:
        raise JsonableError(_("Message must have recipients"))
    if len(message_content.strip()) == 0:
        raise JsonableError(_("Message must not be empty"))
    message_content = truncate_body(message_content)
    if realm is None:
        realm = sender.realm
    if message_type_name == 'stream':
        if len(message_to) > 1:
            raise JsonableError(_("Cannot send to multiple streams"))
        stream_name = message_to[0].strip()
        check_stream_name(stream_name)
        if subject_name is None:
            raise JsonableError(_("Missing topic"))
        subject = subject_name.strip()
        if subject == "":
            raise JsonableError(_("Topic can't be empty"))
        subject = truncate_topic(subject)
        ## FIXME: Commented out temporarily while we figure out what we want
        # if not valid_stream_name(subject):
        #     return json_error(_("Invalid subject name"))
        stream = get_stream(stream_name, realm)
        # Warn a bot's owner if the target stream is missing or empty.
        send_pm_if_empty_stream(sender, stream, stream_name, realm)
        if stream is None:
            raise JsonableError(_("Stream '%(stream_name)s' does not exist") % {'stream_name': escape(stream_name)})
        recipient = get_recipient(Recipient.STREAM, stream.id)
        if not stream.invite_only:
            # This is a public stream
            pass
        elif subscribed_to_stream(sender, stream):
            # Or it is private, but you are subscribed
            pass
        elif sender.is_api_super_user or (forwarder_user_profile is not None and
                                          forwarder_user_profile.is_api_super_user):
            # Or this request is being done on behalf of a super user
            pass
        elif sender.is_bot and subscribed_to_stream(sender.bot_owner, stream):
            # Or you're a bot and your owner is subscribed.
            pass
        else:
            # All other cases are an error.
            raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
    elif message_type_name == 'private':
        mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]
        not_forged_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_emails(message_to, not_forged_mirror_message,
                                             forwarder_user_profile, sender)
        except ValidationError as e:
            assert isinstance(e.messages[0], six.string_types)
            raise JsonableError(e.messages[0])
    else:
        raise JsonableError(_("Invalid message type"))
    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if message_type_name == 'stream':
        message.subject = subject
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.pub_date = timestamp_to_datetime(forged_timestamp)
    else:
        message.pub_date = timezone.now()
    message.sending_client = client
    # We render messages later in the process.
    assert message.rendered_content is None
    if client.name == "zephyr_mirror":
        # Deduplicate messages mirrored twice from zephyr.
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}
    return {'message': message, 'stream': stream, 'local_id': local_id, 'sender_queue_id': sender_queue_id}
def internal_prep_message(sender_email, recipient_type_name, recipients,
                          subject, content, realm=None):
    # type: (Text, str, Text, Text, Text, Optional[Realm]) -> Optional[Dict[str, Any]]
    """
    Create a message object and checks it, but doesn't send it or save it to the database.

    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.

    Returns None (after logging the error) if the message fails validation.
    """
    if len(content) > MAX_MESSAGE_LENGTH:
        truncation_notice = "\n\n[message was too long and has been truncated]"
        # Truncate relative to MAX_MESSAGE_LENGTH rather than the old magic
        # constant 3900, so the truncated message is guaranteed to fit the
        # limit regardless of what MAX_MESSAGE_LENGTH is set to.
        content = content[:MAX_MESSAGE_LENGTH - len(truncation_notice)] + truncation_notice
    sender = get_user_profile_by_email(sender_email)
    if realm is None:
        realm = sender.realm
    parsed_recipients = extract_recipients(recipients)
    if recipient_type_name == "stream":
        # Autocreate the target stream; deliberately ignore the "created"
        # flag (named _created to avoid clobbering the gettext alias `_`).
        stream, _created = create_stream_if_needed(realm, parsed_recipients[0])
    try:
        return check_message(sender, get_client("Internal"), recipient_type_name,
                             parsed_recipients, subject, content, realm)
    except JsonableError as e:
        logging.error("Error queueing internal message by %s: %s" % (sender_email, str(e)))
        return None
def internal_send_message(sender_email, recipient_type_name, recipients,
                          subject, content, realm=None):
    # type: (Text, str, Text, Text, Text, Optional[Realm]) -> None
    """Prepare and immediately send a single internal message.

    Does nothing if validation failed (internal_prep_message has already
    logged the error in that case).
    """
    msg = internal_prep_message(sender_email, recipient_type_name, recipients,
                                subject, content, realm)
    if msg is not None:
        do_send_messages([msg])
def pick_color(user_profile):
    # type: (UserProfile) -> Text
    """Choose a color for a new stream subscription of this user."""
    stream_subs = Subscription.objects.filter(
        user_profile=user_profile,
        active=True,
        recipient__type=Recipient.STREAM,
    )
    return pick_color_helper(user_profile, stream_subs)
def pick_color_helper(user_profile, subs):
    # type: (UserProfile, Iterable[Subscription]) -> Text
    """Pick the first palette color not used by any active subscription."""
    # These colors are shared with the palette in subs.js.
    taken = [subscription.color for subscription in subs if subscription.active]
    for candidate in STREAM_ASSIGNMENT_COLORS:
        if candidate not in taken:
            return candidate
    # Palette exhausted: cycle deterministically, keyed on the number of
    # colors in use (duplicates included, matching the list's length).
    return STREAM_ASSIGNMENT_COLORS[len(taken) % len(STREAM_ASSIGNMENT_COLORS)]
def get_subscription(stream_name, user_profile):
    # type: (Text, UserProfile) -> Subscription
    """Return the user's active subscription to the named stream."""
    stream = get_stream(stream_name, user_profile.realm)
    recipient = get_recipient(Recipient.STREAM, stream.id)
    return Subscription.objects.get(
        user_profile=user_profile,
        recipient=recipient,
        active=True,
    )
def validate_user_access_to_subscribers(user_profile, stream):
    # type: (Optional[UserProfile], Stream) -> None
    """Validates whether the user can view the subscribers of a stream.

    Raises a JsonableError if:
    * The user and the stream are in different realms
    * The realm is MIT and the stream is not invite only.
    * The stream is invite only, requesting_user is passed, and that user
      does not subscribe to the stream.
    """
    stream_dict = {
        "realm__domain": stream.realm.domain,
        "realm_id": stream.realm_id,
        "invite_only": stream.invite_only,
    }
    # The lambda defers the subscription lookup until (and unless) the
    # helper actually needs it.
    validate_user_access_to_subscribers_helper(
        user_profile, stream_dict,
        lambda: subscribed_to_stream(user_profile, stream))
def validate_user_access_to_subscribers_helper(user_profile, stream_dict, check_user_subscribed):
    # type: (Optional[UserProfile], Mapping[str, Any], Callable[[], bool]) -> None
    """Variant of validate_user_access_to_subscribers that works from a
    stream dict rather than a full Stream object.

    `check_user_subscribed` is a zero-argument callable reporting whether
    the user subscribes to the stream; it is only invoked for invite-only
    streams.
    """
    if user_profile is None:
        raise ValidationError("Missing user to validate access for")
    if user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not in given realm")
    is_invite_only = stream_dict["invite_only"]
    if user_profile.realm.is_zephyr_mirror_realm and not is_invite_only:
        raise JsonableError(_("You cannot get subscribers for public streams in this realm"))
    if is_invite_only and not check_user_subscribed():
        raise JsonableError(_("Unable to retrieve subscribers for invite-only stream"))
# sub_dict is a dictionary mapping stream_id => whether the user is subscribed to that stream
def bulk_get_subscriber_user_ids(stream_dicts, user_profile, sub_dict):
    # type: (Iterable[Mapping[str, Any]], UserProfile, Mapping[int, bool]) -> Dict[int, List[int]]
    """Map each stream's id to the ids of its active subscribers.

    Streams whose subscriber list `user_profile` may not access are
    skipped; their entry in the result stays an empty list.
    """
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        try:
            # Bind stream_dict as a default argument: the helper calls the
            # lambda synchronously today, but a late-binding closure over a
            # loop variable is a latent bug if that ever changes.
            validate_user_access_to_subscribers_helper(
                user_profile, stream_dict,
                lambda stream_dict=stream_dict: sub_dict[stream_dict["id"]])
        except JsonableError:
            continue
        target_stream_dicts.append(stream_dict)
    subscriptions = Subscription.objects.select_related("recipient").filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id__in=[stream["id"] for stream in target_stream_dicts],
        user_profile__is_active=True,
        active=True).values("user_profile_id", "recipient__type_id")
    # Every requested stream gets an entry, even inaccessible/empty ones.
    result = dict((stream["id"], []) for stream in stream_dicts) # type: Dict[int, List[int]]
    for sub in subscriptions:
        result[sub["recipient__type_id"]].append(sub["user_profile_id"])
    return result
def get_subscribers_query(stream, requesting_user):
    # type: (Stream, UserProfile) -> QuerySet
    # TODO: Make a generic stub for QuerySet
    """Return a queryset of the active subscriptions to `stream`.

    Raises JsonableError if `requesting_user` may not view the subscriber
    list.  Callers can refine the result with select_related(), values(),
    etc. depending on whether they want objects or just certain fields.
    """
    validate_user_access_to_subscribers(requesting_user, stream)
    # Non-active users may still hold "active" subscription rows so they
    # can be easily reactivated with their old subscriptions later; hence
    # the extra filter on UserProfile.is_active.
    return Subscription.objects.filter(recipient__type=Recipient.STREAM,
                                       recipient__type_id=stream.id,
                                       user_profile__is_active=True,
                                       active=True)
def get_subscribers(stream, requesting_user=None):
    # type: (Stream, Optional[UserProfile]) -> List[UserProfile]
    """Return the UserProfiles actively subscribed to `stream`."""
    query = get_subscribers_query(stream, requesting_user).select_related()
    return [sub.user_profile for sub in query]
def get_subscriber_emails(stream, requesting_user=None):
    # type: (Stream, Optional[UserProfile]) -> List[Text]
    """Return the emails of users actively subscribed to `stream`."""
    rows = get_subscribers_query(stream, requesting_user).values('user_profile__email')
    return [row['user_profile__email'] for row in rows]
def maybe_get_subscriber_emails(stream, user_profile):
    # type: (Stream, UserProfile) -> List[Text]
    """Alternate version of get_subscriber_emails that takes a Stream object
    only (not a name), and simply returns an empty list if unable to get a
    real subscriber list (because we're on the MIT realm)."""
    try:
        return get_subscriber_emails(stream, requesting_user=user_profile)
    except JsonableError:
        return []
def set_stream_color(user_profile, stream_name, color=None):
    # type: (UserProfile, Text, Optional[Text]) -> Text
    """Assign a color to the user's subscription, picking one if not given.

    Returns the color that was assigned.
    """
    subscription = get_subscription(stream_name, user_profile)
    subscription.color = color if color else pick_color(user_profile)
    subscription.save(update_fields=["color"])
    return subscription.color
def notify_subscriptions_added(user_profile, sub_pairs, stream_emails, no_log=False):
    # type: (UserProfile, Iterable[Tuple[Subscription, Stream]], Callable[[Stream], List[Text]], bool) -> None
    """Notify `user_profile`'s own clients about subscriptions they gained.

    `stream_emails` is a callable producing the subscriber email list for a
    given stream.
    """
    if not no_log:
        log_event({'type': 'subscription_added',
                   'user': user_profile.email,
                   'names': [stream.name for sub, stream in sub_pairs],
                   'domain': user_profile.realm.domain})
    # Send a notification to the user who subscribed.
    payload = []
    for (subscription, stream) in sub_pairs:
        payload.append(dict(name=stream.name,
                            stream_id=stream.id,
                            in_home_view=subscription.in_home_view,
                            invite_only=stream.invite_only,
                            color=subscription.color,
                            email_address=encode_email_address(stream),
                            desktop_notifications=subscription.desktop_notifications,
                            audible_notifications=subscription.audible_notifications,
                            description=stream.description,
                            pin_to_top=subscription.pin_to_top,
                            subscribers=stream_emails(stream)))
    send_event(dict(type="subscription", op="add", subscriptions=payload),
               [user_profile.id])
def get_peer_user_ids_for_stream_change(stream, altered_users, subscribed_users):
    # type: (Stream, Iterable[UserProfile], Iterable[UserProfile]) -> Set[int]
    '''
    altered_users is a list of users that we are adding/removing
    subscribed_users is the list of already subscribed users

    Based on stream policy, we notify the correct bystanders, while
    not notifying altered_users (who get subscribers via another event)
    '''
    altered_ids = {user.id for user in altered_users}
    if stream.invite_only:
        # PRIVATE STREAMS: only the remaining subscribers may know.
        return {user.id for user in subscribed_users} - altered_ids
    # PUBLIC STREAMS: we now do "peer_add" or "peer_remove" events even for
    # streams users were never subscribed to, in order for the
    # neversubscribed structure to stay up-to-date.
    return set(active_user_ids(stream.realm)) - altered_ids
def query_all_subs_by_stream(streams):
    # type: (Iterable[Stream]) -> Dict[int, List[UserProfile]]
    """Map each stream's id to the active users subscribed to it."""
    stream_ids = [stream.id for stream in streams]
    subs = Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id__in=stream_ids,
        user_profile__is_active=True,
        active=True,
    ).select_related('recipient', 'user_profile')
    result = defaultdict(list) # type: Dict[int, List[UserProfile]]
    for sub in subs:
        result[sub.recipient.type_id].append(sub.user_profile)
    return result
def bulk_add_subscriptions(streams, users):
    # type: (Iterable[Stream], Iterable[UserProfile]) -> Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
    """Subscribe every user in `users` to every stream in `streams`.

    Returns (subscribed, already_subscribed), each a list of
    (UserProfile, Stream) pairs.  Per (user, stream) there are three cases:
    a brand-new Subscription row (bulk-created), an inactive row
    (reactivated), or an already-active row (reported back, no-op).
    Sends "occupy", "add" and "peer_add" events to the relevant clients.
    """
    # Map each stream's Recipient row id back to the Stream object.
    recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams]) # type: Mapping[int, Recipient]
    recipients = [recipient.id for recipient in recipients_map.values()] # type: List[int]
    stream_map = {} # type: Dict[int, Stream]
    for stream in streams:
        stream_map[recipients_map[stream.id].id] = stream
    # All existing stream subscriptions (active or not) per user id.
    subs_by_user = defaultdict(list) # type: Dict[int, List[Subscription]]
    all_subs_query = Subscription.objects.select_related("user_profile")
    for sub in all_subs_query.filter(user_profile__in=users,
                                     recipient__type=Recipient.STREAM):
        subs_by_user[sub.user_profile_id].append(sub)
    already_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
    subs_to_activate = [] # type: List[Tuple[Subscription, Stream]]
    new_subs = [] # type: List[Tuple[UserProfile, int, Stream]]
    # Classify each requested (user, recipient) pair into one of the three
    # cases; whatever remains in needs_new_sub has no row at all yet.
    for user_profile in users:
        needs_new_sub = set(recipients) # type: Set[int]
        for sub in subs_by_user[user_profile.id]:
            if sub.recipient_id in needs_new_sub:
                needs_new_sub.remove(sub.recipient_id)
                if sub.active:
                    already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
                else:
                    subs_to_activate.append((sub, stream_map[sub.recipient_id]))
                    # Mark the sub as active, without saving, so that
                    # pick_color will consider this to be an active
                    # subscription when picking colors
                    sub.active = True
        for recipient_id in needs_new_sub:
            new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
    # Build the unsaved Subscription objects for the brand-new rows.
    subs_to_add = [] # type: List[Tuple[Subscription, Stream]]
    for (user_profile, recipient_id, stream) in new_subs:
        color = pick_color_helper(user_profile, subs_by_user[user_profile.id])
        sub_to_add = Subscription(user_profile=user_profile, active=True,
                                  color=color, recipient_id=recipient_id,
                                  desktop_notifications=user_profile.enable_stream_desktop_notifications,
                                  audible_notifications=user_profile.enable_stream_sounds)
        # Append so later pick_color_helper calls see this color as taken.
        subs_by_user[user_profile.id].append(sub_to_add)
        subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        # NOTE(review): `user_profile` here is the leftover value from the
        # loops above -- this assumes all `users` share one realm; confirm.
        occupied_streams_before = list(get_occupied_streams(user_profile.realm))
        Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
        Subscription.objects.filter(id__in=[sub.id for (sub, stream) in subs_to_activate]).update(active=True)
        occupied_streams_after = list(get_occupied_streams(user_profile.realm))
    # Announce public streams that went from empty to occupied.
    new_occupied_streams = [stream for stream in
                            set(occupied_streams_after) - set(occupied_streams_before)
                            if not stream.invite_only]
    if new_occupied_streams:
        event = dict(type="stream", op="occupy",
                     streams=[stream.to_dict()
                              for stream in new_occupied_streams])
        send_event(event, active_user_ids(user_profile.realm))
    # Notify all existing users on streams that users have joined
    # First, get all users subscribed to the streams that we care about
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minize DB queries
    all_subs_by_stream = query_all_subs_by_stream(streams=streams)
    def fetch_stream_subscriber_emails(stream):
        # type: (Stream) -> List[Text]
        # Public zephyr-mirror streams don't expose a subscriber list.
        if stream.realm.is_zephyr_mirror_realm and not stream.invite_only:
            return []
        users = all_subs_by_stream[stream.id]
        return [u.email for u in users]
    sub_tuples_by_user = defaultdict(list) # type: Dict[int, List[Tuple[Subscription, Stream]]]
    new_streams = set() # type: Set[Tuple[int, int]]
    for (sub, stream) in subs_to_add + subs_to_activate:
        sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
        new_streams.add((sub.user_profile.id, stream.id))
    # Tell each affected user about their own new subscriptions.
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_emails)
    # Send peer_add events so other clients can update subscriber lists.
    for stream in streams:
        if stream.realm.is_zephyr_mirror_realm and not stream.invite_only:
            continue
        new_users = [user for user in users if (user.id, stream.id) in new_streams]
        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_users=new_users,
            subscribed_users=all_subs_by_stream[stream.id]
        )
        if peer_user_ids:
            for added_user in new_users:
                event = dict(type="subscription", op="peer_add",
                             subscriptions=[stream.name],
                             user_id=added_user.id)
                send_event(event, peer_user_ids)
    return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
            [(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
            already_subscribed)
def notify_subscriptions_removed(user_profile, streams, no_log=False):
    # type: (UserProfile, Iterable[Stream], bool) -> None
    """Notify `user_profile`'s clients that they left these streams."""
    if not no_log:
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'domain': user_profile.realm.domain})
    removals = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    send_event(dict(type="subscription", op="remove", subscriptions=removals),
               [user_profile.id])
def bulk_remove_subscriptions(users, streams):
    # type: (Iterable[UserProfile], Iterable[Stream]) -> Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
    """Unsubscribe every user in `users` from every stream in `streams`.

    Returns (removed, not_subscribed): (UserProfile, Stream) pairs for
    subscriptions that were deactivated vs. ones that were not active to
    begin with.  Sends "vacate", "remove" and "peer_remove" events.
    """
    # Map each stream's Recipient row id back to the Stream object.
    recipients_map = bulk_get_recipients(Recipient.STREAM,
                                         [stream.id for stream in streams]) # type: Mapping[int, Recipient]
    stream_map = {} # type: Dict[int, Stream]
    for stream in streams:
        stream_map[recipients_map[stream.id].id] = stream
    # Active subscriptions per user id, restricted to the target streams.
    subs_by_user = dict((user_profile.id, []) for user_profile in users) # type: Dict[int, List[Subscription]]
    for sub in Subscription.objects.select_related("user_profile").filter(user_profile__in=users,
                                                                          recipient__in=list(recipients_map.values()),
                                                                          active=True):
        subs_by_user[sub.user_profile_id].append(sub)
    subs_to_deactivate = [] # type: List[Tuple[Subscription, Stream]]
    not_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
    for user_profile in users:
        recipients_to_unsub = set([recipient.id for recipient in recipients_map.values()])
        for sub in subs_by_user[user_profile.id]:
            recipients_to_unsub.remove(sub.recipient_id)
            subs_to_deactivate.append((sub, stream_map[sub.recipient_id]))
        # Anything left over had no active subscription for this user.
        for recipient_id in recipients_to_unsub:
            not_subscribed.append((user_profile, stream_map[recipient_id]))
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        # NOTE(review): `user_profile` is the leftover loop variable from
        # above -- this assumes all `users` share one realm; confirm.
        occupied_streams_before = list(get_occupied_streams(user_profile.realm))
        Subscription.objects.filter(id__in=[sub.id for (sub, stream_name) in
                                            subs_to_deactivate]).update(active=False)
        occupied_streams_after = list(get_occupied_streams(user_profile.realm))
    # Announce public streams that went from occupied to empty.
    new_vacant_streams = [stream for stream in
                          set(occupied_streams_before) - set(occupied_streams_after)
                          if not stream.invite_only]
    if new_vacant_streams:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()
                              for stream in new_vacant_streams])
        send_event(event, active_user_ids(user_profile.realm))
    altered_user_dict = defaultdict(list) # type: Dict[int, List[UserProfile]]
    streams_by_user = defaultdict(list) # type: Dict[int, List[Stream]]
    for (sub, stream) in subs_to_deactivate:
        streams_by_user[sub.user_profile_id].append(stream)
        altered_user_dict[stream.id].append(sub.user_profile)
    # Tell each affected user about their own removed subscriptions.
    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
    # Send peer_remove events so other clients can update subscriber lists.
    all_subs_by_stream = query_all_subs_by_stream(streams=streams)
    for stream in streams:
        if stream.realm.is_zephyr_mirror_realm and not stream.invite_only:
            continue
        altered_users = altered_user_dict[stream.id]
        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_users=altered_users,
            subscribed_users=all_subs_by_stream[stream.id]
        )
        if peer_user_ids:
            for removed_user in altered_users:
                event = dict(type="subscription",
                             op="peer_remove",
                             subscriptions=[stream.name],
                             user_id=removed_user.id)
                send_event(event, peer_user_ids)
    return ([(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
            not_subscribed)
def log_subscription_property_change(user_email, stream_name, property, value):
    # type: (Text, Text, Text, Any) -> None
    """Append a subscription-property change record to the event log."""
    log_event(dict(type='subscription_property',
                   property=property,
                   user=user_email,
                   stream_name=stream_name,
                   value=value))
def do_change_subscription_property(user_profile, sub, stream_name,
                                    property_name, value):
    # type: (UserProfile, Subscription, Text, Text, Any) -> None
    """Persist one property of a subscription and notify the user's clients."""
    setattr(sub, property_name, value)
    sub.save(update_fields=[property_name])
    log_subscription_property_change(user_profile.email, stream_name,
                                     property_name, value)
    send_event(dict(type="subscription",
                    op="update",
                    email=user_profile.email,
                    property=property_name,
                    value=value,
                    name=stream_name),
               [user_profile.id])
def do_activate_user(user_profile, log=True, join_date=None):
    # type: (UserProfile, bool, Optional[datetime.datetime]) -> None
    """Activate a user account for the first time.

    Resets the password to unusable, stamps the join date and current TOS
    version, optionally logs the activation, and notifies clients about
    the new user.  `join_date` defaults to the current time.
    """
    if join_date is None:
        # The old signature used `join_date=timezone.now()`, which Python
        # evaluates once at import time -- every activation would share the
        # server-start timestamp.  Compute the default at call time instead.
        join_date = timezone.now()
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_unusable_password()
    user_profile.date_joined = join_date
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])
    if log:
        domain = user_profile.realm.domain
        log_event({'type': 'user_activated',
                   'user': user_profile.email,
                   'domain': domain})
    notify_created_user(user_profile)
def do_reactivate_user(user_profile):
    # type: (UserProfile) -> None
    """Re-activate an existing, previously deactivated user.

    Unlike do_activate_user, this is meant for re-activating existing
    users, so it doesn't reset their password, etc.
    """
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])
    log_event({'type': 'user_reactivated',
               'user': user_profile.email,
               'domain': user_profile.realm.domain})
    notify_created_user(user_profile)
def do_change_password(user_profile, password, log=True, commit=True,
                       hashed_password=False):
    # type: (UserProfile, Text, bool, bool, bool) -> None
    """Set a user's password, optionally from an already-hashed value.

    If `commit` is False, the change is left unsaved on the model.
    """
    if hashed_password:
        # `password` is already a password hash; store it verbatim.  The
        # previous code called set_password() in both branches, which would
        # have hashed the hash and locked the user out.
        user_profile.password = password
    else:
        user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    if log:
        log_event({'type': 'user_change_password',
                   'user': user_profile.email,
                   'pwhash': user_profile.password})
def do_change_full_name(user_profile, full_name, log=True):
    # type: (UserProfile, Text, bool) -> None
    """Rename a user and broadcast the change to the realm (and bot owners)."""
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])
    if log:
        log_event(dict(type='user_change_full_name',
                       user=user_profile.email,
                       full_name=full_name))
    person = dict(email=user_profile.email,
                  user_id=user_profile.id,
                  full_name=user_profile.full_name)
    send_event(dict(type='realm_user', op='update', person=person),
               active_user_ids(user_profile.realm))
    if user_profile.is_bot:
        send_event(dict(type='realm_bot', op='update', bot=person),
                   bot_owner_userids(user_profile))
def do_change_tos_version(user_profile, tos_version, log=True):
    # type: (UserProfile, Text, bool) -> None
    """Record that the user agreed to the given Terms of Service version."""
    user_profile.tos_version = tos_version
    user_profile.save(update_fields=["tos_version"])
    if log:
        log_event(dict(type='user_change_tos_version',
                       user=user_profile.email,
                       tos_version=tos_version))
def do_regenerate_api_key(user_profile, log=True):
    # type: (UserProfile, bool) -> None
    """Replace the user's API key with a freshly generated one."""
    user_profile.api_key = random_api_key()
    user_profile.save(update_fields=["api_key"])
    if log:
        log_event(dict(type='user_change_api_key',
                       user=user_profile.email))
    # Bot owners must learn the new key so their clients keep working.
    if user_profile.is_bot:
        bot_payload = dict(email=user_profile.email,
                           user_id=user_profile.id,
                           api_key=user_profile.api_key)
        send_event(dict(type='realm_bot', op='update', bot=bot_payload),
                   bot_owner_userids(user_profile))
def do_change_avatar_source(user_profile, avatar_source, log=True):
    # type: (UserProfile, Text, bool) -> None
    """Switch where the user's avatar comes from and notify clients."""
    user_profile.avatar_source = avatar_source
    user_profile.save(update_fields=["avatar_source"])
    if log:
        log_event(dict(type='user_change_avatar_source',
                       user=user_profile.email,
                       avatar_source=avatar_source))
    if user_profile.is_bot:
        # Bots: only their owners care about the change.
        send_event(dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 avatar_url=avatar_url(user_profile))),
                   bot_owner_userids(user_profile))
    else:
        # Humans: everyone in the realm sees the avatar.
        person = dict(email=user_profile.email,
                      avatar_url=avatar_url(user_profile),
                      user_id=user_profile.id)
        send_event(dict(type='realm_user', op='update', person=person),
                   active_user_ids(user_profile.realm))
def _default_stream_permision_check(user_profile, stream):
    # type: (UserProfile, Optional[Stream]) -> None
    """Ensure the user (or a bot's owner) may use `stream` as a default.

    A None stream is always allowed.  (The "permision" typo in the name is
    preserved because callers reference it.)
    """
    if stream is None:
        # Any user can have a None default stream
        return
    owner = user_profile.bot_owner if user_profile.is_bot else user_profile
    if stream.invite_only and not subscribed_to_stream(owner, stream):
        raise JsonableError(_('Insufficient permission'))
def do_change_default_sending_stream(user_profile, stream, log=True):
    # type: (UserProfile, Stream, bool) -> None
    """Set the stream a user/bot sends to by default, after a permission check."""
    _default_stream_permision_check(user_profile, stream)
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=['default_sending_stream'])
    if log:
        log_event(dict(type='user_change_default_sending_stream',
                       user=user_profile.email,
                       stream=str(stream)))
    if user_profile.is_bot:
        stream_name = stream.name if stream else None
        send_event(dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_sending_stream=stream_name)),
                   bot_owner_userids(user_profile))
def do_change_default_events_register_stream(user_profile, stream, log=True):
    # type: (UserProfile, Stream, bool) -> None
    """Set the stream a bot registers events for by default, after a permission check."""
    _default_stream_permision_check(user_profile, stream)
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=['default_events_register_stream'])
    if log:
        log_event(dict(type='user_change_default_events_register_stream',
                       user=user_profile.email,
                       stream=str(stream)))
    if user_profile.is_bot:
        stream_name = stream.name if stream else None
        send_event(dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_events_register_stream=stream_name)),
                   bot_owner_userids(user_profile))
def do_change_default_all_public_streams(user_profile, value, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Toggle whether a bot follows all public streams by default."""
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=['default_all_public_streams'])
    if log:
        log_event(dict(type='user_change_default_all_public_streams',
                       user=user_profile.email,
                       value=str(value)))
    if user_profile.is_bot:
        send_event(dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_all_public_streams=user_profile.default_all_public_streams)),
                   bot_owner_userids(user_profile))
def do_change_is_admin(user_profile, value, permission='administer'):
    # type: (UserProfile, bool, str) -> None
    """Grant/revoke an admin permission bit; broadcast 'administer' changes."""
    if permission == "administer":
        user_profile.is_realm_admin = value
        user_profile.save(update_fields=["is_realm_admin"])
        event = dict(type="realm_user", op="update",
                     person=dict(email=user_profile.email,
                                 is_admin=value))
        send_event(event, active_user_ids(user_profile.realm))
    elif permission == "api_super_user":
        user_profile.is_api_super_user = value
        user_profile.save(update_fields=["is_api_super_user"])
    else:
        raise Exception("Unknown permission")
def do_change_bot_type(user_profile, value):
    # type: (UserProfile, int) -> None
    """Persist a new bot type for the given bot user."""
    user_profile.bot_type = value
    user_profile.save(update_fields=["bot_type"])
def do_make_stream_public(user_profile, realm, stream_name):
    # type: (UserProfile, Realm, Text) -> None
    """Make an invite-only stream public; the user must be subscribed."""
    stream_name = stream_name.strip()
    stream = get_stream(stream_name, realm)
    if stream is None:
        raise JsonableError(_('Unknown stream "%s"') % (stream_name,))
    if not subscribed_to_stream(user_profile, stream):
        raise JsonableError(_('You are not invited to this stream.'))
    stream.invite_only = False
    stream.save(update_fields=['invite_only'])
def do_make_stream_private(realm, stream_name):
    # type: (Realm, Text) -> None
    """Make a public stream invite-only."""
    stream_name = stream_name.strip()
    stream = get_stream(stream_name, realm)
    if stream is None:
        raise JsonableError(_('Unknown stream "%s"') % (stream_name,))
    stream.invite_only = True
    stream.save(update_fields=['invite_only'])
def do_rename_stream(realm, old_name, new_name, log=True):
    # type: (Realm, Text, Text, bool) -> Dict[str, Text]
    """Rename a stream, fixing up caches and notifying clients.

    Returns a dict with the stream's new email forwarding address.
    Raises JsonableError for an unknown stream, an invalid new name, or a
    name collision (a case-only rename of the same stream is allowed).
    """
    old_name = old_name.strip()
    new_name = new_name.strip()
    stream = get_stream(old_name, realm)
    if not stream:
        raise JsonableError(_('Unknown stream "%s"') % (old_name,))
    # Will raise if there's an issue.
    check_stream_name(new_name)
    if get_stream(new_name, realm) and old_name.lower() != new_name.lower():
        raise JsonableError(_('Stream name "%s" is already taken') % (new_name,))
    # Re-read the canonical old name off the model before overwriting it.
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])
    if log:
        log_event({'type': 'stream_name_change',
                   'domain': realm.domain,
                   'new_name': new_name})
    recipient = get_recipient(Recipient.STREAM, stream.id)
    messages = Message.objects.filter(recipient=recipient).only("id")
    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, realm)
    new_cache_key = get_stream_cache_key(stream.name, realm)
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient.id), stream.name)
    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the out of
    # date field in all cases.
    cache_delete_many(
        to_dict_cache_key_id(message.id, True) for message in messages)
    cache_delete_many(
        to_dict_cache_key_id(message.id, False) for message in messages)
    new_email = encode_email_address(stream)
    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ['email_address', new_email],
        ['name', new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            name=old_name
        )
        send_event(event, can_access_stream_user_ids(stream))
    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}
def do_change_stream_description(realm, stream_name, new_description):
    # type: (Realm, Text, Text) -> None
    """Update a stream's description and broadcast it to users with access."""
    stream = get_stream(stream_name, realm)
    stream.description = new_description
    stream.save(update_fields=['description'])
    send_event(dict(type='stream', op='update',
                    property='description', name=stream_name,
                    value=new_description),
               can_access_stream_user_ids(stream))
def do_create_realm(string_id, name, restricted_to_domain=None,
                    invite_required=None, org_type=None):
    # type: (Text, Text, Optional[bool], Optional[bool], Optional[int]) -> Tuple[Realm, bool]
    """Create a realm if none with `string_id` exists yet.

    Returns (realm, created).  If the realm already existed it is returned
    unchanged with created=False.  On creation, also sets up the default
    notifications stream, posts a welcome message, logs the event, and
    announces the signup to NEW_USER_BOT if configured.
    """
    realm = get_realm(string_id)
    created = not realm
    if created:
        # Only pass the optional settings that were explicitly given, so
        # the model defaults apply otherwise.
        kwargs = {} # type: Dict[str, Any]
        if restricted_to_domain is not None:
            kwargs['restricted_to_domain'] = restricted_to_domain
        if invite_required is not None:
            kwargs['invite_required'] = invite_required
        if org_type is not None:
            kwargs['org_type'] = org_type
        realm = Realm(string_id=string_id, name=name,
                      domain=string_id + '@acme.com', **kwargs)
        realm.save()
        # Create stream once Realm object has been saved
        notifications_stream, _ = create_stream_if_needed(realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
        realm.notifications_stream = notifications_stream
        realm.save(update_fields=['notifications_stream'])
        # Include a welcome message in this notifications stream
        product_name = "Zulip"
        content = """Hello, and welcome to %s!
This is a message on stream `%s` with the topic `welcome`. We'll use this stream for
system-generated notifications.""" % (product_name, notifications_stream.name,)
        # NOTE(review): internal_prep_message can return None on error;
        # this assumes do_send_messages tolerates that -- confirm.
        msg = internal_prep_message(settings.WELCOME_BOT, 'stream',
                                    notifications_stream.name, "welcome",
                                    content, realm=realm)
        do_send_messages([msg])
        # Log the event
        log_event({"type": "realm_created",
                   "string_id": string_id,
                   "restricted_to_domain": restricted_to_domain,
                   "invite_required": invite_required,
                   "org_type": org_type})
        if settings.NEW_USER_BOT is not None:
            signup_message = "Signups enabled"
            internal_send_message(settings.NEW_USER_BOT, "stream",
                                  "signups", string_id, signup_message)
    return (realm, created)
def do_change_enable_stream_desktop_notifications(user_profile,
                                                  enable_stream_desktop_notifications,
                                                  log=True):
    # type: (UserProfile, bool, bool) -> None
    """Toggle the user's desktop-notification preference for stream messages."""
    user_profile.enable_stream_desktop_notifications = enable_stream_desktop_notifications
    user_profile.save(update_fields=["enable_stream_desktop_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_stream_desktop_notifications',
                 setting=enable_stream_desktop_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_stream_sounds(user_profile, enable_stream_sounds, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Toggle the user's audible-notification preference for stream messages."""
    user_profile.enable_stream_sounds = enable_stream_sounds
    user_profile.save(update_fields=["enable_stream_sounds"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_stream_sounds',
                 setting=enable_stream_sounds)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_desktop_notifications(user_profile, enable_desktop_notifications, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Toggle the user's global desktop-notification preference."""
    user_profile.enable_desktop_notifications = enable_desktop_notifications
    user_profile.save(update_fields=["enable_desktop_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_desktop_notifications',
                 setting=enable_desktop_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_pm_content_in_desktop_notifications(user_profile,
                                                  pm_content_in_desktop_notifications, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Toggle whether desktop notifications include private-message content."""
    user_profile.pm_content_in_desktop_notifications = pm_content_in_desktop_notifications
    user_profile.save(update_fields=["pm_content_in_desktop_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='pm_content_in_desktop_notifications',
                 setting=pm_content_in_desktop_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_sounds(user_profile, enable_sounds, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Set the user's global notification-sound preference and sync clients."""
    user_profile.enable_sounds = enable_sounds
    user_profile.save(update_fields=["enable_sounds"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_sounds',
                 setting=enable_sounds)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_offline_email_notifications(user_profile, offline_email_notifications, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Set the user's offline email-notification preference and sync clients."""
    user_profile.enable_offline_email_notifications = offline_email_notifications
    user_profile.save(update_fields=["enable_offline_email_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_offline_email_notifications',
                 setting=offline_email_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_offline_push_notifications(user_profile, offline_push_notifications, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Set the user's offline push-notification preference and sync clients."""
    user_profile.enable_offline_push_notifications = offline_push_notifications
    user_profile.save(update_fields=["enable_offline_push_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_offline_push_notifications',
                 setting=offline_push_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_online_push_notifications(user_profile, enable_online_push_notifications, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Set the user's online push-notification preference and sync clients."""
    user_profile.enable_online_push_notifications = enable_online_push_notifications
    user_profile.save(update_fields=["enable_online_push_notifications"])
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_online_push_notifications',
                 setting=enable_online_push_notifications)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_enable_digest_emails(user_profile, enable_digest_emails, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Toggle digest emails for the user; when disabling, drop queued digests."""
    user_profile.enable_digest_emails = enable_digest_emails
    user_profile.save(update_fields=["enable_digest_emails"])
    if not enable_digest_emails:
        # Remove any digest emails that have been enqueued.
        clear_followup_emails_queue(user_profile.email)
    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name='enable_digest_emails',
                 setting=enable_digest_emails)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_autoscroll_forever(user_profile, autoscroll_forever, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Persist the user's autoscroll-forever display preference."""
    user_profile.autoscroll_forever = autoscroll_forever
    user_profile.save(update_fields=["autoscroll_forever"])
    if log:
        log_event(dict(type='autoscroll_forever',
                       user=user_profile.email,
                       autoscroll_forever=autoscroll_forever))
def do_change_enter_sends(user_profile, enter_sends):
    # type: (UserProfile, bool) -> None
    """Persist whether pressing Enter sends a message for this user."""
    user_profile.enter_sends = enter_sends
    user_profile.save(update_fields=["enter_sends"])
def do_change_default_desktop_notifications(user_profile, default_desktop_notifications):
    # type: (UserProfile, bool) -> None
    """Persist the user's default desktop-notifications preference."""
    user_profile.default_desktop_notifications = default_desktop_notifications
    user_profile.save(update_fields=["default_desktop_notifications"])
def do_change_twenty_four_hour_time(user_profile, setting_value, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Set the user's 24-hour-clock display preference and sync clients."""
    user_profile.twenty_four_hour_time = setting_value
    user_profile.save(update_fields=["twenty_four_hour_time"])
    event = dict(type='update_display_settings',
                 user=user_profile.email,
                 setting_name='twenty_four_hour_time',
                 setting=setting_value)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_left_side_userlist(user_profile, setting_value, log=True):
    # type: (UserProfile, bool, bool) -> None
    """Set the user's left-sidebar user-list display preference and sync clients."""
    user_profile.left_side_userlist = setting_value
    user_profile.save(update_fields=["left_side_userlist"])
    event = dict(type='update_display_settings',
                 user=user_profile.email,
                 setting_name='left_side_userlist',
                 setting=setting_value)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def do_change_default_language(user_profile, setting_value, log=True):
    # type: (UserProfile, Text, bool) -> None
    """Persist the user's default UI language and sync clients."""
    if setting_value == 'zh_CN':
        # NB: remove this once we upgrade to Django 1.9
        # zh-cn and zh-tw will be replaced by zh-hans and zh-hant in
        # Django 1.9
        setting_value = 'zh_HANS'

    user_profile.default_language = setting_value
    user_profile.save(update_fields=["default_language"])
    event = dict(type='update_display_settings',
                 user=user_profile.email,
                 setting_name='default_language',
                 setting=setting_value)
    if log:
        log_event(event)
    send_event(event, [user_profile.id])
def set_default_streams(realm, stream_dict):
    # type: (Realm, Dict[Text, Dict[Text, Any]]) -> None
    """Replace the realm's default streams with those described in stream_dict.

    stream_dict maps stream name -> options ('invite_only', 'description');
    missing streams are created.
    """
    DefaultStream.objects.filter(realm=realm).delete()
    stream_names = []
    for name, options in stream_dict.items():
        stream_names.append(name)
        stream, _ = create_stream_if_needed(
            realm, name,
            invite_only=options["invite_only"],
            stream_description=options["description"])
        DefaultStream.objects.create(stream=stream, realm=realm)

    # Always include the realm's default notifications streams, if it exists
    if realm.notifications_stream is not None:
        DefaultStream.objects.get_or_create(stream=realm.notifications_stream, realm=realm)

    log_event(dict(type='default_streams',
                   domain=realm.domain,
                   streams=stream_names))
def notify_default_streams(realm):
    # type: (Realm) -> None
    """Broadcast the realm's current default-stream list to its active users."""
    defaults = get_default_streams_for_realm(realm)
    event = dict(type="default_streams",
                 default_streams=streams_to_dicts_sorted(defaults))
    send_event(event, active_user_ids(realm))
def do_add_default_stream(realm, stream_name):
    # type: (Realm, Text) -> None
    """Mark stream_name (creating it if needed) as a realm default stream."""
    stream, _ = create_stream_if_needed(realm, stream_name)
    already_default = DefaultStream.objects.filter(realm=realm, stream=stream).exists()
    if not already_default:
        DefaultStream.objects.create(realm=realm, stream=stream)
        notify_default_streams(realm)
def do_remove_default_stream(realm, stream_name):
    # type: (Realm, Text) -> None
    """Remove stream_name from the realm's default streams.

    Raises JsonableError if no such stream exists.
    """
    stream = get_stream(stream_name, realm)
    if stream is None:
        raise JsonableError(_("Stream does not exist"))
    DefaultStream.objects.filter(realm=realm, stream=stream).delete()
    notify_default_streams(realm)
def get_default_streams_for_realm(realm):
    # type: (Realm) -> List[Stream]
    """Return the Stream objects configured as defaults for this realm."""
    defaults = DefaultStream.objects.select_related(
        "stream", "stream__realm").filter(realm=realm)
    return [default.stream for default in defaults]
def get_default_subs(user_profile):
    # type: (UserProfile) -> List[Stream]
    """Return the streams a new user should be subscribed to.

    Right now default streams are realm-wide.  This wrapper gives us
    flexibility to some day further customize how we set up default
    streams for new users.
    """
    return get_default_streams_for_realm(user_profile.realm)
# returns default streams in json serializeable format
def streams_to_dicts_sorted(streams):
    # type: (List[Stream]) -> List[Dict[str, Any]]
    """Serialize each stream via to_dict() and sort the results by name."""
    dicts = [stream.to_dict() for stream in streams]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def do_update_user_activity_interval(user_profile, log_time):
    # type: (UserProfile, datetime.datetime) -> None
    """Record activity starting at log_time, merging into the most recent
    interval when the two overlap."""
    effective_end = log_time + datetime.timedelta(minutes=15)

    # This code isn't perfect, because with various races we might end
    # up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing
    last = UserActivityInterval.objects.filter(
        user_profile=user_profile).order_by("-end").first()
    if last is not None:
        # The intervals overlap when either endpoint of the new interval
        # falls inside the old one; in that case stretch the old interval
        # to cover both.
        starts_inside = last.start <= log_time <= last.end
        ends_inside = last.start <= effective_end <= last.end
        if starts_inside or ends_inside:
            last.start = min(last.start, log_time)
            last.end = max(last.end, effective_end)
            last.save(update_fields=["start", "end"])
            return

    # No prior interval, or no overlap: open a fresh one.
    UserActivityInterval.objects.create(user_profile=user_profile,
                                        start=log_time,
                                        end=effective_end)
@statsd_increment('user_activity')
def do_update_user_activity(user_profile, client, query, log_time):
    # type: (UserProfile, Client, Text, datetime.datetime) -> None
    """Bump the per-(user, client, query) activity counter and last-visit time."""
    (activity, created) = UserActivity.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        query=query,
        defaults={'last_visit': log_time, 'count': 0})

    activity.count += 1
    activity.last_visit = log_time
    activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile, presence):
    # type: (UserProfile, UserPresence) -> None
    """Broadcast a presence update for user_profile to everyone in the realm.

    The event's `presence` field maps the reporting client's name to that
    client's presence payload.
    """
    presence_dict = presence.to_dict()
    event = dict(type="presence",
                 email=user_profile.email,
                 server_timestamp=time.time(),
                 # Reuse presence_dict rather than serializing a second time;
                 # the original called presence.to_dict() twice for the same
                 # value.
                 presence={presence_dict['client']: presence_dict})
    send_event(event, active_user_ids(user_profile.realm))
def consolidate_client(client):
    # type: (Client) -> Client
    """Alias ZulipDesktop onto the canonical 'website' client.

    The web app reports a client as 'website'.  The desktop app reports a
    client as ZulipDesktop due to it setting a custom user agent; we want
    both to count as web users.
    """
    if client.name in ['ZulipDesktop']:
        return get_client('website')
    return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile, client, log_time, status):
    # type: (UserProfile, Client, datetime.datetime, int) -> None
    """Record a presence ping from (user, client), and broadcast a presence
    event when the user was just created or just came back online."""
    client = consolidate_client(client)
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile = user_profile,
        client = client,
        defaults = {'timestamp': log_time,
                    'status': status})

    # A row more than ~70s old means the client went quiet; this ping is
    # effectively a reconnect.
    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)

    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    #
    # NOTE(review): `and` binds tighter than `or`, so the condition below
    # parses as (not created and stale_status) or was_idle or
    # (status == presence.status) -- i.e. the `not created` guard does NOT
    # cover the last two clauses.  Confirm whether that is intended before
    # restructuring.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)

    if not user_profile.realm.is_zephyr_mirror_realm and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
        send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile, log_time):
    # type: (UserProfile, datetime.datetime) -> None
    """Queue an asynchronous update of the user's activity interval."""
    event = dict(user_profile_id=user_profile.id,
                 time=datetime_to_timestamp(log_time))
    queue_json_publish("user_activity_interval", event,
                       lambda e: do_update_user_activity_interval(user_profile, log_time))
def update_user_presence(user_profile, client, log_time, status,
                         new_user_input):
    # type: (UserProfile, Client, datetime.datetime, int, bool) -> None
    """Queue a presence update; on genuine user input also record activity."""
    event = dict(user_profile_id=user_profile.id,
                 status=status,
                 time=datetime_to_timestamp(log_time),
                 client=client.name)
    queue_json_publish("user_presence", event,
                       lambda e: do_update_user_presence(user_profile, client,
                                                         log_time, status))
    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile, pointer, update_flags=False):
    # type: (UserProfile, int, bool) -> None
    """Move the user's pointer, optionally marking the skipped-over messages
    as read, and notify the user's clients."""
    prev_pointer = user_profile.pointer
    user_profile.pointer = pointer
    user_profile.save(update_fields=["pointer"])

    if update_flags:
        # Until we handle the new read counts in the Android app
        # natively, this is a shim that will mark as read any messages
        # up until the pointer move
        unread = UserMessage.objects.filter(user_profile=user_profile,
                                            message__id__gt=prev_pointer,
                                            message__id__lte=pointer,
                                            flags=~UserMessage.flags.read)
        unread.update(flags=F('flags').bitor(UserMessage.flags.read))

    send_event(dict(type='pointer', pointer=pointer), [user_profile.id])
def do_update_message_flags(user_profile, operation, flag, messages, all, stream_obj, topic_name):
    # type: (UserProfile, Text, Text, Sequence[int], bool, Optional[Stream], Optional[Text]) -> int
    """Add or remove a UserMessage flag bit (e.g. 'read', 'starred') on a set
    of the user's messages and notify their clients.

    Scope selection: `all` wins (every message the user has), then
    `stream_obj` (optionally narrowed by `topic_name`), else the explicit
    `messages` id list.  Returns the number of rows updated.

    NOTE(review): `count` is only bound in the 'add'/'remove' branches; any
    other `operation` value would hit an UnboundLocalError at the statsd
    call below -- presumably callers validate `operation` first; confirm.
    """
    flagattr = getattr(UserMessage.flags, flag)

    if all:
        log_statsd_event('bankruptcy')
        msgs = UserMessage.objects.filter(user_profile=user_profile)
    elif stream_obj is not None:
        recipient = get_recipient(Recipient.STREAM, stream_obj.id)
        if topic_name:
            msgs = UserMessage.objects.filter(message__recipient=recipient,
                                              user_profile=user_profile,
                                              message__subject__iexact=topic_name)
        else:
            msgs = UserMessage.objects.filter(message__recipient=recipient, user_profile=user_profile)
    else:
        msgs = UserMessage.objects.filter(user_profile=user_profile,
                                          message__id__in=messages)

        # Hack to let you star any message
        if msgs.count() == 0:
            if not len(messages) == 1:
                raise JsonableError(_("Invalid message(s)"))
            if flag != "starred":
                raise JsonableError(_("Invalid message(s)"))

            # Validate that the user could have read the relevant message
            message = access_message(user_profile, messages[0])[0]

            # OK, this is a message that you legitimately have access
            # to via narrowing to the stream it is on, even though you
            # didn't actually receive it. So we create a historical,
            # read UserMessage message row for you to star.
            UserMessage.objects.create(user_profile=user_profile,
                                       message=message,
                                       flags=UserMessage.flags.historical | UserMessage.flags.read)

    # The filter() statements below prevent postgres from doing a lot of
    # unnecessary work, which is a big deal for users updating lots of
    # flags (e.g. bankruptcy). This patch arose from seeing slow calls
    # to POST /json/messages/flags in the logs. The filter() statements
    # are kind of magical; they are actually just testing the one bit.
    if operation == 'add':
        msgs = msgs.filter(flags=~flagattr)
        if stream_obj:
            # Capture the affected ids before update() mutates the rows.
            messages = list(msgs.values_list('message__id', flat=True))
        count = msgs.update(flags=F('flags').bitor(flagattr))
    elif operation == 'remove':
        msgs = msgs.filter(flags=flagattr)
        if stream_obj:
            messages = list(msgs.values_list('message__id', flat=True))
        count = msgs.update(flags=F('flags').bitand(~flagattr))

    event = {'type': 'update_message_flags',
             'operation': operation,
             'flag': flag,
             'messages': messages,
             'all': all}
    log_event(event)
    send_event(event, [user_profile.id])

    statsd.incr("flags.%s.%s" % (flag, operation), count)

    return count
def subscribed_to_stream(user_profile, stream):
    # type: (UserProfile, Stream) -> bool
    """Return True if the user has an active subscription to the stream."""
    try:
        sub = Subscription.objects.get(user_profile=user_profile,
                                       active=True,
                                       recipient__type=Recipient.STREAM,
                                       recipient__type_id=stream.id)
        return bool(sub)
    except Subscription.DoesNotExist:
        return False
def truncate_content(content, max_length, truncation_message):
    # type: (Text, int, Text) -> Text
    """Limit content to max_length characters, replacing the tail with
    truncation_message when it is too long."""
    if len(content) <= max_length:
        return content
    keep = max_length - len(truncation_message)
    return content[:keep] + truncation_message
def truncate_body(body):
    # type: (Text) -> Text
    """Truncate a message body to the maximum allowed message length."""
    return truncate_content(body, MAX_MESSAGE_LENGTH, "...")
def truncate_topic(topic):
    # type: (Text) -> Text
    """Truncate a topic to the maximum allowed subject length."""
    return truncate_content(topic, MAX_SUBJECT_LENGTH, "...")
def update_user_message_flags(message, ums):
    # type: (Message, Iterable[UserMessage]) -> None
    """Sync per-recipient flag bits (alert words, mentions, wildcard mention,
    /me message) on the given UserMessage rows to match `message`'s rendered
    state, saving only rows whose flags actually changed."""
    wildcard = message.mentions_wildcard
    mentioned_ids = message.mentions_user_ids
    ids_with_alert_words = message.user_ids_with_alert_words
    changed_ums = set()  # type: Set[UserMessage]

    def update_flag(um, should_set, flag):
        # type: (UserMessage, bool, int) -> None
        # Set or clear a single bit, recording rows that were modified so we
        # only issue saves for real changes.
        if should_set:
            if not (um.flags & flag):
                um.flags |= flag
                changed_ums.add(um)
        else:
            if (um.flags & flag):
                um.flags &= ~flag
                changed_ums.add(um)

    for um in ums:
        has_alert_word = um.user_profile_id in ids_with_alert_words
        update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)

        mentioned = um.user_profile_id in mentioned_ids
        update_flag(um, mentioned, UserMessage.flags.mentioned)

        update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)

        is_me_message = getattr(message, 'is_me_message', False)
        update_flag(um, is_me_message, UserMessage.flags.is_me_message)

    for um in changed_ums:
        um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages):
    # type: (List[Message]) -> List[int]
    """Updates the message as stored in the to_dict cache (for serving
    messages).  Returns the ids of the refreshed messages."""
    items_for_remote_cache = {}
    message_ids = []
    for changed_message in changed_messages:
        message_ids.append(changed_message.id)
        # Cache both the markdown-rendered and the plain variants.
        for apply_markdown in (True, False):
            key = to_dict_cache_key(changed_message, apply_markdown)
            items_for_remote_cache[key] = (
                MessageDict.to_dict_uncached(changed_message,
                                             apply_markdown=apply_markdown),)
    cache_set_many(items_for_remote_cache)
    return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile, message, content, rendered_content):
    # type: (UserProfile, Message, Optional[Text], Optional[Text]) -> None
    """Replace a message's content and rendering, refresh its caches, and
    push an update_message event to everyone who received it."""
    event = {
        'type': 'update_message',
        'sender': user_profile.email,
        'message_id': message.id}  # type: Dict[str, Any]
    changed_messages = [message]

    ums = UserMessage.objects.filter(message=message.id)

    if content is not None:
        # Recompute per-user flag bits (mentions, alert words, ...) for the
        # new content before swapping it in.
        update_user_message_flags(message, ums)
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content

    log_event(event)
    message.save(update_fields=["content", "rendered_content"])

    # Refresh the to_dict cache entries and record which ids changed.
    event['message_ids'] = update_to_dict_cache(changed_messages)

    def user_info(um):
        # type: (UserMessage) -> Dict[str, Any]
        # Per-recipient event payload: user id plus their flag list.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    send_event(event, list(map(user_info, ums)))
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile, message, subject, propagate_mode, content, rendered_content):
    # type: (UserProfile, Message, Optional[Text], str, Optional[Text], Optional[Text]) -> None
    """Edit a message's content and/or topic.

    `content`/`rendered_content` are None when only the topic changes, and
    `subject` is None when only the content changes.  Records an edit-history
    entry, optionally propagates a topic change to later/all messages in the
    topic (per `propagate_mode`), refreshes caches, and notifies recipients.
    """
    event = {'type': 'update_message',
             'sender': user_profile.email,
             'message_id': message.id}  # type: Dict[str, Any]
    edit_history_event = {}  # type: Dict[str, Any]
    changed_messages = [message]

    # Set first_rendered_content to be the oldest version of the
    # rendered content recorded; which is the current version if the
    # content hasn't been edited before. Note that because one could
    # have edited just the subject, not every edit history event
    # contains a prev_rendered_content element.
    first_rendered_content = message.rendered_content
    if message.edit_history is not None:
        edit_history = ujson.loads(message.edit_history)
        for old_edit_history_event in edit_history:
            if 'prev_rendered_content' in old_edit_history_event:
                first_rendered_content = old_edit_history_event['prev_rendered_content']

    ums = UserMessage.objects.filter(message=message.id)

    if content is not None:
        # Recompute per-user flag bits for the new content.
        update_user_message_flags(message, ums)

        # We are turning off diff highlighting everywhere until ticket #1532 is addressed.
        if False:
            # Don't highlight message edit diffs on prod
            rendered_content = highlight_html_differences(first_rendered_content, rendered_content)

        # Record the pre-edit state in both the client event and the
        # persistent edit history before overwriting the message.
        event['orig_content'] = message.content
        event['orig_rendered_content'] = message.rendered_content
        edit_history_event["prev_content"] = message.content
        edit_history_event["prev_rendered_content"] = message.rendered_content
        edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content

        prev_content = edit_history_event['prev_content']
        if Message.content_has_attachment(prev_content) or Message.content_has_attachment(message.content):
            check_attachment_reference_change(prev_content, message)

    if subject is not None:
        orig_subject = message.topic_name()
        subject = truncate_topic(subject)
        event["orig_subject"] = orig_subject
        event["propagate_mode"] = propagate_mode
        message.subject = subject
        event["stream_id"] = message.recipient.type_id
        event["subject"] = subject
        event['subject_links'] = bugdown.subject_links(message.sender.realm_id, subject)
        edit_history_event["prev_subject"] = orig_subject

        if propagate_mode in ["change_later", "change_all"]:
            propagate_query = Q(recipient = message.recipient, subject = orig_subject)
            # We only change messages up to 2 days in the past, to avoid hammering our
            # DB by changing an unbounded amount of messages
            if propagate_mode == 'change_all':
                before_bound = now() - datetime.timedelta(days=2)

                propagate_query = (propagate_query & ~Q(id = message.id) &
                                   Q(pub_date__range=(before_bound, now())))
            if propagate_mode == 'change_later':
                propagate_query = propagate_query & Q(id__gt = message.id)

            messages = Message.objects.filter(propagate_query).select_related()

            # Evaluate the query before running the update
            messages_list = list(messages)
            messages.update(subject=subject)

            for m in messages_list:
                # The cached ORM object is not changed by messages.update()
                # and the remote cache update requires the new value
                m.subject = subject

            changed_messages += messages_list

    message.last_edit_time = timezone.now()
    event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
    edit_history_event['timestamp'] = event['edit_timestamp']
    # Prepend the new event so the history is newest-first.
    if message.edit_history is not None:
        edit_history.insert(0, edit_history_event)
    else:
        edit_history = [edit_history_event]
    message.edit_history = ujson.dumps(edit_history)

    log_event(event)
    message.save(update_fields=["subject", "content", "rendered_content",
                                "rendered_content_version", "last_edit_time",
                                "edit_history"])

    # Refresh the to_dict cache for the edited message and any messages whose
    # topic was propagated, and tell clients which ids changed.
    event['message_ids'] = update_to_dict_cache(changed_messages)

    def user_info(um):
        # type: (UserMessage) -> Dict[str, Any]
        # Per-recipient event payload: user id plus their flag list.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    send_event(event, list(map(user_info, ums)))
def encode_email_address(stream):
    # type: (Stream) -> Text
    """Build the email-gateway address for a stream."""
    return encode_email_address_helper(stream.name, stream.email_token)
def encode_email_address_helper(name, email_token):
    # type: (Text, Text) -> Text
    """Build an email-gateway address from a stream name and its token.

    Returns '' when this deployment has no email gateway configured.
    """
    # Some deployments may not use the email gateway
    if settings.EMAIL_GATEWAY_PATTERN == '':
        return ''

    # Given the fact that we have almost no restrictions on stream names and
    # that what characters are allowed in e-mail addresses is complicated and
    # dependent on context in the address, we opt for a very simple scheme:
    #
    # Only encode the stream name (leave the + and token alone). Encode
    # everything that isn't alphanumeric plus _ as the percent-prefixed integer
    # ordinal of that character, padded with zeroes to the maximum number of
    # bytes of a UTF-8 encoded Unicode character.
    #
    # Raw string for the regex: "\W" is an invalid string escape (a
    # DeprecationWarning on Python 3.6+ and a SyntaxError on 3.12+).
    encoded_name = re.sub(r"\W", lambda x: "%" + str(ord(x.group(0))).zfill(4), name)
    encoded_token = "%s+%s" % (encoded_name, email_token)
    return settings.EMAIL_GATEWAY_PATTERN % (encoded_token,)
def get_email_gateway_message_string_from_address(address):
    # type: (Text) -> Optional[Text]
    """Extract the encoded stream+token portion from a gateway address, or
    None if the address doesn't match the configured gateway pattern."""
    pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
    if settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
        # Accept mails delivered to any Zulip server
        pattern_parts[-1] = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK
    match = re.compile("(.*?)".join(pattern_parts)).match(address)
    if match is None:
        return None
    return match.group(1)
def decode_email_address(email):
    # type: (Text) -> Tuple[Text, Text]
    """Perform the reverse of encode_email_address: return a tuple of
    (stream_name, email_token).

    NOTE(review): get_email_gateway_message_string_from_address can return
    None for a non-matching address, which would make the membership test
    below raise TypeError -- presumably callers only pass gateway
    addresses; confirm before hardening.
    """
    msg_string = get_email_gateway_message_string_from_address(email)

    if '.' in msg_string:
        # Workaround for Google Groups and other programs that don't accept emails
        # that have + signs in them (see Trac #2102)
        encoded_stream_name, token = msg_string.split('.')
    else:
        encoded_stream_name, token = msg_string.split('+')
    # Raw string for the regex: "\d" is an invalid string escape outside raw
    # literals (deprecated since Python 3.6, a SyntaxError on 3.12+).
    stream_name = re.sub(r"%\d{4}", lambda x: unichr(int(x.group(0)[1:])), encoded_stream_name)
    return stream_name, token
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile):
    # type: (UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
    """Return (subscribed, unsubscribed, never_subscribed) stream dicts for
    the user, each list sorted by stream name.

    'subscribers' entries hold user ids (callers like gather_subscriptions
    translate them to emails), and are omitted for invite-only streams the
    user is no longer on.
    """
    sub_dicts = Subscription.objects.select_related("recipient").filter(
        user_profile    = user_profile,
        recipient__type = Recipient.STREAM).values(
        "recipient__type_id", "in_home_view", "color", "desktop_notifications",
        "audible_notifications", "active", "pin_to_top")

    stream_ids = set([sub["recipient__type_id"] for sub in sub_dicts])

    all_streams = get_active_streams(user_profile.realm).select_related(
        "realm").values("id", "name", "invite_only", "realm_id",
                        "realm__domain", "email_token", "description")

    stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
    stream_hash = {}
    for stream in stream_dicts:
        stream_hash[stream["id"]] = stream

    all_streams_id = [stream["id"] for stream in all_streams]

    subscribed = []
    unsubscribed = []
    never_subscribed = []

    # Deactivated streams aren't in stream_hash.
    streams = [stream_hash[sub["recipient__type_id"]] for sub in sub_dicts
               if sub["recipient__type_id"] in stream_hash]
    streams_subscribed_map = dict((sub["recipient__type_id"], sub["active"]) for sub in sub_dicts)

    # Add never subscribed streams to streams_subscribed_map
    # NOTE(review): `stream not in streams` is a linear scan comparing whole
    # dicts for every stream -- O(n^2) for realms with many streams; an
    # id-set membership test would be cheaper.  Confirm before changing.
    streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})

    subscriber_map = bulk_get_subscriber_user_ids(all_streams, user_profile, streams_subscribed_map)

    sub_unsub_stream_ids = set()
    for sub in sub_dicts:
        sub_unsub_stream_ids.add(sub["recipient__type_id"])
        stream = stream_hash.get(sub["recipient__type_id"])
        if not stream:
            # This stream has been deactivated, don't include it.
            continue

        subscribers = subscriber_map[stream["id"]]

        # Important: don't show the subscribers if the stream is invite only
        # and this user isn't on it anymore.
        if stream["invite_only"] and not sub["active"]:
            subscribers = None

        stream_dict = {'name': stream["name"],
                       'in_home_view': sub["in_home_view"],
                       'invite_only': stream["invite_only"],
                       'color': sub["color"],
                       'desktop_notifications': sub["desktop_notifications"],
                       'audible_notifications': sub["audible_notifications"],
                       'pin_to_top': sub["pin_to_top"],
                       'stream_id': stream["id"],
                       'description': stream["description"],
                       'email_address': encode_email_address_helper(stream["name"], stream["email_token"])}
        if subscribers is not None:
            stream_dict['subscribers'] = subscribers
        if sub["active"]:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)

    all_streams_id_set = set(all_streams_id)
    # Listing public streams are disabled for Zephyr mirroring realms.
    if user_profile.realm.is_zephyr_mirror_realm:
        never_subscribed_stream_ids = set()  # type: Set[int]
    else:
        never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
    never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
                                if ns_stream_dict['id'] in never_subscribed_stream_ids]

    for stream in never_subscribed_streams:
        # Invite-only streams the user was never on are hidden entirely.
        if not stream['invite_only']:
            stream_dict = {'name': stream['name'],
                           'invite_only': stream['invite_only'],
                           'stream_id': stream['id'],
                           'description': stream['description']}
            subscribers = subscriber_map[stream["id"]]
            if subscribers is not None:
                stream_dict['subscribers'] = subscribers
            never_subscribed.append(stream_dict)

    return (sorted(subscribed, key=lambda x: x['name']),
            sorted(unsubscribed, key=lambda x: x['name']),
            sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(user_profile):
    # type: (UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]
    """Return (subscribed, unsubscribed) stream dicts for the user, with
    subscriber ids translated to email addresses."""
    subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)

    user_ids = set()
    for group in (subscribed, unsubscribed, never_subscribed):
        for sub in group:
            user_ids.update(sub.get('subscribers', []))
    email_dict = get_emails_from_user_ids(list(user_ids))

    # Only the subscribed/unsubscribed lists are returned; only those get
    # their subscriber ids rewritten as emails.
    for group in (subscribed, unsubscribed):
        for sub in group:
            if 'subscribers' in sub:
                sub['subscribers'] = [email_dict[user_id] for user_id in sub['subscribers']]

    return (subscribed, unsubscribed)
def get_status_dict(requesting_user_profile):
    # type: (UserProfile) -> Dict[Text, Dict[Text, Dict[str, Any]]]
    """Return realm-wide presence data, or an empty mapping when presence is
    disabled for the realm."""
    realm = requesting_user_profile.realm
    if realm.presence_disabled:
        # Return an empty dict if presence is disabled in this realm
        return defaultdict(dict)
    return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)
def get_realm_user_dicts(user_profile):
    # type: (UserProfile) -> List[Dict[str, Text]]
    """Summarize the realm's active users for client-side user lists."""
    result = []
    for userdict in get_active_user_dicts_in_realm(user_profile.realm):
        result.append({'email': userdict['email'],
                       'user_id': userdict['id'],
                       'is_admin': userdict['is_realm_admin'],
                       'is_bot': userdict['is_bot'],
                       'full_name': userdict['full_name']})
    return result
def get_cross_realm_dicts():
    # type: () -> List[Dict[str, Any]]
    """Summarize the special cross-realm users (e.g. system bots)."""
    result = []
    for email in get_cross_realm_emails():
        user = get_user_profile_by_email(email)
        result.append({'email': user.email,
                       'user_id': user.id,
                       'is_admin': user.is_realm_admin,
                       'is_bot': user.is_bot,
                       'full_name': user.full_name})
    return result
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile, event_types, queue_id):
    # type: (UserProfile, Optional[Iterable[str]], str) -> Dict[str, Any]
    """Build the initial state payload for a newly registered event queue.

    Only the sections whose name appears in `event_types` are populated;
    `event_types is None` means "send everything".
    """
    state = {'queue_id': queue_id}  # type: Dict[str, Any]

    if event_types is None:
        want = lambda msg_type: True
    else:
        # Membership test against the set of requested event types.
        want = set(event_types).__contains__

    if want('alert_words'):
        state['alert_words'] = user_alert_words(user_profile)

    if want('message'):
        # The client should use get_old_messages() to fetch messages
        # starting with the max_message_id. They will get messages
        # newer than that ID via get_events()
        messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
        if messages:
            state['max_message_id'] = messages[0].id
        else:
            # Sentinel: the user has no messages at all.
            state['max_message_id'] = -1

    if want('muted_topics'):
        state['muted_topics'] = ujson.loads(user_profile.muted_topics)

    if want('pointer'):
        state['pointer'] = user_profile.pointer

    if want('presence'):
        state['presences'] = get_status_dict(user_profile)

    if want('realm'):
        # Realm-wide settings, all flattened into 'realm_*'-prefixed keys.
        state['realm_name'] = user_profile.realm.name
        state['realm_restricted_to_domain'] = user_profile.realm.restricted_to_domain
        state['realm_invite_required'] = user_profile.realm.invite_required
        state['realm_invite_by_admins_only'] = user_profile.realm.invite_by_admins_only
        state['realm_authentication_methods'] = user_profile.realm.authentication_methods_dict()
        state['realm_create_stream_by_admins_only'] = user_profile.realm.create_stream_by_admins_only
        state['realm_add_emoji_by_admins_only'] = user_profile.realm.add_emoji_by_admins_only
        state['realm_allow_message_editing'] = user_profile.realm.allow_message_editing
        state['realm_message_content_edit_limit_seconds'] = user_profile.realm.message_content_edit_limit_seconds
        state['realm_default_language'] = user_profile.realm.default_language
        state['realm_waiting_period_threshold'] = user_profile.realm.waiting_period_threshold

    if want('realm_domain'):
        state['realm_domain'] = user_profile.realm.domain

    if want('realm_domains'):
        state['realm_domains'] = do_get_realm_aliases(user_profile.realm)

    if want('realm_emoji'):
        state['realm_emoji'] = user_profile.realm.get_emoji()

    if want('realm_filters'):
        state['realm_filters'] = realm_filters_for_realm(user_profile.realm_id)

    if want('realm_user'):
        state['realm_users'] = get_realm_user_dicts(user_profile)

    if want('realm_bot'):
        state['realm_bots'] = get_owned_bot_dicts(user_profile)

    if want('referral'):
        state['referrals'] = {'granted': user_profile.invites_granted,
                              'used': user_profile.invites_used}

    if want('subscription'):
        subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
        state['subscriptions'] = subscriptions
        state['unsubscribed'] = unsubscribed
        state['never_subscribed'] = never_subscribed

    if want('update_message_flags'):
        # There's no initial data for message flag updates, client will
        # get any updates during a session from get_events()
        pass

    if want('stream'):
        state['streams'] = do_get_streams(user_profile)
    if want('default_streams'):
        state['realm_default_streams'] = streams_to_dicts_sorted(get_default_streams_for_realm(user_profile.realm))

    if want('update_display_settings'):
        state['twenty_four_hour_time'] = user_profile.twenty_four_hour_time
        state['left_side_userlist'] = user_profile.left_side_userlist
        default_language = user_profile.default_language
        if user_profile.default_language == 'zh_HANS':
            # NB: remove this once we upgrade to Django 1.9
            # zh-cn and zh-tw will be replaced by zh-hans and zh-hant in
            # Django 1.9
            default_language = 'zh_CN'
        state['default_language'] = default_language

    if want('update_global_notifications'):
        state['enable_stream_desktop_notifications'] = user_profile.enable_stream_desktop_notifications
        state['enable_stream_sounds'] = user_profile.enable_stream_sounds
        state['enable_desktop_notifications'] = user_profile.enable_desktop_notifications
        state['enable_sounds'] = user_profile.enable_sounds
        state['enable_offline_email_notifications'] = user_profile.enable_offline_email_notifications
        state['enable_offline_push_notifications'] = user_profile.enable_offline_push_notifications
        state['enable_online_push_notifications'] = user_profile.enable_online_push_notifications
        state['enable_digest_emails'] = user_profile.enable_digest_emails

    return state
def apply_events(state, events, user_profile):
    # type: (Dict[str, Any], Iterable[Dict[str, Any]], UserProfile) -> None
    """Mutate `state` (as built by fetch_initial_state_data) in place so it
    reflects `events`, i.e. events that arrived while the initial state was
    being fetched. Raises ValueError on an unknown event type."""
    for event in events:
        if event['type'] == "message":
            state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
        elif event['type'] == "pointer":
            state['pointer'] = max(state['pointer'], event['pointer'])
        elif event['type'] == "realm_user":
            person = event['person']

            def our_person(p):
                # type: (Dict[str, Any]) -> bool
                # Users are matched by email in this state structure.
                return p['email'] == person['email']

            if event['op'] == "add":
                state['realm_users'].append(person)
            elif event['op'] == "remove":
                state['realm_users'] = [user for user in state['realm_users'] if not our_person(user)]
            elif event['op'] == 'update':
                for p in state['realm_users']:
                    if our_person(p):
                        # In the unlikely event that the current user
                        # just changed to/from being an admin, we need
                        # to add/remove the data on all bots in the
                        # realm. This is ugly and probably better
                        # solved by removing the all-realm-bots data
                        # given to admin users from this flow.
                        if ('is_admin' in person and 'realm_bots' in state and
                                user_profile.email == person['email']):
                            if p['is_admin'] and not person['is_admin']:
                                state['realm_bots'] = []
                            if not p['is_admin'] and person['is_admin']:
                                state['realm_bots'] = get_owned_bot_dicts(user_profile)
                        # Now update the person
                        p.update(person)
        elif event['type'] == 'realm_bot':
            if event['op'] == 'add':
                state['realm_bots'].append(event['bot'])
            if event['op'] == 'remove':
                email = event['bot']['email']
                state['realm_bots'] = [b for b in state['realm_bots'] if b['email'] != email]
            if event['op'] == 'update':
                for bot in state['realm_bots']:
                    if bot['email'] == event['bot']['email']:
                        bot.update(event['bot'])
        elif event['type'] == 'stream':
            if event['op'] == 'create':
                for stream in event['streams']:
                    if not stream['invite_only']:
                        stream_data = copy.deepcopy(stream)
                        stream_data['subscribers'] = []
                        # Add stream to never_subscribed (if not invite_only)
                        state['never_subscribed'].append(stream_data)
            if event['op'] == 'delete':
                deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
                state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
                state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
                                             stream['stream_id'] not in deleted_stream_ids]
            if event['op'] == 'update':
                # For legacy reasons, we call stream data 'subscriptions' in
                # the state var here, for the benefit of the JS code.
                for obj in state['subscriptions']:
                    if obj['name'].lower() == event['name'].lower():
                        obj[event['property']] = event['value']
                # Also update the pure streams data
                for stream in state['streams']:
                    if stream['name'].lower() == event['name'].lower():
                        prop = event['property']
                        if prop in stream:
                            stream[prop] = event['value']
            elif event['op'] == "occupy":
                state['streams'] += event['streams']
            elif event['op'] == "vacate":
                stream_ids = [s["stream_id"] for s in event['streams']]
                state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
        elif event['type'] == 'default_streams':
            state['realm_default_streams'] = event['default_streams']
        elif event['type'] == 'realm':
            if event['op'] == "update":
                field = 'realm_' + event['property']
                state[field] = event['value']
            elif event['op'] == "update_dict":
                for key, value in event['data'].items():
                    state['realm_' + key] = value
        elif event['type'] == "subscription":
            if event['op'] in ["add"]:
                # NOTE(review): despite the comment this code originally
                # carried ("Convert the user_profile IDs to emails"), it
                # converts subscriber *emails* to user_profile IDs, matching
                # what register() returns.
                # TODO: Clean up this situation
                for item in event["subscriptions"]:
                    item["subscribers"] = [get_user_profile_by_email(email).id for email in item["subscribers"]]

            def name(sub):
                # type: (Dict[str, Any]) -> Text
                # Case-insensitive stream-name key for matching.
                return sub['name'].lower()

            if event['op'] == "add":
                added_names = set(map(name, event["subscriptions"]))
                was_added = lambda s: name(s) in added_names
                # add the new subscriptions
                state['subscriptions'] += event['subscriptions']
                # remove them from unsubscribed if they had been there
                state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]
                # remove them from never_subscribed if they had been there
                state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]
            elif event['op'] == "remove":
                removed_names = set(map(name, event["subscriptions"]))
                was_removed = lambda s: name(s) in removed_names
                # Find the subs we are affecting.
                removed_subs = list(filter(was_removed, state['subscriptions']))
                # Remove our user from the subscribers of the removed subscriptions.
                for sub in removed_subs:
                    sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]
                # We must effectively copy the removed subscriptions from subscriptions to
                # unsubscribed, since we only have the name in our data structure.
                state['unsubscribed'] += removed_subs
                # Now filter out the removed subscriptions from subscriptions.
                state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]
            elif event['op'] == 'update':
                for sub in state['subscriptions']:
                    if sub['name'].lower() == event['name'].lower():
                        sub[event['property']] = event['value']
            elif event['op'] == 'peer_add':
                user_id = event['user_id']
                for sub in state['subscriptions']:
                    if (sub['name'] in event['subscriptions'] and
                            user_id not in sub['subscribers']):
                        sub['subscribers'].append(user_id)
                for sub in state['never_subscribed']:
                    if (sub['name'] in event['subscriptions'] and
                            user_id not in sub['subscribers']):
                        sub['subscribers'].append(user_id)
            elif event['op'] == 'peer_remove':
                user_id = event['user_id']
                for sub in state['subscriptions']:
                    if (sub['name'] in event['subscriptions'] and
                            user_id in sub['subscribers']):
                        sub['subscribers'].remove(user_id)
        elif event['type'] == "presence":
            state['presences'][event['email']] = event['presence']
        elif event['type'] == "update_message":
            # The client will get the updated message directly
            pass
        elif event['type'] == "referral":
            state['referrals'] = event['referrals']
        elif event['type'] == "update_message_flags":
            # The client will get the message with the updated flags directly
            pass
        elif event['type'] == "realm_domains":
            if event['op'] == 'add':
                state['realm_domains'].append(event['alias'])
            elif event['op'] == 'remove':
                state['realm_domains'] = [alias for alias in state['realm_domains'] if alias['id'] != event['alias_id']]
        elif event['type'] == "realm_emoji":
            state['realm_emoji'] = event['realm_emoji']
        elif event['type'] == "alert_words":
            state['alert_words'] = event['alert_words']
        elif event['type'] == "muted_topics":
            state['muted_topics'] = event["muted_topics"]
        elif event['type'] == "realm_filters":
            state['realm_filters'] = event["realm_filters"]
        elif event['type'] == "update_display_settings":
            if event['setting_name'] == "twenty_four_hour_time":
                state['twenty_four_hour_time'] = event["setting"]
            if event['setting_name'] == 'left_side_userlist':
                state['left_side_userlist'] = event["setting"]
        elif event['type'] == "update_global_notifications":
            if event['notification_name'] == "enable_stream_desktop_notifications":
                state['enable_stream_desktop_notifications'] = event['setting']
            elif event['notification_name'] == "enable_stream_sounds":
                state['enable_stream_sounds'] = event['setting']
            elif event['notification_name'] == "enable_desktop_notifications":
                state['enable_desktop_notifications'] = event['setting']
            elif event['notification_name'] == "enable_sounds":
                state['enable_sounds'] = event['setting']
            elif event['notification_name'] == "enable_offline_email_notifications":
                state['enable_offline_email_notifications'] = event['setting']
            elif event['notification_name'] == "enable_offline_push_notifications":
                state['enable_offline_push_notifications'] = event['setting']
            elif event['notification_name'] == "enable_online_push_notifications":
                state['enable_online_push_notifications'] = event['setting']
            elif event['notification_name'] == "enable_digest_emails":
                state['enable_digest_emails'] = event['setting']
        else:
            raise ValueError("Unexpected event type %s" % (event['type'],))
def do_events_register(user_profile, user_client, apply_markdown=True,
                       event_types=None, queue_lifespan_secs=0, all_public_streams=False,
                       narrow=None):
    # type: (UserProfile, Client, bool, Optional[Iterable[str]], int, bool, Optional[Iterable[Sequence[Text]]]) -> Dict[str, Any]
    """Allocate an event queue in Tornado and return the client's initial state.

    Returns the dict from fetch_initial_state_data(), updated with any events
    that arrived while the initial state was being computed, plus 'queue_id'
    and 'last_event_id'.

    Raises JsonableError if the event queue could not be allocated.
    """
    # Fix for the original mutable-default-argument bug (narrow=[]): default
    # to None and normalize here; semantics for callers are unchanged.
    if narrow is None:
        narrow = []

    # Technically we don't need to check this here because
    # build_narrow_filter will check it, but it's nicer from an error
    # handling perspective to do it before contacting Tornado
    check_supported_events_narrow_filter(narrow)
    queue_id = request_event_queue(user_profile, user_client, apply_markdown,
                                   queue_lifespan_secs, event_types, all_public_streams,
                                   narrow=narrow)
    if queue_id is None:
        raise JsonableError(_("Could not allocate event queue"))
    if event_types is not None:
        event_types_set = set(event_types)  # type: Optional[Set[str]]
    else:
        event_types_set = None

    ret = fetch_initial_state_data(user_profile, event_types_set, queue_id)

    # Apply events that came in while we were fetching initial data
    events = get_user_events(user_profile, queue_id, -1)
    apply_events(ret, events, user_profile)
    if events:
        ret['last_event_id'] = events[-1]['id']
    else:
        # No in-flight events: the client starts from the beginning of the queue.
        ret['last_event_id'] = -1
    return ret
def do_send_confirmation_email(invitee, referrer):
    # type: (PreregistrationUser, UserProfile) -> None
    """
    Send the confirmation/welcome e-mail to an invited user.

    `invitee` is a PreregistrationUser.
    `referrer` is a UserProfile.
    """
    context = {'referrer': referrer,
               'support_email': settings.ZULIP_ADMINISTRATOR,
               'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS}

    # Zephyr mirror realms get MIT-specific invitation templates.
    if referrer.realm.is_zephyr_mirror_realm:
        subject_template_path = 'confirmation/mituser_invite_email_subject.txt'
        body_template_path = 'confirmation/mituser_invite_email_body.txt'
    else:
        subject_template_path = 'confirmation/invite_email_subject.txt'
        body_template_path = 'confirmation/invite_email_body.txt'

    Confirmation.objects.send_confirmation(
        invitee, invitee.email, additional_context=context,
        subject_template_path=subject_template_path,
        body_template_path=body_template_path, host=referrer.realm.host)
@statsd_increment("push_notifications")
def handle_push_notification(user_profile_id, missed_message):
    # type: (int, Dict[str, Any]) -> None
    """Send APNS/GCM mobile push notifications for one missed message.

    Does nothing if the user receives no push notifications, has already
    read the message, or has no registered push devices.
    """
    try:
        user_profile = get_user_profile_by_id(user_profile_id)
        if not (receives_offline_notifications(user_profile) or receives_online_notifications(user_profile)):
            return

        umessage = UserMessage.objects.get(user_profile=user_profile,
                                           message__id=missed_message['message_id'])
        message = umessage.message
        # Already read on another client: no push needed.
        if umessage.flags.read:
            return

        sender_str = message.sender.full_name

        # Device counts per platform; zero means skip that platform.
        apple = num_push_devices_for_user(user_profile, kind=PushDeviceToken.APNS)
        android = num_push_devices_for_user(user_profile, kind=PushDeviceToken.GCM)

        if apple or android:
            # TODO: set badge count in a better way
            # Determine what alert string to display based on the missed messages
            if message.recipient.type == Recipient.HUDDLE:
                alert = "New private group message from %s" % (sender_str,)
            elif message.recipient.type == Recipient.PERSONAL:
                alert = "New private message from %s" % (sender_str,)
            elif message.recipient.type == Recipient.STREAM:
                # NOTE(review): stream pushes presumably only fire for
                # mentions, hence this wording — confirm against callers.
                alert = "New mention from %s" % (sender_str,)
            else:
                alert = "New Zulip mentions and private messages from %s" % (sender_str,)

            if apple:
                apple_extra_data = {'message_ids': [message.id]}
                send_apple_push_notification(user_profile, alert, badge=1, zulip=apple_extra_data)

            if android:
                content = message.content
                # GCM payloads are size-limited; truncate long bodies.
                content_truncated = (len(content) > 200)
                if content_truncated:
                    content = content[:200] + "..."

                android_data = {
                    'user': user_profile.email,
                    'event': 'message',
                    'alert': alert,
                    'zulip_message_id': message.id,  # message_id is reserved for CCS
                    'time': datetime_to_timestamp(message.pub_date),
                    'content': content,
                    'content_truncated': content_truncated,
                    'sender_email': message.sender.email,
                    'sender_full_name': message.sender.full_name,
                    'sender_avatar_url': get_avatar_url(message.sender.avatar_source, message.sender.email),
                }

                if message.recipient.type == Recipient.STREAM:
                    android_data['recipient_type'] = "stream"
                    android_data['stream'] = get_display_recipient(message.recipient)
                    android_data['topic'] = message.subject
                elif message.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL):
                    android_data['recipient_type'] = "private"

                send_android_push_notification(user_profile, android_data)

    except UserMessage.DoesNotExist:
        # The message was deleted (or never existed for this user); log and move on.
        logging.error("Could not find UserMessage with message_id %s" % (missed_message['message_id'],))
def is_inactive(email):
    # type: (Text) -> None
    """Raise ValidationError if an *active* account exists for this email.

    No account at all, or an inactive one, passes silently.
    """
    try:
        existing = get_user_profile_by_email(email)
    except UserProfile.DoesNotExist:
        return
    if existing.is_active:
        raise ValidationError(u'%s is already active' % (email,))
def user_email_is_unique(email):
    # type: (Text) -> None
    """Raise ValidationError if any account (active or not) uses this email."""
    try:
        get_user_profile_by_email(email)
    except UserProfile.DoesNotExist:
        # No such user: the email is free.
        return
    raise ValidationError(u'%s is already registered' % (email,))
def do_invite_users(user_profile, invitee_emails, streams):
    # type: (UserProfile, SizedTextIterable, Iterable[Stream]) -> Tuple[Optional[str], Dict[str, Union[List[Tuple[Text, str]], bool]]]
    """Validate `invitee_emails` and send invitations for the valid ones.

    Returns (error_message, error_data); error_message is None on full
    success. `error_data['errors']` lists (email, reason) pairs and
    `error_data['sent_invitations']` says whether any invitation went out.
    """
    validated_emails = []  # type: List[Text]
    errors = []  # type: List[Tuple[Text, str]]
    skipped = []  # type: List[Tuple[Text, str]]
    ret_error = None  # type: Optional[str]
    ret_error_data = {}  # type: Dict[str, Union[List[Tuple[Text, str]], bool]]

    for email in invitee_emails:
        if email == '':
            continue

        try:
            validators.validate_email(email)
        except ValidationError:
            errors.append((email, _("Invalid address.")))
            continue

        if not email_allowed_for_realm(email, user_profile.realm):
            errors.append((email, _("Outside your domain.")))
            continue

        try:
            existing_user_profile = get_user_profile_by_email(email)
        except UserProfile.DoesNotExist:
            existing_user_profile = None
        try:
            if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
                # Mirror dummy users to be activated must be inactive
                is_inactive(email)
            else:
                # Other users should not already exist at all.
                user_email_is_unique(email)
        except ValidationError:
            skipped.append((email, _("Already has an account.")))
            continue

        validated_emails.append(email)

    if errors:
        ret_error = _("Some emails did not validate, so we didn't send any invitations.")
        ret_error_data = {'errors': errors + skipped, 'sent_invitations': False}
        return ret_error, ret_error_data

    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        ret_error = _("We weren't able to invite anyone.")
        ret_error_data = {'errors': skipped, 'sent_invitations': False}
        return ret_error, ret_error_data

    # Now that we are past all the possible errors, we actually create
    # the PreregistrationUser objects and trigger the email invitations.
    for email in validated_emails:
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(email=email, referred_by=user_profile)
        # We save twice because you cannot associate a ManyToMany field
        # on an unsaved object.
        prereg_user.save()
        prereg_user.streams = streams
        prereg_user.save()

        event = {"email": prereg_user.email, "referrer_email": user_profile.email}
        # Bug fix: bind prereg_user as a default argument. The original plain
        # closure was late-binding, so if the queue processor invoked these
        # handlers after the loop finished, every invitation would reference
        # the *last* PreregistrationUser created.
        queue_json_publish(
            "invites", event,
            lambda event, prereg_user=prereg_user:
                do_send_confirmation_email(prereg_user, user_profile))

    if skipped:
        ret_error = _("Some of those addresses are already using Zulip, "
                      "so we didn't send them an invitation. We did send "
                      "invitations to everyone else!")
        ret_error_data = {'errors': skipped, 'sent_invitations': True}

    return ret_error, ret_error_data
def send_referral_event(user_profile):
    # type: (UserProfile) -> None
    """Push the user's current invitation quota/usage to their clients."""
    referrals = dict(granted=user_profile.invites_granted,
                     used=user_profile.invites_used)
    send_event(dict(type="referral", referrals=referrals), [user_profile.id])
def do_refer_friend(user_profile, email):
    # type: (UserProfile, Text) -> None
    """Email the referrals list about a friend referral, record it, and
    bump the user's invites_used counter."""
    content = ('Referrer: "%s" <%s>\n'
               'Realm: %s\n'
               'Referred: %s') % (user_profile.full_name, user_profile.email,
                                  user_profile.realm.domain, email)
    subject = "Zulip referral: %s" % (email,)
    from_email = '"%s" <%s>' % (user_profile.full_name, 'referrals@zulip.com')
    to_email = '"Zulip Referrals" <zulip+referrals@zulip.com>'
    headers = {'Reply-To': '"%s" <%s>' % (user_profile.full_name, user_profile.email,)}
    EmailMessage(subject, content, from_email, [to_email], headers=headers).send()

    Referral(user_profile=user_profile, email=email).save()
    user_profile.invites_used += 1
    user_profile.save(update_fields=['invites_used'])

    send_referral_event(user_profile)
def notify_realm_emoji(realm):
    # type: (Realm) -> None
    """Broadcast the realm's current emoji set to all its active users."""
    recipients = [userdict['id'] for userdict in get_active_user_dicts_in_realm(realm)]
    send_event(dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()),
               recipients)
def check_add_realm_emoji(realm, name, img_url, author=None):
    # type: (Realm, Text, Text, Optional[UserProfile]) -> None
    """Validate, persist, and announce a new custom realm emoji."""
    new_emoji = RealmEmoji(realm=realm, name=name, img_url=img_url, author=author)
    # Run model validation before storing; raises ValidationError on bad input.
    new_emoji.full_clean()
    new_emoji.save()
    notify_realm_emoji(realm)
def do_remove_realm_emoji(realm, name):
    # type: (Realm, Text) -> None
    """Delete the named custom emoji from the realm and announce the change."""
    target = RealmEmoji.objects.get(realm=realm, name=name)
    target.delete()
    notify_realm_emoji(realm)
def notify_alert_words(user_profile, words):
    # type: (UserProfile, Iterable[Text]) -> None
    """Push the user's current alert-word list to their clients."""
    send_event(dict(type="alert_words", alert_words=words), [user_profile.id])
def do_add_alert_words(user_profile, alert_words):
    # type: (UserProfile, Iterable[Text]) -> None
    """Add alert words for the user and broadcast the resulting full list."""
    updated_words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, updated_words)
def do_remove_alert_words(user_profile, alert_words):
    # type: (UserProfile, Iterable[Text]) -> None
    """Remove alert words for the user and broadcast the resulting full list."""
    remaining_words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, remaining_words)
def do_set_alert_words(user_profile, alert_words):
    # type: (UserProfile, List[Text]) -> None
    """Replace the user's alert words wholesale and broadcast the new list."""
    # Unlike add/remove, the stored list is exactly what the caller supplied.
    set_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, alert_words)
def do_set_muted_topics(user_profile, muted_topics):
    # type: (UserProfile, Union[List[List[Text]], List[Tuple[Text, Text]]]) -> None
    """Store the user's muted-topic list (as JSON) and notify their clients."""
    user_profile.muted_topics = ujson.dumps(muted_topics)
    user_profile.save(update_fields=['muted_topics'])
    send_event(dict(type="muted_topics", muted_topics=muted_topics),
               [user_profile.id])
def notify_realm_filters(realm):
    # type: (Realm) -> None
    """Broadcast the realm's current filter (linkifier) list to all active users."""
    event = dict(type="realm_filters",
                 realm_filters=realm_filters_for_realm(realm.id))
    send_event(event, [u['id'] for u in get_active_user_dicts_in_realm(realm)])
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm, pattern, url_format_string):
    # type: (Realm, Text, Text) -> int
    """Create a realm filter (linkifier), notify clients, and return its id.

    Leading/trailing whitespace is stripped from both the pattern and the
    URL format string before validation.
    """
    new_filter = RealmFilter(realm=realm,
                             pattern=pattern.strip(),
                             url_format_string=url_format_string.strip())
    # full_clean() enforces the model's pattern/URL validation.
    new_filter.full_clean()
    new_filter.save()
    notify_realm_filters(realm)
    return new_filter.id
def do_remove_realm_filter(realm, pattern=None, id=None):
    # type: (Realm, Optional[Text], Optional[int]) -> None
    """Delete a realm filter, looked up by pattern when given, else by id.

    (The `id` parameter name shadows the builtin but is kept for API
    compatibility with existing callers.)
    """
    if pattern is None:
        RealmFilter.objects.get(realm=realm, pk=id).delete()
    else:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids):
    # type: (Sequence[int]) -> Dict[int, Text]
    """Map user ids to email addresses.

    We may eventually use memcached to speed this up, but the DB is fast.
    """
    return UserProfile.emails_from_ids(user_ids)
def do_get_realm_aliases(realm):
    # type: (Realm) -> List[Dict[str, Text]]
    """Return the realm's alias domains as dicts with 'id' and 'domain' keys."""
    return [alias for alias in realm.realmalias_set.values('id', 'domain')]
def do_add_realm_alias(realm, domain):
    # type: (Realm, Text) -> (RealmAlias)
    """Validate and create an alias domain for the realm, notify all active
    users, and return the new RealmAlias."""
    alias = RealmAlias(realm=realm, domain=domain)
    alias.full_clean()
    alias.save()
    send_event(dict(type="realm_domains", op="add",
                    alias=dict(id=alias.id, domain=alias.domain)),
               active_user_ids(realm))
    return alias
def do_remove_realm_alias(realm, alias_id):
    # type: (Realm, int) -> None
    """Delete an alias domain by primary key and notify all active users."""
    RealmAlias.objects.get(pk=alias_id).delete()
    send_event(dict(type="realm_domains", op="remove", alias_id=alias_id),
               active_user_ids(realm))
def get_occupied_streams(realm):
    # type: (Realm) -> QuerySet
    # TODO: Make a generic stub for QuerySet
    """ Get streams with subscribers """
    # Recipients with at least one active subscription from an active user.
    subscribed_recipients = Subscription.objects.filter(
        active=True, user_profile__realm=realm,
        user_profile__is_active=True).values('recipient_id')
    stream_ids = Recipient.objects.filter(
        type=Recipient.STREAM, id__in=subscribed_recipients).values('type_id')
    return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
def do_get_streams(user_profile, include_public=True, include_subscribed=True,
                   include_all_active=False, include_default=False):
    # type: (UserProfile, bool, bool, bool, bool) -> List[Dict[str, Any]]
    """Return stream dicts visible to the user, sorted by name.

    `include_all_active` (API super users only) skips all visibility
    filtering; otherwise the result is the union of the user's subscribed
    streams and/or the realm's public streams per the flags. With
    `include_default`, each dict also gets an 'is_default' key.

    Raises JsonableError when include_all_active is requested without
    super-user rights.
    """
    if include_all_active and not user_profile.is_api_super_user:
        raise JsonableError(_("User not authorized for this query"))

    # Listing public streams are disabled for Zephyr mirroring realms.
    include_public = include_public and not user_profile.realm.is_zephyr_mirror_realm
    # Start out with all streams in the realm with subscribers
    query = get_occupied_streams(user_profile.realm)

    if not include_all_active:
        user_subs = Subscription.objects.select_related("recipient").filter(
            active=True, user_profile=user_profile,
            recipient__type=Recipient.STREAM)

        # Each Q object below is only bound when its flag is set; the
        # if/elif chain that uses them is guarded by the same flags.
        if include_subscribed:
            recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
        if include_public:
            invite_only_check = Q(invite_only=False)

        if include_subscribed and include_public:
            query = query.filter(recipient_check | invite_only_check)
        elif include_public:
            query = query.filter(invite_only_check)
        elif include_subscribed:
            query = query.filter(recipient_check)
        else:
            # We're including nothing, so don't bother hitting the DB.
            query = []

    streams = [(row.to_dict()) for row in query]
    streams.sort(key=lambda elt: elt["name"])
    if include_default:
        is_default = {}
        default_streams = get_default_streams_for_realm(user_profile.realm)
        for default_stream in default_streams:
            is_default[default_stream.id] = True
        for stream in streams:
            stream['is_default'] = is_default.get(stream["stream_id"], False)

    return streams
def do_claim_attachments(message):
    # type: (Message) -> List[Tuple[Text, bool]]
    """Associate attachments referenced in the message's content with it.

    Returns a (path_id, claimed) pair for every attachment URL in the
    content that resolves to a path id.
    """
    results = []
    for url in attachment_url_re.findall(message.content):
        path_id = attachment_url_to_path_id(url)
        user_profile = message.sender
        is_message_realm_public = False
        if message.recipient.type == Recipient.STREAM:
            is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()

        if path_id is not None:
            claimed = claim_attachment(user_profile, path_id, message,
                                       is_message_realm_public)
            results.append((path_id, claimed))
    return results
def do_delete_old_unclaimed_attachments(weeks_ago):
    # type: (int) -> None
    """Purge attachments that have been unclaimed for at least `weeks_ago`
    weeks, removing both the stored image and the database row."""
    for attachment in get_old_unclaimed_attachments(weeks_ago):
        delete_message_image(attachment.path_id)
        attachment.delete()
def check_attachment_reference_change(prev_content, message):
    # type: (Text, Message) -> None
    """Sync the message's attachment associations after an edit.

    Attachments no longer referenced in the new content are detached from
    the message; newly referenced ones are claimed for it.
    """
    old_urls = set(attachment_url_re.findall(prev_content))
    new_urls = set(attachment_url_re.findall(message.content))

    # Detach attachments whose URLs were removed by the edit.
    removed_path_ids = [attachment_url_to_path_id(url) for url in old_urls - new_urls]
    stale_attachments = Attachment.objects.filter(
        path_id__in=removed_path_ids).select_for_update()
    message.attachment_set.remove(*stale_attachments)

    # Claim any attachments that the edit newly references.
    if new_urls - old_urls:
        do_claim_attachments(message)
| {
"content_hash": "4b3b2151e75e537367f4358772b23be2",
"timestamp": "",
"source": "github",
"line_count": 3792,
"max_line_length": 223,
"avg_line_length": 44.38660337552743,
"alnum_prop": 0.6224081181601056,
"repo_name": "AZtheAsian/zulip",
"id": "dce86992dbd8649fd1ecffd03a163650739c4095",
"size": "168314",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/lib/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "255229"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "499614"
},
{
"name": "JavaScript",
"bytes": "1441148"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "85156"
},
{
"name": "Python",
"bytes": "3170699"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "37885"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureTemplateRecipientsResult:
    """Result payload listing the recipients defined on a signature template.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Attribute name -> swagger type string, used by (de)serialization.
        self.swaggerTypes = dict(
            templateId='str',
            recipients='list[SignatureTemplateRecipientInfo]',
        )
        self.templateId = None   # str
        self.recipients = None   # list[SignatureTemplateRecipientInfo]
| {
"content_hash": "7e9c7a616704d0781b63553b5fdb4af0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 31.78787878787879,
"alnum_prop": 0.684461391801716,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "ce5ada44cc098e3fce597b6efafae1b01fc52a00",
"size": "1071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "groupdocs/models/SignatureTemplateRecipientsResult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
def try_delete(filename):
    """Best-effort removal of *filename*.

    Tries os.unlink first (regular files and links); if that raises and the
    path still exists (e.g. it is a directory), falls back to a recursive
    rmtree with errors ignored. Never raises for undeletable paths.
    """
    try:
        os.unlink(filename)
    # Bug fix: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit; only filesystem errors should trigger the fallback.
    except OSError:
        if os.path.exists(filename):
            shutil.rmtree(filename, ignore_errors=True)
class TempFiles:
    """Tracks temporary files so they can all be deleted in one sweep.

    Files are created in (or registered under) the directory *tmp*. When
    *save_debug_files* is true, clean() becomes a no-op so the files can be
    inspected after the run.
    """

    def __init__(self, tmp, save_debug_files=False):
        self.tmp = tmp
        self.save_debug_files = save_debug_files
        # Paths registered for deletion by clean().
        self.to_clean = []

    def note(self, filename):
        """Register *filename* for deletion on the next clean()."""
        self.to_clean.append(filename)

    def get(self, suffix):
        """Return a named temp file with the given suffix, registered for cleanup."""
        named_file = tempfile.NamedTemporaryFile(dir=self.tmp, suffix=suffix, delete=False)
        self.note(named_file.name)
        return named_file

    def clean(self):
        """Delete every registered file, unless in debug-save mode."""
        if self.save_debug_files:
            import sys
            # Bug fix: the original used the Python-2-only statement
            # `print >> sys.stderr, ...`, which breaks under Python 3;
            # sys.stderr.write works on both.
            sys.stderr.write(
                'not cleaning up temp files since in debug-save mode, see them in %s\n'
                % (self.tmp,))
            return
        for filename in self.to_clean:
            try_delete(filename)
        self.to_clean = []

    def run_and_clean(self, func):
        """Run *func* and return its result, always cleaning temp files after."""
        try:
            return func()
        finally:
            self.clean()
| {
"content_hash": "3213dfed8c5c8b588c0116f12724171d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 110,
"avg_line_length": 24.73170731707317,
"alnum_prop": 0.6538461538461539,
"repo_name": "slightperturbation/Cobalt",
"id": "27da10829fae6158b7ec61e5d45d2a0db4905414",
"size": "1014",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ext/emsdk_portable/emscripten/1.27.0/tools/tempfiles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "7942339"
},
{
"name": "Batchfile",
"bytes": "27769"
},
{
"name": "C",
"bytes": "64431592"
},
{
"name": "C++",
"bytes": "192377551"
},
{
"name": "CMake",
"bytes": "2563457"
},
{
"name": "CSS",
"bytes": "32911"
},
{
"name": "DTrace",
"bytes": "12324"
},
{
"name": "Emacs Lisp",
"bytes": "11557"
},
{
"name": "Go",
"bytes": "132306"
},
{
"name": "Groff",
"bytes": "141757"
},
{
"name": "HTML",
"bytes": "10597275"
},
{
"name": "JavaScript",
"bytes": "7134930"
},
{
"name": "LLVM",
"bytes": "37169002"
},
{
"name": "Lua",
"bytes": "30196"
},
{
"name": "Makefile",
"bytes": "4368336"
},
{
"name": "Nix",
"bytes": "17734"
},
{
"name": "OCaml",
"bytes": "401898"
},
{
"name": "Objective-C",
"bytes": "492807"
},
{
"name": "PHP",
"bytes": "324917"
},
{
"name": "Perl",
"bytes": "27878"
},
{
"name": "Prolog",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "3678053"
},
{
"name": "Shell",
"bytes": "3047898"
},
{
"name": "SourcePawn",
"bytes": "2461"
},
{
"name": "Standard ML",
"bytes": "2841"
},
{
"name": "TeX",
"bytes": "120660"
},
{
"name": "VimL",
"bytes": "13743"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
from virtualenv.util.path import Path
from ..via_template import ViaTemplateActivator
class BatchActivator(ViaTemplateActivator):
    """Renders Windows batch-file activation scripts for a virtualenv."""

    @classmethod
    def supports(cls, interpreter):
        # Batch scripts only make sense on a Windows ("nt") host.
        return interpreter.os == "nt"

    def templates(self):
        for script in ("activate.bat", "deactivate.bat", "pydoc.bat"):
            yield Path(script)

    def instantiate_template(self, replacements, template, creator):
        # Batch files need the platform line separator throughout
        # (``\r\n`` on Windows): collapse whatever the parent produced
        # to bare "\n" first, then expand every "\n" to os.linesep.
        rendered = super(BatchActivator, self).instantiate_template(
            replacements, template, creator)
        normalised = rendered.replace(os.linesep, "\n")
        return normalised.replace("\n", os.linesep)
| {
"content_hash": "ef636a6c86098c27364f9964b0d3246a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 96,
"avg_line_length": 31.869565217391305,
"alnum_prop": 0.6998635743519782,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "4149712d879f3fa72792d4e8d8e885590af1d9b9",
"size": "733",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/virtualenv/activation/batch/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest import config
from tempest.lib.common import rest_client
from tempest.lib.services.compute import flavors_client as flavor_cli
from tempest.lib.services.compute import floating_ips_client as floatingip_cli
from tempest.lib.services.compute import networks_client as network_cli
from tempest.lib.services.compute import servers_client as server_cli
from tempest import manager
from tempest.services.object_storage import container_client as container_cli
from tempest.services.object_storage import object_client as obj_cli
from ceilometer.tests.tempest.service.images.v1 import images_client as \
img_cli_v1
from ceilometer.tests.tempest.service.images.v2 import images_client as \
img_cli_v2
# Tempest configuration singleton, shared by every client class below.
CONF = config.CONF
class TelemetryClient(rest_client.RestClient):
    """REST client for the telemetry (Ceilometer) v2 API."""

    version = '2'
    uri_prefix = "v2"

    def deserialize(self, body):
        # Strip embedded newlines before parsing the JSON payload.
        return json.loads(body.replace("\n", ""))

    def serialize(self, body):
        return json.dumps(body)

    def create_sample(self, meter_name, sample_list):
        """POST a list of samples for *meter_name*."""
        uri = "%s/meters/%s" % (self.uri_prefix, meter_name)
        resp, body = self.post(uri, self.serialize(sample_list))
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, self.deserialize(body))

    def _helper_list(self, uri, query=None, period=None):
        """GET *uri*, optionally filtered by a (field, op, value) query
        triple and/or a statistics period, and return the parsed list."""
        params = {}
        if query:
            field, op, value = query
            params = {'q.field': field,
                      'q.op': op,
                      'q.value': value}
        if period:
            params['period'] = period
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBodyList(resp, self.deserialize(body))

    def list_resources(self, query=None):
        return self._helper_list('%s/resources' % self.uri_prefix, query)

    def list_meters(self, query=None):
        return self._helper_list('%s/meters' % self.uri_prefix, query)

    def list_statistics(self, meter, period=None, query=None):
        uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter)
        return self._helper_list(uri, query, period)

    def list_samples(self, meter_id, query=None):
        return self._helper_list('%s/meters/%s' % (self.uri_prefix, meter_id),
                                 query)

    def list_events(self, query=None):
        return self._helper_list('%s/events' % self.uri_prefix, query)

    def show_resource(self, resource_id):
        """GET a single resource by id."""
        uri = '%s/resources/%s' % (self.uri_prefix, resource_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, self.deserialize(body))
class Manager(manager.Manager):
    """Service-client container for the telemetry tempest tests.

    On construction, every entry of ``load_clients`` is mapped to a
    ``set_<name>()`` method, which builds the corresponding client and
    attaches it to the instance.
    """

    # Every entry must have a matching set_<entry>() method below.
    load_clients = [
        'servers_client',
        'compute_networks_client',
        'compute_floating_ips_client',
        'flavors_client',
        'image_client',
        'image_client_v2',
        'telemetry_client',
        'container_client',
        'object_client',
    ]

    # Options common to every service client; merged into each
    # service-specific parameter set below.  dict(specific, **default)
    # gives the shared values precedence, exactly like the original
    # create-then-update pattern.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    compute_params = dict({
        'service': CONF.compute.catalog_type,
        'region': CONF.compute.region or CONF.identity.region,
        'endpoint_type': CONF.compute.endpoint_type,
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout,
    }, **default_params)

    image_params = dict({
        'catalog_type': CONF.image.catalog_type,
        'region': CONF.image.region or CONF.identity.region,
        'endpoint_type': CONF.image.endpoint_type,
        'build_interval': CONF.image.build_interval,
        'build_timeout': CONF.image.build_timeout,
    }, **default_params)

    telemetry_params = dict({
        'service': CONF.telemetry.catalog_type,
        'region': CONF.identity.region,
        'endpoint_type': CONF.telemetry.endpoint_type,
    }, **default_params)

    object_storage_params = dict({
        'service': CONF.object_storage.catalog_type,
        'region': CONF.object_storage.region or CONF.identity.region,
        'endpoint_type': CONF.object_storage.endpoint_type
    }, **default_params)

    def __init__(self, credentials=None, service=None):
        super(Manager, self).__init__(credentials)
        for name in self.load_clients:
            getattr(self, 'set_' + name)()

    def set_servers_client(self):
        self.servers_client = server_cli.ServersClient(
            self.auth_provider, **self.compute_params)

    def set_compute_networks_client(self):
        self.compute_networks_client = network_cli.NetworksClient(
            self.auth_provider, **self.compute_params)

    def set_compute_floating_ips_client(self):
        self.compute_floating_ips_client = floatingip_cli.FloatingIPsClient(
            self.auth_provider, **self.compute_params)

    def set_flavors_client(self):
        self.flavors_client = flavor_cli.FlavorsClient(
            self.auth_provider, **self.compute_params)

    def set_image_client(self):
        self.image_client = img_cli_v1.ImagesClient(
            self.auth_provider, **self.image_params)

    def set_image_client_v2(self):
        self.image_client_v2 = img_cli_v2.ImagesClient(
            self.auth_provider, **self.image_params)

    def set_telemetry_client(self):
        self.telemetry_client = TelemetryClient(
            self.auth_provider, **self.telemetry_params)

    def set_container_client(self):
        self.container_client = container_cli.ContainerClient(
            self.auth_provider, **self.object_storage_params)

    def set_object_client(self):
        self.object_client = obj_cli.ObjectClient(
            self.auth_provider, **self.object_storage_params)
| {
"content_hash": "32b8990b401b834b8bae6838ecc0d216",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 78,
"avg_line_length": 34.55135135135135,
"alnum_prop": 0.6334480600750939,
"repo_name": "ityaptin/ceilometer",
"id": "34bb33f21ee89b27fe7dd1bd2447a9447777f421",
"size": "7028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/tempest/service/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2532735"
},
{
"name": "Shell",
"bytes": "29938"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.