| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
for tablename in tables:
table = ts[tablename]
table[:] = []
if buffer_size is not None and table.is_attached():
table.write(append=False)
|
def _prepare_target(ts, tables, buffer_size)
|
Clear tables affected by the processing.
| 7.113233
| 6.152489
| 1.156155
|
tablename, fields = get_data_specifier(selector)
if len(fields) != 1:
raise ItsdbError(
'Selector must specify exactly one data column: {}'
.format(selector)
)
if isinstance(source, TestSuite):
if not tablename:
tablename = source.relations.find(fields[0])[0]
source = source[tablename]
cols = list(source.fields.keys()) + fields
return source, cols
|
def _prepare_source(selector, source)
|
Normalize source rows and selectors.
| 6.585048
| 6.341543
| 1.038398
|
fields = table.fields
# remove any keys that aren't relation fields
for invalid_key in set(data).difference([f.name for f in fields]):
del data[invalid_key]
table.append(Record.from_dict(fields, data))
# write if requested and possible
if buffer_size is not None and table.is_attached():
# for now there isn't a public method to get the number of new
# records, so use private members
if (len(table) - 1) - table._last_synced_index > buffer_size:
table.commit()
|
def _add_record(table, data, buffer_size)
|
Prepare and append a Record into its Table; flush to disk if necessary.
| 6.913041
| 6.484627
| 1.066066
|
match = data_specifier_re.match(string)
if match is None:
return (None, None)
table = match.group('table')
if table is not None:
table = table.strip()
cols = _split_cols(match.group('cols'))
return (table, cols)
|
def get_data_specifier(string)
|
Return a tuple (table, col) for some [incr tsdb()] data specifier.
For example::
item -> ('item', None)
item:i-input -> ('item', ['i-input'])
item:i-input@i-wf -> ('item', ['i-input', 'i-wf'])
:i-input -> (None, ['i-input'])
(otherwise) -> (None, None)
| 2.725859
| 2.307394
| 1.181358
|
cols = line.rstrip('\n').split(_field_delimiter)
cols = list(map(unescape, cols))
if fields is not None:
if len(cols) != len(fields):
raise ItsdbError(
'Wrong number of fields: {} != {}'
.format(len(cols), len(fields))
)
for i in range(len(cols)):
col = cols[i]
if col:
field = fields[i]
col = _cast_to_datatype(col, field)
cols[i] = col
return cols
|
def decode_row(line, fields=None)
|
Decode a raw line from a profile into a list of column values.
Decoding involves splitting the line by the field delimiter
(`"@"` by default) and unescaping special characters. If *fields*
is given, cast the values into the datatype given by their
respective Field object.
Args:
line: a raw line from a [incr tsdb()] profile.
fields: a list or Relation object of Fields for the row
Returns:
A list of column values.
| 2.886012
| 2.824607
| 1.021739
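A minimal usage sketch for decode_row, assuming it is importable from delphin.itsdb (as in pyDelphin 0.x); without a *fields* schema every column stays a string.
from delphin.itsdb import decode_row   # import path assumed
# split on the '@' delimiter and unescape; no datatype casting without fields
assert decode_row('10@It rained.@1\n') == ['10', 'It rained.', '1']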
|
# NOTE: str(f) only works for Python3
unicode_fields = [unicode(f) for f in fields]
escaped_fields = map(escape, unicode_fields)
return _field_delimiter.join(escaped_fields)
|
def encode_row(fields)
|
Encode a list of column values into a [incr tsdb()] profile line.
Encoding involves escaping special characters for each value, then
joining the values into a single string with the field delimiter
(`"@"` by default). It does not fill in default values (see
make_row()).
Args:
fields: a list of column values
Returns:
A [incr tsdb()]-encoded string
| 5.546771
| 5.548835
| 0.999628
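The inverse direction, again assuming the delphin.itsdb import path; each value is escaped and then joined with the '@' delimiter.
from delphin.itsdb import encode_row   # import path assumed
line = encode_row(['10', 'It rained.', '1'])
assert line == '10@It rained.@1'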
|
tbl_filename = str(tbl_filename) # convert any Path objects
txfn = _normalize_table_path(tbl_filename)
gzfn = txfn + '.gz'
if os.path.exists(txfn):
if (os.path.exists(gzfn) and
os.stat(gzfn).st_mtime > os.stat(txfn).st_mtime):
tbl_filename = gzfn
else:
tbl_filename = txfn
elif os.path.exists(gzfn):
tbl_filename = gzfn
else:
raise ItsdbError(
'Table does not exist at {}(.gz)'
.format(tbl_filename)
)
return tbl_filename
|
def _table_filename(tbl_filename)
|
Determine if the table path should end in .gz or not and return it.
A .gz path is preferred only if it exists and is newer than any
regular text file path.
Raises:
:class:`delphin.exceptions.ItsdbError`: when neither the .gz
nor text file exist.
| 3.182276
| 2.856774
| 1.11394
|
path = _table_filename(tbl_filename)
if path.endswith('.gz'):
# gzip.open() cannot use mode='rt' until Python2.7 support
# is gone; until then use TextIOWrapper
gzfile = GzipFile(path, mode='r')
gzfile.read1 = gzfile.read # Python2 hack
with TextIOWrapper(gzfile, encoding=encoding) as f:
yield f
else:
with io.open(path, encoding=encoding) as f:
yield f
|
def _open_table(tbl_filename, encoding)
|
Transparently open the compressed or text table file.
Can be used as a context manager in a 'with' statement.
| 4.661451
| 4.671139
| 0.997926
|
if not hasattr(row, 'get'):
row = {f.name: col for f, col in zip(fields, row)}
row_fields = []
for f in fields:
val = row.get(f.name, None)
if val is None:
val = str(f.default_value())
row_fields.append(val)
return encode_row(row_fields)
|
def make_row(row, fields)
|
Encode a mapping of column name to values into a [incr tsdb()]
profile line. The *fields* parameter determines what columns are
used, and default values are provided if a column is missing from
the mapping.
Args:
row: a mapping of column names to values
fields: an iterable of :class:`Field` objects
Returns:
A [incr tsdb()]-encoded string
| 2.99644
| 2.99614
| 1.0001
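A hedged sketch of make_row with a two-field schema; Field is constructed as elsewhere in this module (name, datatype, key, partial, comment), and 'i-input' is assumed to have no idiosyncratic coded default.
from delphin.itsdb import Field, make_row   # import path assumed
fields = [Field('i-id', ':integer', True, False, None),
          Field('i-input', ':string', False, False, None)]
# 'i-input' is absent from the mapping, so its default value ('') is used
assert make_row({'i-id': 10}, fields) == '10@'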
|
mode = mode.lower()
if mode == 'list':
modecast = lambda cols, data: data
elif mode == 'dict':
modecast = lambda cols, data: dict(zip(cols, data))
elif mode == 'row':
modecast = lambda cols, data: encode_row(data)
else:
raise ItsdbError('Invalid mode for select operation: {}\n'
' Valid options include: list, dict, row'
.format(mode))
for row in rows:
try:
data = [row.get(c, cast=cast) for c in cols]
except TypeError:
data = [row.get(c) for c in cols]
yield modecast(cols, data)
|
def select_rows(cols, rows, mode='list', cast=True)
|
Yield data selected from rows.
It is sometimes useful to select a subset of data from a profile.
This function selects the data in *cols* from *rows* and yields it
in a form specified by *mode*. Possible values of *mode* are:
================== ================= ==========================
mode description example `['i-id', 'i-wf']`
================== ================= ==========================
`'list'` (default) a list of values `[10, 1]`
`'dict'` col to value map `{'i-id': 10,'i-wf': 1}`
`'row'` [incr tsdb()] row `'10@1'`
================== ================= ==========================
Args:
cols: an iterable of column names to select data for
rows: the rows to select column data from
mode: the form yielded data should take
cast: if `True`, cast column values to their datatype
(requires *rows* to be :class:`Record` objects)
Yields:
Selected data in the form specified by *mode*.
| 2.935533
| 3.088461
| 0.950484
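A short sketch of the three *mode* values, using plain dicts as rows (when rows are not Record objects the cast keyword falls back to a plain get).
from delphin.itsdb import select_rows   # import path assumed
rows = [{'i-id': 10, 'i-wf': 1}, {'i-id': 20, 'i-wf': 0}]
list(select_rows(['i-id', 'i-wf'], rows))               # [[10, 1], [20, 0]]
list(select_rows(['i-id', 'i-wf'], rows, mode='dict'))  # [{'i-id': 10, 'i-wf': 1}, ...]
list(select_rows(['i-id', 'i-wf'], rows, mode='row'))   # ['10@1', '20@0']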
|
matched = OrderedDict()
for i, rows in enumerate([rows1, rows2]):
for row in rows:
val = row[key]
try:
data = matched[val]
except KeyError:
matched[val] = ([], [])
data = matched[val]
data[i].append(row)
vals = matched.keys()
if sort_keys:
vals = sorted(vals, key=safe_int)
for val in vals:
left, right = matched[val]
yield (val, left, right)
|
def match_rows(rows1, rows2, key, sort_keys=True)
|
Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order
| 2.591495
| 2.823632
| 0.917788
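A small sketch with dict rows; values of the shared key are grouped and, by default, yielded in sorted order.
from delphin.itsdb import match_rows   # import path assumed
items  = [{'i-id': '10', 'i-input': 'It rained.'}]
parses = [{'i-id': '10', 'parse-id': '1'}, {'i-id': '20', 'parse-id': '2'}]
for i_id, left, right in match_rows(items, parses, 'i-id'):
    print(i_id, len(left), len(right))   # -> '10 1 1' then '20 0 1'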
|
if how not in ('inner', 'left'):
raise ItsdbError('Only \'inner\' and \'left\' join methods are allowed.')
# validate and normalize the pivot
on = _join_pivot(on, table1, table2)
# the fields of the joined table
fields = _RelationJoin(table1.fields, table2.fields, on=on)
# get key mappings to the right side (useful for inner and left joins)
get_key = lambda rec: tuple(rec.get(k) for k in on)
key_indices = set(table2.fields.index(k) for k in on)
right = defaultdict(list)
for rec in table2:
right[get_key(rec)].append([c for i, c in enumerate(rec)
if i not in key_indices])
# build joined table
rfill = [f.default_value() for f in table2.fields if f.name not in on]
joined = []
for lrec in table1:
k = get_key(lrec)
if how == 'left' or k in right:
joined.extend(lrec + rrec for rrec in right.get(k, [rfill]))
return Table(fields, joined)
|
def join(table1, table2, on=None, how='inner', name=None)
|
Join two tables and return the resulting Table object.
Fields in the resulting table have their names prefixed with their
corresponding table name. For example, when joining `item` and
`parse` tables, the `i-input` field of the `item` table will be
named `item:i-input` in the resulting Table. Pivot fields (those
in *on*) are only stored once without the prefix.
Both inner and left joins are possible by setting the *how*
parameter to `inner` and `left`, respectively.
.. warning::
Both *table2* and the resulting joined table will exist in
memory for this operation, so it is not recommended for very
large tables on low-memory systems.
Args:
table1 (:class:`Table`): the left table to join
table2 (:class:`Table`): the right table to join
on (str): the shared key to use for joining; if `None`, find
shared keys using the schemata of the tables
how (str): the method used for joining (`"inner"` or `"left"`)
name (str): the name assigned to the resulting table
| 4.337107
| 4.462309
| 0.971942
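A hedged sketch of joining two tables of a test suite; the profile path and the selected field names are illustrative only.
from delphin.itsdb import TestSuite, join   # import path assumed
ts = TestSuite('path/to/profile')           # hypothetical profile directory
pr = join(ts['parse'], ts['result'], on='parse-id', how='inner')
row = pr[0]
# non-pivot columns carry a table prefix; 'parse-id' is stored once, unprefixed
print(row.get('parse-id'), row.get('parse:i-id'), row.get('result:mrs'))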
|
if fieldname in tsdb_coded_attributes:
return str(tsdb_coded_attributes[fieldname])
else:
return _default_datatype_values.get(datatype, '')
|
def default_value(fieldname, datatype)
|
Return the default value for a column.
If the column name (e.g. *i-wf*) is defined to have an idiosyncratic
value, that value is returned. Otherwise the default value for the
column's datatype is returned.
Args:
fieldname: the column name (e.g. `i-wf`)
datatype: the datatype of the column (e.g. `:integer`)
Returns:
The default value for the column.
.. deprecated:: v0.7.0
| 6.574527
| 8.13141
| 0.808535
|
try:
os.makedirs(path)
except OSError:
raise ItsdbError('Path already exists: {}.'.format(path))
import shutil
shutil.copyfile(relations, os.path.join(path, _relations_filename))
prof = ItsdbProfile(path, index=False)
prof.write_table('item', item_rows, gzip=gzip)
return prof
|
def make_skeleton(path, relations, item_rows, gzip=False)
|
Instantiate a new profile skeleton (only the relations file and
item file) from an existing relations file and a list of rows
for the item table. For standard relations files, it is suggested
to have, as a minimum, the `i-id` and `i-input` fields in the
item rows.
Args:
path: the destination directory of the skeleton---must not
already exist, as it will be created
relations: the path to the relations file
item_rows: the rows to use for the item file
gzip: if `True`, the item file will be compressed
Returns:
An ItsdbProfile containing the skeleton data (but the profile
data will already have been written to disk).
Raises:
:class:`delphin.exceptions.ItsdbError`: if the destination
directory could not be created.
.. deprecated:: v0.7.0
| 5.302227
| 3.34216
| 1.586467
|
for row in rows:
if all(condition(row, row.get(col))
for (cols, condition) in filters
for col in cols
if col is None or col in row):
yield row
|
def filter_rows(filters, rows)
|
Yield rows matching all applicable filters.
Filter functions have binary arity (e.g. `filter(row, col)`) where
the first parameter is the dictionary of row data, and the second
parameter is the data at one particular column.
Args:
filters: a tuple of (cols, filter_func) where filter_func will
be tested (filter_func(row, col)) for each col in cols where
col exists in the row
rows: an iterable of rows to filter
Yields:
Rows matching all applicable filters
.. deprecated:: v0.7.0
| 5.759062
| 5.515604
| 1.04414
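A minimal sketch: a single filter that keeps only rows whose i-wf column equals '1'.
from delphin.itsdb import filter_rows   # import path assumed
rows = [{'i-id': '10', 'i-wf': '1'}, {'i-id': '20', 'i-wf': '0'}]
wf_only = filter_rows([(['i-wf'], lambda row, col: col == '1')], rows)
print(list(wf_only))   # [{'i-id': '10', 'i-wf': '1'}]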
|
for row in rows:
for (cols, function) in applicators:
for col in (cols or []):
value = row.get(col, '')
row[col] = function(row, value)
yield row
|
def apply_rows(applicators, rows)
|
Yield rows after applying the applicator functions to them.
Applicators are simple unary functions that return a value, and that
value is stored in the yielded row. E.g.
`row[col] = applicator(row[col])`. These are useful, for example, to
cast strings to numeric datatypes, convert formats stored in a cell,
extract features for machine learning, and so on.
Args:
applicators: a tuple of (cols, applicator) where the applicator
will be applied to each col in cols
rows: an iterable of rows for applicators to be called on
Yields:
Rows with specified column values replaced with the results of
the applicators
.. deprecated:: v0.7.0
| 3.532815
| 4.620479
| 0.764599
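A minimal sketch: one applicator that rewrites the i-input column in place.
from delphin.itsdb import apply_rows   # import path assumed
rows = [{'i-id': '10', 'i-input': 'it rained.'}]
caps = [(['i-input'], lambda row, val: val.capitalize())]
print(list(apply_rows(caps, rows)))   # [{'i-id': '10', 'i-input': 'It rained.'}]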
|
if self.name in tsdb_coded_attributes:
return tsdb_coded_attributes[self.name]
elif self.datatype == ':integer':
return -1
else:
return ''
|
def default_value(self)
|
Get the default value of the field.
| 7.538226
| 7.550917
| 0.998319
|
keys = self._keys
if keys is None:
keys = tuple(self[i].name for i in self.key_indices)
return keys
|
def keys(self)
|
Return the tuple of field names of key fields.
| 4.991955
| 3.280297
| 1.5218
|
if hasattr(source, 'read'):
relations = cls.from_string(source.read())
else:
with open(source) as f:
relations = cls.from_string(f.read())
return relations
|
def from_file(cls, source)
|
Instantiate Relations from a relations file.
| 2.585929
| 2.010736
| 1.286061
|
tables = []
seen = set()
current_table = None
lines = list(reversed(s.splitlines())) # to pop() in right order
while lines:
line = lines.pop().strip()
table_m = re.match(r'^(?P<table>\w.+):$', line)
field_m = re.match(r'\s*(?P<name>\S+)'
r'(\s+(?P<attrs>[^#]+))?'
r'(\s*#\s*(?P<comment>.*)$)?',
line)
if table_m is not None:
table_name = table_m.group('table')
if table_name in seen:
raise ItsdbError(
'Table {} already defined.'.format(table_name)
)
current_table = (table_name, [])
tables.append(current_table)
seen.add(table_name)
elif field_m is not None and current_table is not None:
name = field_m.group('name')
attrs = field_m.group('attrs').split()
datatype = attrs.pop(0)
key = ':key' in attrs
partial = ':partial' in attrs
comment = field_m.group('comment')
current_table[1].append(
Field(name, datatype, key, partial, comment)
)
elif line != '':
raise ItsdbError('Invalid line: ' + line)
return cls(tables)
|
def from_string(cls, s)
|
Instantiate Relations from a relations string.
| 2.58489
| 2.553594
| 1.012256
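A small sketch of the relations-string format parsed above; each table name ends with a colon and each field line gives a name, a datatype, and optional flags such as :key.
from delphin.itsdb import Relations   # import path assumed
rels = Relations.from_string(
    'item:\n'
    '  i-id :integer :key\n'
    '  i-input :string\n'
)
print(rels['item'].keys())   # ('i-id',)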
|
tablename, _, column = fieldname.rpartition(':')
if tablename and tablename in self._field_map[column]:
return tablename
else:
return self._field_map[fieldname]
|
def find(self, fieldname)
|
Return the list of tables that define the field *fieldname*.
| 5.846358
| 5.361042
| 1.090526
|
visited = set(source.split('+')) # split on + for joins
targets = set(target.split('+')) - visited
# ensure sources and targets exist
for tablename in visited.union(targets):
self[tablename]
# base case; nothing to do
if len(targets) == 0:
return []
paths = [[(tablename, None)] for tablename in visited]
while True:
newpaths = []
for path in paths:
laststep, pivot = path[-1]
if laststep in targets:
return path[1:]
else:
for key in self[laststep].keys():
for step in set(self.find(key)) - visited:
visited.add(step)
newpaths.append(path + [(step, key)])
if newpaths:
paths = newpaths
else:
break
raise ItsdbError('no relation path found from {} to {}'
.format(source, target))
|
def path(self, source, target)
|
Find the path of id fields connecting two tables.
This is just a basic breadth-first-search. The relations file
should be small enough to not be a problem.
Returns:
list: (table, fieldname) pairs describing the path from
the source to target tables
Raises:
:class:`delphin.exceptions.ItsdbError`: when no path is
found
Example:
>>> relations.path('item', 'result')
[('parse', 'i-id'), ('result', 'parse-id')]
>>> relations.path('parse', 'item')
[('item', 'i-id')]
>>> relations.path('item', 'item')
[]
| 4.805016
| 4.07423
| 1.179368
|
record = cls(fields, iterable)
record._tableref = weakref.ref(table)
record._rowid = rowid
return record
|
def _make(cls, fields, iterable, table, rowid)
|
Create a Record bound to a :class:`Table`.
This is a helper method for creating Records from rows of a
Table that is attached to a file. It is not meant to be called
directly. It specifies the row number and a weak reference to
the Table object so that when the Record is modified it is
kept in the Table's in-memory list (see Record.__setitem__()),
otherwise the changes would not be retained the next time the
record is requested from the Table. The use of a weak
reference to the Table is to avoid a circular reference and
thus allow it to be properly garbage collected.
| 3.634222
| 4.129262
| 0.880114
|
iterable = [None] * len(fields)
for key, value in mapping.items():
try:
index = fields.index(key)
except KeyError:
raise ItsdbError('Invalid field name(s): ' + key)
iterable[index] = value
return cls(fields, iterable)
|
def from_dict(cls, fields, mapping)
|
Create a Record from a dictionary of field mappings.
The *fields* object is used to determine the column indices
of fields in the mapping.
Args:
fields: the Relation schema for the table of this record
mapping: a dictionary or other mapping from field names to
column values
Returns:
a :class:`Record` object
| 3.799307
| 4.418481
| 0.859867
|
tablename, _, key = key.rpartition(':')
if tablename and tablename not in self.fields.name.split('+'):
raise ItsdbError('column requested from wrong table: {}'
.format(tablename))
try:
index = self.fields.index(key)
value = list.__getitem__(self, index)
except (KeyError, IndexError):
value = default
else:
if cast:
field = self.fields[index]
value = _cast_to_datatype(value, field)
return value
|
def get(self, key, default=None, cast=True)
|
Return the field data given by field name *key*.
Args:
key: the field name of the data to return
default: the value to return if *key* is not in the row
| 5.208039
| 5.264141
| 0.989343
|
path = _table_filename(path) # do early in case file not found
if fields is None:
fields = _get_relation_from_table_path(path)
table = cls(fields)
table.attach(path, encoding=encoding)
return table
|
def from_file(cls, path, fields=None, encoding='utf-8')
|
Instantiate a Table from a database file.
This method instantiates a table attached to the file at *path*.
The file will be opened and traversed to determine the number of
records, but the contents will not be stored in memory unless
they are modified.
Args:
path: the path to the table file
fields: the Relation schema for the table (loaded from the
relations file in the same directory if not given)
encoding: the character encoding of the file at *path*
| 7.501962
| 7.993257
| 0.938536
|
if path is None:
if not self.is_attached():
raise ItsdbError('no path given for detached table')
else:
path = self.path
path = _normalize_table_path(path)
dirpath, name = os.path.split(path)
if fields is None:
fields = self.fields
if records is None:
records = iter(self)
_write_table(
dirpath,
name,
records,
fields,
append=append,
gzip=gzip,
encoding=self.encoding)
if self.is_attached() and path == _normalize_table_path(self.path):
self.path = _table_filename(path)
self._sync_with_file()
|
def write(self, records=None, path=None, fields=None, append=False,
gzip=None)
|
Write the table to disk.
The basic usage has no arguments and writes the table's data
to the attached file. The parameters accommodate a variety of
use cases, such as using *fields* to refresh a table to a
new schema or *records* and *append* to incrementally build a
table.
Args:
records: an iterable of :class:`Record` objects to write;
if `None` the table's existing data is used
path: the destination file path; if `None` use the
path of the file attached to the table
fields (:class:`Relation`): table schema to use for
writing, otherwise use the current one
append: if `True`, append rather than overwrite
gzip: compress with gzip if non-empty
Examples:
>>> table.write()
>>> table.write(results, path='new/path/result')
| 3.597474
| 3.771136
| 0.95395
|
if not self.is_attached():
return
changes = self.list_changes()
if changes:
indices, records = zip(*changes)
if min(indices) > self._last_synced_index:
self.write(records, append=True)
else:
self.write(append=False)
|
def commit(self)
|
Commit changes to disk if attached.
This method helps normalize the interface for detached and
attached tables and makes writing attached tables a bit more
efficient. For detached tables nothing is done, as there is no
notion of changes, but neither is an error raised (unlike with
:meth:`write`). For attached tables, if all changes are new
records, the changes are appended to the existing file, and
otherwise the whole file is rewritten.
| 6.09735
| 4.618403
| 1.320229
|
if self.is_attached():
raise ItsdbError('already attached at {}'.format(self.path))
try:
path = _table_filename(path)
except ItsdbError:
# neither path nor path.gz exist; create new empty file
# (note: if the file were non-empty this would be destructive)
path = _normalize_table_path(path)
open(path, 'w').close()
else:
# path or path.gz exists; check if merging would be a problem
if os.stat(path).st_size > 0 and len(self._records) > 0:
raise ItsdbError(
'cannot attach non-empty table to non-empty file')
self.path = path
self.encoding = encoding
# if _records is not empty then we're attaching to an empty file
if len(self._records) == 0:
self._sync_with_file()
|
def attach(self, path, encoding='utf-8')
|
Attach the Table to the file at *path*.
Attaching a table to a file means that only changed records
are stored in memory, which greatly reduces the memory
footprint of large profiles at some cost of
performance. Tables created from :meth:`Table.from_file()` or
from an attached :class:`TestSuite` are automatically
attached. Attaching a file does not immediately flush the
contents to disk; after attaching the table must be separately
written to commit the in-memory data.
A non-empty table will fail to attach to a non-empty file to
avoid data loss when merging the contents. In this case, you
may delete or clear the file, clear the table, or attach to
another file.
Args:
path: the path to the table file
encoding: the character encoding of the files in the testsuite
| 5.110928
| 4.94476
| 1.033605
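A hedged end-to-end sketch: build a detached table, attach it to a new (empty) file, and write. The one-table schema, file name, and record values are illustrative only.
from delphin.itsdb import Relations, Record, Table   # import path assumed
rels = Relations.from_string('item:\n  i-id :integer :key\n  i-input :string\n')
table = Table(rels['item'])                              # detached, in-memory
table.append(Record.from_dict(rels['item'], {'i-id': 10, 'i-input': 'It rained.'}))
table.attach('item')      # creates an empty file named 'item' and tracks it
table.write()             # flush the in-memory record to the attached file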
|
if not self.is_attached():
raise ItsdbError('already detached')
records = self._records
for i, line in self._enum_lines():
if records[i] is None:
# check number of columns?
records[i] = tuple(decode_row(line))
self.path = None
self.encoding = None
|
def detach(self)
|
Detach the table from a file.
Detaching a table reads all data from the file and places it
in memory. This is useful when constructing or significantly
manipulating table data, or when more speed is needed. Tables
created by the default constructor are detached.
When detaching, only unmodified records are loaded from the
file; any uncommitted changes in the Table are left as-is.
.. warning::
Very large tables may consume all available RAM when
detached. Expect the in-memory table to take up about
twice the space of an uncompressed table on disk, although
this may vary by system.
| 9.543262
| 9.069464
| 1.052241
|
if not self.is_attached():
raise ItsdbError('changes are not tracked for detached tables.')
return [(i, self[i]) for i, row in enumerate(self._records)
if row is not None]
|
def list_changes(self)
|
Return a list of modified records.
This is only applicable for attached tables.
Returns:
A list of `(row_index, record)` tuples of modified records
Raises:
:class:`delphin.exceptions.ItsdbError`: when called on a
detached table
| 10.969813
| 5.218683
| 2.102027
|
self._records = []
i = -1
for i, line in self._enum_lines():
self._records.append(None)
self._last_synced_index = i
|
def _sync_with_file(self)
|
Clear in-memory structures so table is synced with the file.
| 8.257522
| 6.690514
| 1.234213
|
with _open_table(self.path, self.encoding) as lines:
for i, line in enumerate(lines):
yield i, line
|
def _enum_lines(self)
|
Enumerate lines from the attached file.
| 5.410509
| 4.727314
| 1.144521
|
records = self._records
i = 0
# first rows covered by the file
for i, line in self._enum_lines():
if i in indices:
row = records[i]
if row is None:
row = decode_row(line)
yield (i, row)
# then any uncommitted rows
for j in range(i, len(records)):
if j in indices:
if records[j] is not None:
yield (j, records[j])
|
def _enum_attached_rows(self, indices)
|
Enumerate on-disk and in-memory records.
| 4.501292
| 4.168689
| 1.079786
|
indices = range(*slice.indices(len(self._records)))
if self.is_attached():
rows = self._enum_attached_rows(indices)
if slice.step is not None and slice.step < 0:
rows = reversed(list(rows))
else:
rows = zip(indices, self._records[slice])
fields = self.fields
for i, row in rows:
yield Record._make(fields, row, self, i)
|
def _iterslice(self, slice)
|
Yield records from a slice index.
| 4.38293
| 3.997643
| 1.096379
|
row = self._records[index]
if row is not None:
pass
elif self.is_attached():
# need to handle negative indices manually
if index < 0:
index = len(self._records) + index
row = next((decode_row(line)
for i, line in self._enum_lines()
if i == index),
None)
if row is None:
raise ItsdbError('could not retrieve row in attached table')
else:
raise ItsdbError('invalid row in detached table: {}'.format(index))
return Record._make(self.fields, row, self, index)
|
def _getitem(self, index)
|
Get a single non-slice index.
| 6.117461
| 6.162963
| 0.992617
|
fields = self.fields
for record in records:
record = _cast_record_to_str_tuple(record, fields)
self._records.append(record)
|
def extend(self, records)
|
Add each record in *records* to the end of the table.
Args:
records: an iterable of :class:`Record` or other iterables
containing column values
| 5.216232
| 7.207365
| 0.723736
|
if isinstance(cols, stringtypes):
cols = _split_cols(cols)
if not cols:
cols = [f.name for f in self.fields]
return select_rows(cols, self, mode=mode)
|
def select(self, cols, mode='list')
|
Select columns from each row in the table.
See :func:`select_rows` for a description of how to use the
*mode* parameter.
Args:
cols: an iterable of Field (column) names
mode: how to return the data
| 4.278949
| 5.816905
| 0.735606
|
if table is not None and table not in self.relations:
raise ItsdbError('Cannot add filter; table "{}" is not defined '
'by the relations file.'
.format(table))
# this is a hack, though perhaps well-motivated
if cols is None:
cols = [None]
self.filters[table].append((cols, condition))
|
def add_filter(self, table, cols, condition)
|
Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function.
| 8.131579
| 9.266183
| 0.877554
|
if table not in self.relations:
raise ItsdbError('Cannot add applicator; table "{}" is not '
'defined by the relations file.'
.format(table))
if cols is None:
raise ItsdbError('Cannot add applicator; columns not specified.')
fields = set(f.name for f in self.relations[table])
for col in cols:
if col not in fields:
raise ItsdbError('Cannot add applicator; column "{}" not '
'defined by the relations file.'
.format(col))
self.applicators[table].append((cols, function))
|
def add_applicator(self, table, cols, function)
|
Add an applicator. When reading *table*, rows in *table* will be
modified by apply_rows().
Args:
table: The table to apply the function to.
cols: The columns in *table* to apply the function on.
function: The applicator function.
| 3.032389
| 3.225603
| 0.9401
|
fields = self.table_relations(table) if self.cast else None
field_names = [f.name for f in self.table_relations(table)]
field_len = len(field_names)
table_path = os.path.join(self.root, table)
with _open_table(table_path, self.encoding) as tbl:
for line in tbl:
cols = decode_row(line, fields=fields)
if len(cols) != field_len:
# should this throw an exception instead?
logging.error('Number of stored fields ({}) '
'differs from the expected number ({}); '
'fields may be misaligned!'
.format(len(cols), field_len))
row = OrderedDict(zip(field_names, cols))
yield row
|
def read_raw_table(self, table)
|
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
| 4.442049
| 4.342393
| 1.02295
|
filters = self.filters[None] + self.filters[table]
if key_filter:
for f in self.relations[table]:
key = f.name
if f.key and (self._index.get(key) is not None):
ids = self._index[key]
# Can't keep local variables (like ids) in the scope of
# the lambda expression, so make it a default argument.
# Source: http://stackoverflow.com/a/938493/1441112
function = lambda r, x, ids=ids: x in ids
filters.append(([key], function))
applicators = self.applicators[table]
rows = self.read_raw_table(table)
return filter_rows(filters, apply_rows(applicators, rows))
|
def read_table(self, table, key_filter=True)
|
Yield rows in the [incr tsdb()] *table* that pass any defined
filters, and with values changed by any applicators. If no
filters or applicators are defined, the result is the same as
from ItsdbProfile.read_raw_table().
| 6.459701
| 6.031273
| 1.071034
|
if cols is None:
cols = [c.name for c in self.relations[table]]
rows = self.read_table(table, key_filter=key_filter)
for row in select_rows(cols, rows, mode=mode):
yield row
|
def select(self, table, cols, mode='list', key_filter=True)
|
Yield selected rows from *table*. This method just calls
select_rows() on the rows read from *table*.
| 3.470863
| 2.888897
| 1.201449
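A minimal sketch of selecting two columns from a profile's item table; the profile path is hypothetical.
from delphin.itsdb import ItsdbProfile   # import path assumed
prof = ItsdbProfile('path/to/profile')   # hypothetical profile directory
for i_id, i_input in prof.select('item', ['i-id', 'i-input']):
    print(i_id, i_input)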
|
get_keys = lambda t: (f.name for f in self.relations[t] if f.key)
keys = set(get_keys(table1)).intersection(get_keys(table2))
if not keys:
raise ItsdbError(
'Cannot join tables "{}" and "{}"; no shared key exists.'
.format(table1, table2)
)
key = keys.pop()
# this join method stores the whole of table2 in memory, but it is
# MUCH faster than a nested loop method. Most profiles will fit in
# memory anyway, so it's a decent tradeoff
table2_data = defaultdict(list)
for row in self.read_table(table2, key_filter=key_filter):
table2_data[row[key]].append(row)
for row1 in self.read_table(table1, key_filter=key_filter):
for row2 in table2_data.get(row1[key], []):
joinedrow = OrderedDict(
[('{}:{}'.format(table1, k), v)
for k, v in row1.items()] +
[('{}:{}'.format(table2, k), v)
for k, v in row2.items()]
)
yield joinedrow
|
def join(self, table1, table2, key_filter=True)
|
Yield rows from a table built by joining *table1* and *table2*.
The column names in the rows have the original table name
prepended and separated by a colon. For example, joining tables
'item' and 'parse' will result in column names like
'item:i-input' and 'parse:parse-id'.
| 3.301255
| 3.298157
| 1.000939
|
_write_table(self.root,
table,
rows,
self.table_relations(table),
append=append,
gzip=gzip,
encoding=self.encoding)
|
def write_table(self, table, rows, append=False, gzip=False)
|
Encode and write out *table* to the profile directory.
Args:
table: The name of the table to write
rows: The rows to write to the table
append: If `True`, append the encoded rows to any existing
data.
gzip: If `True`, compress the resulting table with `gzip`.
The table's filename will have `.gz` appended.
| 5.869174
| 6.626781
| 0.885675
|
if relations_filename:
src_rels = os.path.abspath(relations_filename)
relations = get_relations(relations_filename)
else:
src_rels = os.path.abspath(os.path.join(self.root,
_relations_filename))
relations = self.relations
tgt_rels = os.path.abspath(os.path.join(profile_directory,
_relations_filename))
if not (os.path.isfile(tgt_rels) and src_rels == tgt_rels):
with open(tgt_rels, 'w') as rel_fh:
print(open(src_rels).read(), file=rel_fh)
tables = self._tables
if tables is not None:
tables = set(tables)
for table, fields in relations.items():
if tables is not None and table not in tables:
continue
try:
fn = _table_filename(os.path.join(self.root, table))
_gzip = gzip if gzip is not None else fn.endswith('.gz')
rows = list(self.read_table(table, key_filter=key_filter))
_write_table(
profile_directory, table, rows, fields,
append=append, gzip=_gzip, encoding=self.encoding
)
except ItsdbError:
logging.warning(
'Could not write "{}"; table doesn\'t exist.'.format(table)
)
continue
self._cleanup(gzip=gzip)
|
def write_profile(self, profile_directory, relations_filename=None,
key_filter=True,
append=False, gzip=None)
|
Write all tables (as specified by the relations) to a profile.
Args:
profile_directory: The directory of the output profile
relations_filename: If given, read and use the relations
at this path instead of the current profile's
relations
key_filter: If True, filter the rows by keys in the index
append: If `True`, append profile data to existing tables
in the output profile directory
gzip: If `True`, compress tables using `gzip`. Table
filenames will have `.gz` appended. If `False`, only
write out text files. If `None`, use whatever the
original file was.
| 2.916019
| 2.865517
| 1.017624
|
if not os.path.isdir(self.root):
return False
if not os.path.isfile(os.path.join(self.root, _relations_filename)):
return False
if table is not None:
try:
_table_filename(os.path.join(self.root, table))
except ItsdbError:
return False
return True
|
def exists(self, table=None)
|
Return True if the profile or a table exists.
If *table* is `None`, this function returns True if the root
directory exists and contains a valid relations file. If *table*
is given, the function returns True if the table exists as a
file (even if empty). Otherwise it returns False.
| 3.403136
| 2.573203
| 1.322529
|
size = 0
if table is None:
for table in self.relations:
size += self.size(table)
else:
try:
fn = _table_filename(os.path.join(self.root, table))
size += os.stat(fn).st_size
except ItsdbError:
pass
return size
|
def size(self, table=None)
|
Return the size, in bytes, of the profile or *table*.
If *table* is `None`, this function returns the size of the
whole profile (i.e. the sum of the table sizes). Otherwise, it
returns the size of *table*.
Note: if the file is gzipped, it returns the compressed size.
| 4.345986
| 4.600173
| 0.944744
|
return next(parse_from_iterable([input], server, params, headers), None)
|
def parse(input, server=default_erg_server, params=None, headers=None)
|
Request a parse of *input* on *server* and return the response.
Args:
input (str): sentence to be parsed
server (str): the url for the server (the default LOGON server
is used by default)
params (dict): a dictionary of request parameters
headers (dict): a dictionary of additional request headers
Returns:
A ParseResponse containing the results, if the request was
successful.
Raises:
requests.HTTPError: if the status code was not 200
| 8.91826
| 20.851522
| 0.427703
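A hedged sketch of a one-off parse request; the module path and the dict-like shape of the response are assumptions based on pyDelphin 0.x.
from delphin.interfaces import rest   # module path assumed
response = rest.parse('Abrams barked.')
if response is not None:
    print(len(response.get('results', [])))   # number of readings returned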
|
client = DelphinRestClient(server)
for input in inputs:
yield client.parse(input, params=params, headers=headers)
|
def parse_from_iterable(
inputs,
server=default_erg_server,
params=None,
headers=None)
|
Request parses for all *inputs*.
Args:
inputs (iterable): sentences to parse
server (str): the url for the server (the default LOGON server
is used by default)
params (dict): a dictionary of request parameters
headers (dict): a dictionary of additional request headers
Yields:
ParseResponse objects for each successful response.
Raises:
requests.HTTPError: for the first response with a status code
that is not 200
| 6.946407
| 9.3388
| 0.743822
|
if params is None:
params = {}
params['input'] = sentence
hdrs = {'Accept': 'application/json'}
if headers is not None:
hdrs.update(headers)
url = urljoin(self.server, 'parse')
r = requests.get(url, params=params, headers=hdrs)
if r.status_code == 200:
return _RestResponse(r.json())
else:
r.raise_for_status()
|
def parse(self, sentence, params=None, headers=None)
|
Request a parse of *sentence* and return the response.
Args:
sentence (str): sentence to be parsed
params (dict): a dictionary of request parameters
headers (dict): a dictionary of additional request headers
Returns:
A ParseResponse containing the results, if the request was
successful.
Raises:
requests.HTTPError: if the status code was not 200
| 2.417772
| 2.447237
| 0.98796
|
global CRC_CCITT_TABLE
if not CRC_CCITT_TABLE:
crc_ccitt_table = []
for i in range(0, 256):
crc = 0
c = i << 8
for j in range(0, 8):
if (crc ^ c) & 0x8000:
crc = c_ushort(crc << 1).value ^ 0x1021
else:
crc = c_ushort(crc << 1).value
c = c_ushort(c << 1).value
crc_ccitt_table.append(crc)
CRC_CCITT_TABLE = crc_ccitt_table
is_string = _is_string(data)
crc_value = 0x0000 # XModem version
for c in data:
d = ord(c) if is_string else c
tmp = ((crc_value >> 8) & 0xff) ^ d
crc_value = ((crc_value << 8) & 0xff00) ^ CRC_CCITT_TABLE[tmp]
return crc_value
|
def _calculate_crc_ccitt(data)
|
All CRC stuff ripped from PyCRC, GPLv3 licensed
| 2.310087
| 2.279497
| 1.01342
|
if compression == 3:
data = base64.b64encode(zlib.compress(self.data.bytes))
data = ':Z64:%s:%s' % (data.decode('ascii'), self._calc_crc(data))
elif compression == 1:
data = base64.b64encode(self.data.bytes)
data = ':B64:%s:%s' % (data.decode('ascii'), self._calc_crc(data))
else:
lines = []
last_unique_line = None
for line in self.data.hex_rows:
if line.endswith('00'):
line = line.rstrip('0')
if len(line) % 2:
line += '0'
line += ','
if line == last_unique_line:
line = ':'
else:
last_unique_line = line
lines.append(line)
data = '\n'.join(lines)
to_compress = set(RE_UNCOMPRESSED.findall(data))
to_compress = sorted(to_compress, reverse=True)
for uncompressed in to_compress:
uncompressed = uncompressed[0]
repeat = len(uncompressed)
compressed = ''
while repeat >= 400:
compressed += 'z'
repeat -= 400
if repeat >= 20:
value = repeat // 20
repeat -= value * 20
compressed += chr(value + 70).lower()
if repeat > 0:
compressed += chr(repeat + 70)
data = data.replace(uncompressed, compressed + uncompressed[0])
data = data.replace('\n', '')
zpl = '~DGR:%s.GRF,%s,%s,%s' % (
self.filename,
self.data.filesize,
self.data.width // 8,
data
)
return zpl
|
def to_zpl_line(self, compression=3, **kwargs)
|
Compression:
3 = ZB64/Z64, base64 encoded DEFLATE compressed - best compression
2 = ASCII hex encoded run length compressed - most compatible
1 = B64, base64 encoded - pointless?
| 3.230173
| 3.128859
| 1.032381
|
zpl = [
self.to_zpl_line(**kwargs), # Download image to printer
'^XA', # Start Label Format
'^MM%s,Y' % print_mode,
'^PO%s' % print_orientation,
'^MN%s' % media_tracking,
'^FO0,0', # Field Origin to 0,0
'^XGR:%s.GRF,1,1' % self.filename, # Draw image
'^FS', # Field Separator
'^PQ%s,%s,0,%s' % (
int(quantity), # Print Quantity
int(pause_and_cut), # Pause and cut every N labels
'Y' if override_pause else 'N' # Don't pause between cuts
),
'^XZ', # End Label Format
'^IDR:%s.GRF' % self.filename # Delete image from printer
]
return ''.join(zpl)
|
def to_zpl(
self, quantity=1, pause_and_cut=0, override_pause=False,
print_mode='C', print_orientation='N', media_tracking='Y', **kwargs
)
|
The most basic ZPL to print the GRF. Since ZPL printers are stateful,
this may not work and you may need to build your own.
| 5.726034
| 5.403145
| 1.059759
|
source = Image.open(BytesIO(image))
source = source.convert('1')
width = int(math.ceil(source.size[0] / 8.0))
data = []
for line in _chunked(list(source.getdata()), source.size[0]):
row = ''.join(['0' if p else '1' for p in line])
row = row.ljust(width * 8, '0')
data.append(row)
data = GRFData(width, bin=''.join(data))
return cls(filename, data)
|
def from_image(cls, image, filename)
|
Filename is 1-8 alphanumeric characters to identify the GRF in ZPL.
| 4.011297
| 3.65957
| 1.096112
|
# Most arguments below are based on what CUPS uses
setpagedevice = [
'/.HWMargins[0.000000 0.000000 0.000000 0.000000]',
'/Margins[0 0]'
]
cmd = [
'gs',
'-dQUIET',
'-dPARANOIDSAFER',
'-dNOPAUSE',
'-dBATCH',
'-dNOINTERPOLATE',
'-sDEVICE=pngmono',
'-dAdvanceDistance=1000',
'-r%s' % int(dpi),
'-dDEVICEWIDTHPOINTS=%s' % int(width),
'-dDEVICEHEIGHTPOINTS=%s' % int(height),
'-dFIXEDMEDIA',
'-dPDFFitPage',
'-c',
'<<%s>>setpagedevice' % ' '.join(setpagedevice)
]
if center_of_pixel:
cmd += ['0 .setfilladjust']
if font_path and os.path.exists(font_path):
cmd += ['-I' + font_path]
if use_bindings:
import ghostscript
# python-ghostscript doesn't like reading/writing from
# stdin/stdout so we need to use temp files
with tempfile.NamedTemporaryFile() as in_file, \
tempfile.NamedTemporaryFile() as out_file:
in_file.write(pdf)
in_file.flush()
# Ghostscript seems to be sensitive to argument order
cmd[13:13] += [
'-sOutputFile=%s' % out_file.name
]
cmd += [
'-f', in_file.name
]
try:
ghostscript.Ghostscript(*[c.encode('ascii') for c in cmd])
except Exception as e:
raise GRFException(e)
pngs = out_file.read()
else:
from subprocess import PIPE, Popen
# Ghostscript seems to be sensitive to argument order
cmd[13:13] += [
'-sstdout=%stderr',
'-sOutputFile=%stdout',
]
cmd += [
'-f', '-'
]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
pngs, stderr = p.communicate(pdf)
if stderr:
raise GRFException(stderr)
# This is what PIL uses to identify PNGs
png_start = b'\211PNG\r\n\032\n'
grfs = []
for png in pngs.split(png_start)[1:]:
grfs.append(cls.from_image(png_start + png, filename))
return grfs
|
def from_pdf(
cls, pdf, filename, width=288, height=432, dpi=203, font_path=None,
center_of_pixel=False, use_bindings=False
)
|
Filename is 1-8 alphanumeric characters to identify the GRF in ZPL.
Dimensions and DPI are for a typical 4"x6" shipping label.
E.g. 432 points / 72 points per inch = 6 inches, rendered at 203 dpi.
Using center of pixel will improve barcode quality but may decrease
the quality of some text.
use_bindings=False:
- Uses subprocess.Popen
- Forks so there is a memory spike
- Easier to setup - only needs the gs binary
use_bindings=True:
- Uses python-ghostscript
- Doesn't fork so should use less memory
- python-ghostscript is a bit buggy
- May be harder to setup - even if you have updated the gs binary
there may still be old libgs* files on your system
| 3.737346
| 3.610997
| 1.03499
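A quick check of the arithmetic behind the defaults (the variable names here are illustrative, not part of the API): the point dimensions describe a 4" x 6" label and the dpi sets the rendered pixel size.
width_pt, height_pt, dpi = 288, 432, 203
width_in, height_in = width_pt / 72.0, height_pt / 72.0          # 4.0 x 6.0 inches
width_px, height_px = int(width_in * dpi), int(height_in * dpi)  # 812 x 1218 pixels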
|
re_bars = re.compile(r'1{%s,}' % min_bar_height)
bars = {}
for i, line in enumerate(data):
for match in re_bars.finditer(line):
try:
bars[match.span()].append(i)
except KeyError:
bars[match.span()] = [i]
grouped_bars = []
for span, seen_at in bars.items():
group = []
for coords in seen_at:
if group and coords - group[-1] > max_gap_size:
grouped_bars.append((span, group))
group = []
group.append(coords)
grouped_bars.append((span, group))
suspected_barcodes = []
for span, seen_at in grouped_bars:
if len(seen_at) < min_bar_count:
continue
pc_white = len(seen_at) / float(seen_at[-1] - seen_at[0])
if pc_white >= min_percent_white and pc_white <= max_percent_white:
suspected_barcodes.append((span, seen_at))
for span, seen_at in suspected_barcodes:
barcode = []
for line in data[seen_at[0]:seen_at[-1]+1]:
barcode.append(line[span[0]])
barcode = ''.join(barcode)
# Do the actual optimisation
barcode = self._optimise_barcode(barcode)
barcode = list(barcode)
barcode.reverse()
width = span[1] - span[0]
for i in range(seen_at[0], seen_at[-1]+1):
line = data[i]
line = (
line[:span[0]] + (barcode.pop() * width) + line[span[1]:]
)
data[i] = line
return data
|
def _optimise_barcodes(
self, data, min_bar_height=20, min_bar_count=100, max_gap_size=30,
min_percent_white=0.2, max_percent_white=0.8, **kwargs
)
|
min_bar_height = Minimum height of black bars in px. Set this too
low and it might pick up text and data matrices,
too high and it might pick up borders, tables, etc.
min_bar_count = Minimum number of parallel black bars before a
pattern is considered a potential barcode.
max_gap_size = Biggest white gap in px allowed between black bars.
This is only important if you have multiple
barcodes next to each other.
min_percent_white = Minimum percentage of white bars between black
bars. This helps to ignore solid rectangles.
max_percent_white = Maximum percentage of white bars between black
bars. This helps to ignore solid rectangles.
| 2.223352
| 2.178087
| 1.020782
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['id'] = u(array.get('id'))
data['title'] = u(array.get('title'))
data['prices'] = LabeledPrice.from_array_list(array.get('prices'), list_level=1)
instance = ShippingOption(**data)
instance._raw = array
return instance
|
def from_array(array)
|
Deserialize a new ShippingOption from a given dictionary.
:return: new ShippingOption instance.
:rtype: ShippingOption
| 3.725496
| 2.948265
| 1.263623
|
s1 = _first_cap_re.sub(r'\1_\2', name)
return _all_cap_re.sub(r'\1_\2', s1).lower()
|
def convert_to_underscore(name)
|
"someFunctionWhatever" -> "some_function_whatever"
| 2.163471
| 2.096856
| 1.031769
|
description_with_tabs = "\t\t" + description.strip().replace("\n", "\n\t\t")
param_list_args = []
param_list_kwargs = []
args = []
args2 = []
kwargs = []
kwargs2 = []
asserts = []
str_args = ""
str_kwargs = ""
param_strings = params_string.split("\n")
for param in param_strings:
assert_commands, assert_comments, param_name, param_type, table, non_buildin_type, param_name_input = parse_param_types(param)
param_required = table[2].strip()
param_needed = None
if param_required == "Yes":
param_needed = True
elif param_required == "Optional":
param_needed = False
param_description = table[3].strip()
if param_needed:
param_list_args.append(Param(param_name, param_type,param_needed, param_description))
args.append(param_name)
args2.append("{param_name}={param_name}".format(param_name=param_name))
str_args += '\t\t:param {key}: {descr}\n\t\t:type {key}: {type}\n\n'.format(key=param_name, descr=param_description, type=param_type)
if assert_commands:
asserts.append("assert({var} is not None)".format(var=param_name))
asserts.append("assert({ass})".format(ass=" or ".join(assert_commands)) + ((" # {comment}".format(comment=", ".join(assert_comments))) if assert_comments else ""))
else:
param_list_kwargs.append(Param(param_name, param_type,param_needed, param_description))
kwargs.append("{param_name}=None".format(param_name=param_name))
kwargs2.append("{param_name}={param_name}".format(param_name=param_name))
str_kwargs += '\t\t:keyword {key}: {descr}\n\t\t:type {key}: {type}\n\n'.format(key=param_name, descr=param_description, type=param_type)
if assert_commands:
asserts.append("assert({var} is None or {ass})".format(var=param_name, ass=" or ".join(assert_commands)) + ((" # {comment}".format(comment=", ".join(assert_comments))) if assert_comments else ""))
args.extend(kwargs)
args2.extend(kwargs2)
asserts_string = "\n\t\t" + "\n\t\t".join(asserts)
text = ""
if len(str_args)>0:
text += '\n\t\tParameters:\n\n'
text += str_args
if len(str_kwargs)>0:
text += '\n\t\tOptional keyword parameters:\n\n'
text += str_kwargs
do_args = ['"%s"' % command]
do_args.extend(args2)
result = '\tdef {funcname}(self, {params}):\n\t\t{asserts_with_tabs}\n\t\treturn self.do({do_args})\n\t# end def {funcname}'.format(
funcname=convert_to_underscore(command),
params=", ".join(args), description_with_tabs=description_with_tabs, link=link,
returns=returns, return_type=return_type, command=command, do_args=", ".join(do_args),
asserts_with_tabs=asserts_string,
paramshit = text
)
result = result.replace("\t", " ")
return result
|
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message")
|
Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
| 2.446068
| 2.446321
| 0.999896
|
array = super(User, self).to_array()
array['id'] = int(self.id) # type int
array['is_bot'] = bool(self.is_bot) # type bool
array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str
if self.last_name is not None:
array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str
if self.username is not None:
array['username'] = u(self.username) # py2: type unicode, py3: type str
if self.language_code is not None:
array['language_code'] = u(self.language_code) # py2: type unicode, py3: type str
return array
|
def to_array(self)
|
Serializes this User to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.677833
| 1.649618
| 1.017104
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['id'] = int(array.get('id'))
data['is_bot'] = bool(array.get('is_bot'))
data['first_name'] = u(array.get('first_name'))
data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
data['username'] = u(array.get('username')) if array.get('username') is not None else None
data['language_code'] = u(array.get('language_code')) if array.get('language_code') is not None else None
data['_raw'] = array
return User(**data)
|
def from_array(array)
|
Deserialize a new User from a given dictionary.
:return: new User instance.
:rtype: User
| 1.936285
| 1.695146
| 1.142253
|
array = super(Chat, self).to_array()
array['id'] = int(self.id) # type int
array['type'] = u(self.type) # py2: type unicode, py3: type str
if self.title is not None:
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.username is not None:
array['username'] = u(self.username) # py2: type unicode, py3: type str
if self.first_name is not None:
array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str
if self.last_name is not None:
array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str
if self.all_members_are_administrators is not None:
array['all_members_are_administrators'] = bool(self.all_members_are_administrators) # type bool
if self.photo is not None:
array['photo'] = self.photo.to_array() # type ChatPhoto
if self.description is not None:
array['description'] = u(self.description) # py2: type unicode, py3: type str
if self.invite_link is not None:
array['invite_link'] = u(self.invite_link) # py2: type unicode, py3: type str
if self.pinned_message is not None:
array['pinned_message'] = self.pinned_message.to_array() # type Message
if self.sticker_set_name is not None:
array['sticker_set_name'] = u(self.sticker_set_name) # py2: type unicode, py3: type str
if self.can_set_sticker_set is not None:
array['can_set_sticker_set'] = bool(self.can_set_sticker_set) # type bool
return array
|
def to_array(self)
|
Serializes this Chat to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.311976
| 1.299958
| 1.009245
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import ChatPhoto
from pytgbot.api_types.receivable.updates import Message
data = {}
data['id'] = int(array.get('id'))
data['type'] = u(array.get('type'))
data['title'] = u(array.get('title')) if array.get('title') is not None else None
data['username'] = u(array.get('username')) if array.get('username') is not None else None
data['first_name'] = u(array.get('first_name')) if array.get('first_name') is not None else None
data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
data['all_members_are_administrators'] = bool(array.get('all_members_are_administrators')) if array.get('all_members_are_administrators') is not None else None
data['photo'] = ChatPhoto.from_array(array.get('photo')) if array.get('photo') is not None else None
data['description'] = u(array.get('description')) if array.get('description') is not None else None
data['invite_link'] = u(array.get('invite_link')) if array.get('invite_link') is not None else None
data['pinned_message'] = Message.from_array(array.get('pinned_message')) if array.get('pinned_message') is not None else None
data['sticker_set_name'] = u(array.get('sticker_set_name')) if array.get('sticker_set_name') is not None else None
data['can_set_sticker_set'] = bool(array.get('can_set_sticker_set')) if array.get('can_set_sticker_set') is not None else None
data['_raw'] = array
return Chat(**data)
|
def from_array(array)
|
Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat
| 1.491194
| 1.387501
| 1.074734
|
array = super(ChatMember, self).to_array()
array['user'] = self.user.to_array() # type User
array['status'] = u(self.status) # py2: type unicode, py3: type str
if self.until_date is not None:
array['until_date'] = int(self.until_date) # type int
if self.can_be_edited is not None:
array['can_be_edited'] = bool(self.can_be_edited) # type bool
if self.can_change_info is not None:
array['can_change_info'] = bool(self.can_change_info) # type bool
if self.can_post_messages is not None:
array['can_post_messages'] = bool(self.can_post_messages) # type bool
if self.can_edit_messages is not None:
array['can_edit_messages'] = bool(self.can_edit_messages) # type bool
if self.can_delete_messages is not None:
array['can_delete_messages'] = bool(self.can_delete_messages) # type bool
if self.can_invite_users is not None:
array['can_invite_users'] = bool(self.can_invite_users) # type bool
if self.can_restrict_members is not None:
array['can_restrict_members'] = bool(self.can_restrict_members) # type bool
if self.can_pin_messages is not None:
array['can_pin_messages'] = bool(self.can_pin_messages) # type bool
if self.can_promote_members is not None:
array['can_promote_members'] = bool(self.can_promote_members) # type bool
if self.can_send_messages is not None:
array['can_send_messages'] = bool(self.can_send_messages) # type bool
if self.can_send_media_messages is not None:
array['can_send_media_messages'] = bool(self.can_send_media_messages) # type bool
if self.can_send_other_messages is not None:
array['can_send_other_messages'] = bool(self.can_send_other_messages) # type bool
if self.can_add_web_page_previews is not None:
array['can_add_web_page_previews'] = bool(self.can_add_web_page_previews) # type bool
return array
|
def to_array(self)
|
Serializes this ChatMember to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.221077
| 1.181472
| 1.033522
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.peer import User
data = {}
data['user'] = User.from_array(array.get('user'))
data['status'] = u(array.get('status'))
data['until_date'] = int(array.get('until_date')) if array.get('until_date') is not None else None
data['can_be_edited'] = bool(array.get('can_be_edited')) if array.get('can_be_edited') is not None else None
data['can_change_info'] = bool(array.get('can_change_info')) if array.get('can_change_info') is not None else None
data['can_post_messages'] = bool(array.get('can_post_messages')) if array.get('can_post_messages') is not None else None
data['can_edit_messages'] = bool(array.get('can_edit_messages')) if array.get('can_edit_messages') is not None else None
data['can_delete_messages'] = bool(array.get('can_delete_messages')) if array.get('can_delete_messages') is not None else None
data['can_invite_users'] = bool(array.get('can_invite_users')) if array.get('can_invite_users') is not None else None
data['can_restrict_members'] = bool(array.get('can_restrict_members')) if array.get('can_restrict_members') is not None else None
data['can_pin_messages'] = bool(array.get('can_pin_messages')) if array.get('can_pin_messages') is not None else None
data['can_promote_members'] = bool(array.get('can_promote_members')) if array.get('can_promote_members') is not None else None
data['can_send_messages'] = bool(array.get('can_send_messages')) if array.get('can_send_messages') is not None else None
data['can_send_media_messages'] = bool(array.get('can_send_media_messages')) if array.get('can_send_media_messages') is not None else None
data['can_send_other_messages'] = bool(array.get('can_send_other_messages')) if array.get('can_send_other_messages') is not None else None
data['can_add_web_page_previews'] = bool(array.get('can_add_web_page_previews')) if array.get('can_add_web_page_previews') is not None else None
data['_raw'] = array
return ChatMember(**data)
|
def from_array(array)
|
Deserialize a new ChatMember from a given dictionary.
:return: new ChatMember instance.
:rtype: ChatMember
| 1.286365
| 1.222508
| 1.052234
|
corpus = etree.fromstring(s)
if single:
ds = _deserialize_mrs(next(corpus))
else:
ds = (_deserialize_mrs(mrs_elem) for mrs_elem in corpus)
return ds
|
def loads(s, single=False)
|
Deserialize MRX string representations
Args:
s (str): a MRX string
single (bool): if `True`, only return the first Xmrs object
Returns:
a generator of Xmrs objects (unless *single* is `True`)
| 5.413239
| 5.279304
| 1.02537
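The `single` flag above toggles between returning the first deserialized object and a lazy generator over all of them. A standalone sketch of the same control flow with a stand-in deserializer (the real code walks <mrs> elements parsed from the XML corpus):

def loads_sketch(elements, single=False):
    deserialize = lambda elem: {'mrs': elem}          # stand-in for _deserialize_mrs
    if single:
        return deserialize(next(iter(elements)))      # only the first object
    return (deserialize(elem) for elem in elements)   # lazy generator over all objects

assert loads_sketch(['a', 'b'], single=True) == {'mrs': 'a'}
assert list(loads_sketch(['a', 'b'])) == [{'mrs': 'a'}, {'mrs': 'b'}]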
|
array = super(InlineQueryResultArticle, self).to_array()
# 'type' and 'id' given by superclass
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.url is not None:
array['url'] = u(self.url) # py2: type unicode, py3: type str
if self.hide_url is not None:
array['hide_url'] = bool(self.hide_url) # type bool
if self.description is not None:
array['description'] = u(self.description) # py2: type unicode, py3: type str
if self.thumb_url is not None:
array['thumb_url'] = u(self.thumb_url) # py2: type unicode, py3: type str
if self.thumb_width is not None:
array['thumb_width'] = int(self.thumb_width) # type int
if self.thumb_height is not None:
array['thumb_height'] = int(self.thumb_height) # type int
return array
|
def to_array(self)
|
Serializes this InlineQueryResultArticle to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.391801
| 1.336367
| 1.041481
|
array = super(InlineQueryResultGif, self).to_array()
# 'type' and 'id' given by superclass
array['gif_url'] = u(self.gif_url) # py2: type unicode, py3: type str
array['thumb_url'] = u(self.thumb_url) # py2: type unicode, py3: type str
if self.gif_width is not None:
array['gif_width'] = int(self.gif_width) # type int
if self.gif_height is not None:
array['gif_height'] = int(self.gif_height) # type int
if self.gif_duration is not None:
array['gif_duration'] = int(self.gif_duration) # type int
if self.title is not None:
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array
|
def to_array(self)
|
Serializes this InlineQueryResultGif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.348093
| 1.303998
| 1.033815
|
array = super(InlineQueryResultAudio, self).to_array()
# 'type' and 'id' given by superclass
array['audio_url'] = u(self.audio_url) # py2: type unicode, py3: type str
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.performer is not None:
array['performer'] = u(self.performer) # py2: type unicode, py3: type str
if self.audio_duration is not None:
array['audio_duration'] = int(self.audio_duration) # type int
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array
|
def to_array(self)
|
Serializes this InlineQueryResultAudio to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.430989
| 1.390483
| 1.029131
|
array = super(InlineQueryResultLocation, self).to_array()
# 'type' and 'id' given by superclass
array['latitude'] = float(self.latitude) # type float
array['longitude'] = float(self.longitude) # type float
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.live_period is not None:
array['live_period'] = int(self.live_period) # type int
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
if self.thumb_url is not None:
array['thumb_url'] = u(self.thumb_url) # py2: type unicode, py3: type str
if self.thumb_width is not None:
array['thumb_width'] = int(self.thumb_width) # type int
if self.thumb_height is not None:
array['thumb_height'] = int(self.thumb_height) # type int
return array
|
def to_array(self)
|
Serializes this InlineQueryResultLocation to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.492489
| 1.423684
| 1.048329
|
array = super(InlineQueryResultContact, self).to_array()
# 'type' and 'id' given by superclass
array['phone_number'] = u(self.phone_number) # py2: type unicode, py3: type str
array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str
if self.last_name is not None:
array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
if self.thumb_url is not None:
array['thumb_url'] = u(self.thumb_url) # py2: type unicode, py3: type str
if self.thumb_width is not None:
array['thumb_width'] = int(self.thumb_width) # type int
if self.thumb_height is not None:
array['thumb_height'] = int(self.thumb_height) # type int
return array
|
def to_array(self)
|
Serializes this InlineQueryResultContact to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.432201
| 1.384566
| 1.034404
|
array = super(InlineQueryResultGame, self).to_array()
# 'type' and 'id' given by superclass
array['game_short_name'] = u(self.game_short_name) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
return array
|
def to_array(self)
|
Serializes this InlineQueryResultGame to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 2.721483
| 2.553104
| 1.065951
|
array = super(InlineQueryResultCachedGif, self).to_array()
# 'type' and 'id' given by superclass
array['gif_file_id'] = u(self.gif_file_id) # py2: type unicode, py3: type str
if self.title is not None:
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array
|
def to_array(self)
|
Serializes this InlineQueryResultCachedGif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.549911
| 1.470646
| 1.053898
|
array = super(InlineQueryResultCachedDocument, self).to_array()
# 'type' and 'id' given by superclass
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['document_file_id'] = u(self.document_file_id) # py2: type unicode, py3: type str
if self.description is not None:
array['description'] = u(self.description) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array
|
def to_array(self)
|
Serializes this InlineQueryResultCachedDocument to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.472693
| 1.398014
| 1.053417
|
array = super(InlineQueryResultCachedVoice, self).to_array()
# 'type' and 'id' given by superclass
array['voice_file_id'] = u(self.voice_file_id) # py2: type unicode, py3: type str
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array
|
def to_array(self)
|
Serializes this InlineQueryResultCachedVoice to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.538552
| 1.47672
| 1.041871
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
data = {}
# 'type' is given by class type
data['id'] = u(array.get('id'))
data['audio_file_id'] = u(array.get('audio_file_id'))
data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup')) if array.get('reply_markup') is not None else None
data['input_message_content'] = InputMessageContent.from_array(array.get('input_message_content')) if array.get('input_message_content') is not None else None
instance = InlineQueryResultCachedAudio(**data)
instance._raw = array
return instance
|
def from_array(array)
|
Deserialize a new InlineQueryResultCachedAudio from a given dictionary.
:return: new InlineQueryResultCachedAudio instance.
:rtype: InlineQueryResultCachedAudio
| 2.08344
| 1.808834
| 1.151814
|
array = super(InputTextMessageContent, self).to_array()
array['message_text'] = u(self.message_text) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.disable_web_page_preview is not None:
array['disable_web_page_preview'] = bool(self.disable_web_page_preview) # type bool
return array
|
def to_array(self)
|
Serializes this InputTextMessageContent to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.687617
| 1.594243
| 1.058569
|
array = super(InputLocationMessageContent, self).to_array()
array['latitude'] = float(self.latitude) # type float
array['longitude'] = float(self.longitude) # type float
if self.live_period is not None:
array['live_period'] = int(self.live_period) # type int
return array
|
def to_array(self)
|
Serializes this InputLocationMessageContent to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 2.411607
| 1.910709
| 1.262153
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['latitude'] = float(array.get('latitude'))
data['longitude'] = float(array.get('longitude'))
data['live_period'] = int(array.get('live_period')) if array.get('live_period') is not None else None
instance = InputLocationMessageContent(**data)
instance._raw = array
return instance
|
def from_array(array)
|
Deserialize a new InputLocationMessageContent from a given dictionary.
:return: new InputLocationMessageContent instance.
:rtype: InputLocationMessageContent
| 3.066705
| 2.229349
| 1.375605
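Together, the two rows above form a round trip: to_array emits only the keys that are actually set, and from_array casts them back, treating missing optional keys as None. A minimal standalone sketch of that round trip (hypothetical class, not the pytgbot one):

class LocationSketch(object):
    def __init__(self, latitude, longitude, live_period=None):
        self.latitude, self.longitude, self.live_period = latitude, longitude, live_period

    def to_array(self):
        array = {'latitude': float(self.latitude), 'longitude': float(self.longitude)}
        if self.live_period is not None:
            array['live_period'] = int(self.live_period)  # optional key only when set
        return array

    @classmethod
    def from_array(cls, array):
        if not array:
            return None
        return cls(
            float(array['latitude']),
            float(array['longitude']),
            int(array['live_period']) if array.get('live_period') is not None else None,
        )

loc = LocationSketch(52.5, 13.4)
assert LocationSketch.from_array(loc.to_array()).live_period is None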
|
array = super(InputVenueMessageContent, self).to_array()
array['latitude'] = float(self.latitude) # type float
array['longitude'] = float(self.longitude) # type float
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['address'] = u(self.address) # py2: type unicode, py3: type str
if self.foursquare_id is not None:
array['foursquare_id'] = u(self.foursquare_id) # py2: type unicode, py3: type str
if self.foursquare_type is not None:
array['foursquare_type'] = u(self.foursquare_type) # py2: type unicode, py3: type str
return array
|
def to_array(self)
|
Serializes this InputVenueMessageContent to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.596778
| 1.412258
| 1.130656
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['latitude'] = float(array.get('latitude'))
data['longitude'] = float(array.get('longitude'))
data['title'] = u(array.get('title'))
data['address'] = u(array.get('address'))
data['foursquare_id'] = u(array.get('foursquare_id')) if array.get('foursquare_id') is not None else None
data['foursquare_type'] = u(array.get('foursquare_type')) if array.get('foursquare_type') is not None else None
instance = InputVenueMessageContent(**data)
instance._raw = array
return instance
|
def from_array(array)
|
Deserialize a new InputVenueMessageContent from a given dictionary.
:return: new InputVenueMessageContent instance.
:rtype: InputVenueMessageContent
| 2.10702
| 1.63667
| 1.287382
|
array = super(InputContactMessageContent, self).to_array()
array['phone_number'] = u(self.phone_number) # py2: type unicode, py3: type str
array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str
if self.last_name is not None:
array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str
if self.vcard is not None:
array['vcard'] = u(self.vcard) # py2: type unicode, py3: type str
return array
|
def to_array(self)
|
Serializes this InputContactMessageContent to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.735237
| 1.557939
| 1.113803
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.passport import EncryptedCredentials
from pytgbot.api_types.receivable.passport import EncryptedPassportElement
data = {}
data['data'] = EncryptedPassportElement.from_array_list(array.get('data'), list_level=1)
data['credentials'] = EncryptedCredentials.from_array(array.get('credentials'))
data['_raw'] = array
return PassportData(**data)
|
def from_array(array)
|
Deserialize a new PassportData from a given dictionary.
:return: new PassportData instance.
:rtype: PassportData
| 3.196903
| 2.980693
| 1.072537
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.passport import PassportFile
data = {}
data['type'] = u(array.get('type'))
data['hash'] = u(array.get('hash'))
data['data'] = u(array.get('data')) if array.get('data') is not None else None
data['phone_number'] = u(array.get('phone_number')) if array.get('phone_number') is not None else None
data['email'] = u(array.get('email')) if array.get('email') is not None else None
data['files'] = PassportFile.from_array_list(array.get('files'), list_level=1) if array.get('files') is not None else None
data['front_side'] = PassportFile.from_array(array.get('front_side')) if array.get('front_side') is not None else None
data['reverse_side'] = PassportFile.from_array(array.get('reverse_side')) if array.get('reverse_side') is not None else None
data['selfie'] = PassportFile.from_array(array.get('selfie')) if array.get('selfie') is not None else None
data['translation'] = PassportFile.from_array_list(array.get('translation'), list_level=1) if array.get('translation') is not None else None
data['_raw'] = array
return EncryptedPassportElement(**data)
|
def from_array(array)
|
Deserialize a new EncryptedPassportElement from a given dictionary.
:return: new EncryptedPassportElement instance.
:rtype: EncryptedPassportElement
| 1.70731
| 1.621174
| 1.053132
|
variables_needed = []
variables_optional = []
imports = set()
for param in params_string.split("\n"):
variable = parse_param_types(param)
# any variable.types has always_is_value => length must be 1.
assert(not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
# end if
imports.update(variable.all_imports)
# end for
imports = list(imports)
imports.sort()
if isinstance(parent_clazz, str):
parent_clazz = to_type(parent_clazz, "parent class")
assert isinstance(parent_clazz, Type)
clazz_object = Clazz(imports=imports,
clazz=clazz, parent_clazz=parent_clazz, link=link, description=description,
parameters=variables_needed, keywords=variables_optional
)
return clazz_object
|
def clazz(clazz, parent_clazz, description, link, params_string, init_super_args=None)
|
Live template for pycharm:
y = clazz(clazz="$clazz$", parent_clazz="$parent$", description="$description$", link="$lnk$", params_string="$first_param$")
| 4.061203
| 4.069826
| 0.997881
|
variables_needed = []
variables_optional = []
imports = set()
if params_string: # WHITELISTED_FUNCS have no params
for param in params_string.split("\n"):
variable = parse_param_types(param)
# any variable.types has always_is_value => length must be 1.
assert (not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
# end if
imports.update(variable.all_imports)
# end for
# end if
imports = list(imports)
imports.sort()
returns = Variable(types=as_types(return_type, variable_name="return type"), description=returns)
func_object = Function(
imports=imports, api_name=command, link=link, description=description, returns=returns,
parameters=variables_needed, keywords=variables_optional
)
return func_object
|
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message")
|
Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
| 5.069014
| 5.033972
| 1.006961
|
# type_string = type_string.strip()
# remove "list of " and set .is_list accordingly.
# is_list, type_string = can_strip_prefix(type_string, "list of ")
# var_type = Type(string=type_string, is_list=is_list)
var_type = Type(type_string)
# remove "list of " and set .is_list accordingly.
is_list = True
while is_list:
is_list, var_type.string = can_strip_prefix(var_type.string, "list of ")
if is_list:
var_type.is_list += 1
# end if
# end for
if var_type.string == "True":
var_type.string = "bool"
var_type.always_is_value = "True"
# end if
if var_type.string in ["int", "bool", "float", "object", "None", "str"]:
var_type.is_builtin = True
elif var_type.string == "unicode_type":
var_type.string = "unicode_type"
var_type.is_builtin = False
var_type.import_path = "luckydonaldUtils.encoding"
var_type.description = "py2: unicode, py3: str"
elif var_type.string in CLASS_TYPE_PATHS:
var_type.import_path = CLASS_TYPE_PATHS[var_type.string][CLASS_TYPE_PATHS__IMPORT].rstrip(".")
var_type.is_builtin = False
else:
logger.warn(
"Added unrecognized type in param <{var}>: {type!r}".format(var=variable_name, type=var_type.string))
# end if
return var_type
|
def to_type(type_string, variable_name) -> Type
|
Returns a :class:`Type` object of a given type name. Lookup is done via :var:`code_generator_settings.CLASS_TYPE_PATHS`
:param type_string: The type as string. E.g. "bool". Needs to be valid python.
:param variable_name: Only for logging, if an unrecognized type is found.
:return: a :class:`Type` instance
:rtype: Type
| 3.42588
| 3.28449
| 1.043048
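The while-loop above peels one "list of " prefix per iteration and counts the layers in is_list. A standalone sketch of just that prefix-counting step (no Type class or import lookup):

def count_list_levels(type_string):
    # Strip repeated 'list of ' prefixes and return (levels, innermost type name).
    levels, text = 0, type_string.strip()
    while text.startswith("list of "):
        text = text[len("list of "):].strip()
        levels += 1
    return levels, text

assert count_list_levels("list of list of PhotoSize") == (2, "PhotoSize")
assert count_list_levels("str") == (0, "str")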
|
if text.startswith(prefix):
return True, text[len(prefix):].strip()
return False, text.strip()
|
def can_strip_prefix(text:str, prefix:str) -> (bool, str)
|
If the given text starts with the given prefix, True and the text without that prefix is returned.
Else False and the original text is returned.
Note: the text is always stripped before returning.
:param text:
:param prefix:
:return: (bool, str) :class:`bool` whether the text started with the given prefix, :class:`str` the text without the prefix
| 2.287503
| 3.464671
| 0.660237
|
if not self.api_name: # empty string
return self.api_name
# end if
return self.api_name[0].upper() + self.api_name[1:]
|
def class_name(self) -> str
|
Makes the first letter uppercase, keeping the rest of the camelCaseApiName.
| 4.889471
| 3.210718
| 1.522859
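class_name above upper-cases only the first character, which preserves the camel case of the API name; str.capitalize() would not. A quick illustration:

api_name = "sendVideoNote"
class_name = api_name[0].upper() + api_name[1:] if api_name else api_name
assert class_name == "SendVideoNote"
assert api_name.capitalize() == "Sendvideonote"   # capitalize() flattens the camel case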
|
# strip leading "Send"
name = self.class_name # "sendPhoto" -> "SendPhoto"
name = name[4:] if name.startswith('Send') else name # "SendPhoto" -> "Photo"
name = name + "Message" # "Photo" -> "PhotoMessage"
# e.g. "MessageMessage" will be replaced as "TextMessage"
# b/c "sendMessage" -> "SendMessage" -> "Message" -> "MessageMessage" ==> "TextMessage"
if name in MESSAGE_CLASS_OVERRIDES:
return MESSAGE_CLASS_OVERRIDES[name]
# end if
return name
|
def class_name_teleflask_message(self) -> str
|
If the class name starts with `Send`, that prefix is removed and `Message` is appended.
| 5.470012
| 4.788697
| 1.142276
|
if self.path:
if self.name:
return self.path + "." + self.name
else:
return self.path
# end if
else:
if self.name:
return self.name
else:
return ""
|
def full(self)
|
self.path + "." + self.name
| 3.135361
| 2.155985
| 1.454259
|
logger.debug("Trying parsing as {type}, list_level={list_level}, is_builtin={is_builtin}".format(
type=required_type.__name__, list_level=list_level, is_builtin=is_builtin
))
if list_level > 0:
assert isinstance(result, (list, tuple))
return [from_array_list(required_type, obj, list_level-1, is_builtin) for obj in result]
# end if
if is_builtin:
if isinstance(result, required_type):
logger.debug("Already is correct type.")
return required_type(result)
elif isinstance(required_type, unicode_type): # handle str, so emojis work for py2.
return u(result)
else:
import ast
logger.warn("Trying parsing with ast.literal_eval()...")
return ast.literal_eval(str(result)) # raises ValueError if it could not parse
# end if
else:
return required_type.from_array(result)
|
def from_array_list(required_type, result, list_level, is_builtin)
|
Tries to parse the `result` as type given in `required_type`, while traversing into lists as often as specified in `list_level`.
:param required_type: What it should be parsed as
:type required_type: class
:param result: The result to parse
:param list_level: "list of" * list_level
:type list_level: int
:param is_builtin: if it is a builtin python type like :class:`int`, :class:`bool`, etc.
:type is_builtin: bool
:return: the result as `required_type` type
| 3.273971
| 3.206632
| 1.021
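list_level above encodes how many layers of "list of " wrap the target type; the function recurses once per layer before parsing the innermost values. A standalone sketch of that recursion, with a plain cast standing in for the builtin/.from_array() handling:

def parse_nested(cast, result, list_level):
    # Descend list_level layers of lists, then apply `cast` to each leaf value.
    if list_level > 0:
        assert isinstance(result, (list, tuple))
        return [parse_nested(cast, item, list_level - 1) for item in result]
    return cast(result)

assert parse_nested(int, [["1", "2"], ["3"]], 2) == [[1, 2], [3]]  # "list of list of int"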
|
if hasattr(obj, "to_array"):
return obj.to_array()
elif isinstance(obj, (list, tuple)):
return [as_array(x) for x in obj]
elif isinstance(obj, dict):
return {key:as_array(obj[key]) for key in obj.keys()}
else:
_json_dumps(obj) # raises error if is wrong json
return obj
|
def as_array(obj)
|
Creates a JSON-like representation of a variable, supporting types with a `.to_array()` function.
:rtype: dict|list|str|int|float|bool|None
| 2.880221
| 2.961229
| 0.972644
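as_array above prefers an object's own to_array(), recurses into sequences and mappings, and validates leaves by attempting a JSON dump. A standalone sketch of the same idea:

import json

def as_array_sketch(obj):
    if hasattr(obj, "to_array"):
        return obj.to_array()                                    # delegate to the object's serializer
    if isinstance(obj, (list, tuple)):
        return [as_array_sketch(x) for x in obj]                 # recurse into sequences
    if isinstance(obj, dict):
        return {k: as_array_sketch(v) for k, v in obj.items()}   # recurse into mappings
    json.dumps(obj)                                              # raises TypeError if not serializable
    return obj

assert as_array_sketch({"ids": [1, 2], "ok": True}) == {"ids": [1, 2], "ok": True}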
|
return from_array_list(cls, result, list_level, is_builtin=False)
|
def from_array_list(cls, result, list_level)
|
Tries to parse the `result` as type given in `required_type`, while traversing into lists as often as specified in `list_level`.
:param cls: Type as what it should be parsed as. Can be any class extending :class:`TgBotApiObject`.
E.g. If you call `Class.from_array_list`, it will automatically be set to `Class`.
:type cls: class
:param result: The result to parse
:param list_level: "list of" * list_level
:type list_level: int
:return: the result as `required_type` type
| 6.853365
| 11.184513
| 0.612755
|
return from_array_list(required_type, value, list_level, is_builtin=True)
|
def _builtin_from_array_list(required_type, value, list_level)
|
Helper method to make :func:`from_array_list` available to all classes extending this,
without the need for additional imports.
:param required_type: Type as what it should be parsed as. Any builtin.
:param value: The result to parse
:param list_level: "list of" * list_level
:return:
| 4.797598
| 5.962274
| 0.804659
|
url, params = self._prepare_request(command, query)
return {
"url": url, "params": params, "files": files, "stream": use_long_polling,
"verify": True, # No self signed certificates. Telegram should be trustworthy anyway...
"timeout": request_timeout
}
|
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query)
|
Return the request params we would send to the api.
| 6.166047
| 5.50101
| 1.120894
|
array = super(Update, self).to_array()
array['update_id'] = int(self.update_id) # type int
if self.message is not None:
array['message'] = self.message.to_array() # type Message
if self.edited_message is not None:
array['edited_message'] = self.edited_message.to_array() # type Message
if self.channel_post is not None:
array['channel_post'] = self.channel_post.to_array() # type Message
if self.edited_channel_post is not None:
array['edited_channel_post'] = self.edited_channel_post.to_array() # type Message
if self.inline_query is not None:
array['inline_query'] = self.inline_query.to_array() # type InlineQuery
if self.chosen_inline_result is not None:
array['chosen_inline_result'] = self.chosen_inline_result.to_array() # type ChosenInlineResult
if self.callback_query is not None:
array['callback_query'] = self.callback_query.to_array() # type CallbackQuery
if self.shipping_query is not None:
array['shipping_query'] = self.shipping_query.to_array() # type ShippingQuery
if self.pre_checkout_query is not None:
array['pre_checkout_query'] = self.pre_checkout_query.to_array() # type PreCheckoutQuery
return array
|
def to_array(self)
|
Serializes this Update to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.259128
| 1.265095
| 0.995284
|
array = super(WebhookInfo, self).to_array()
array['url'] = u(self.url) # py2: type unicode, py3: type str
array['has_custom_certificate'] = bool(self.has_custom_certificate) # type bool
array['pending_update_count'] = int(self.pending_update_count) # type int
if self.last_error_date is not None:
array['last_error_date'] = int(self.last_error_date) # type int
if self.last_error_message is not None:
array['last_error_message'] = u(self.last_error_message) # py2: type unicode, py3: type str
if self.max_connections is not None:
array['max_connections'] = int(self.max_connections) # type int
if self.allowed_updates is not None:
array['allowed_updates'] = self._as_array(self.allowed_updates) # type list of str
return array
|
def to_array(self)
|
Serializes this WebhookInfo to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
| 1.692212
| 1.688329
| 1.0023
|
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['url'] = u(array.get('url'))
data['has_custom_certificate'] = bool(array.get('has_custom_certificate'))
data['pending_update_count'] = int(array.get('pending_update_count'))
data['last_error_date'] = int(array.get('last_error_date')) if array.get('last_error_date') is not None else None
data['last_error_message'] = u(array.get('last_error_message')) if array.get('last_error_message') is not None else None
data['max_connections'] = int(array.get('max_connections')) if array.get('max_connections') is not None else None
data['allowed_updates'] = WebhookInfo._builtin_from_array_list(required_type=unicode_type, value=array.get('allowed_updates'), list_level=1) if array.get('allowed_updates') is not None else None
data['_raw'] = array
return WebhookInfo(**data)
|
def from_array(array)
|
Deserialize a new WebhookInfo from a given dictionary.
:return: new WebhookInfo instance.
:rtype: WebhookInfo
| 2.414225
| 2.039294
| 1.183853
|