| Column | Type | Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
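Each row below follows this schema. As a minimal sketch of how the columns can be consumed (assuming the rows have been exported as newline-delimited JSON keyed by these exact column names; the file name rows.jsonl is a placeholder):

```python
import json

# Minimal sketch: scan rows with the schema above and keep small, mostly
# alphanumeric Python files. rows.jsonl is a hypothetical export where each
# line is one row keyed by the column names in the table.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        if row["ext"] != "py" or row["size"] > 10_000:
            continue
        if row["alphanum_fraction"] < 0.5:
            continue
        stars = row["max_stars_count"] or 0  # nullable column
        print(f'{row["max_stars_repo_name"]}:{row["max_stars_repo_path"]} ({stars} stars)')
```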

hexsha: dcab710d8c17a8ebb18ddd1d9ad4bb2976191f86 | size: 61 | ext: py | lang: Python
repo_path: vis/__init__.py | repo_name: DoubleTwelve/lamarepy | repo_head_hexsha: 548e6d6514183ab8ceafd1a42097210350a74985 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from lamarepy import geometry as g
from show_q import show_q
avg_line_length: 20.333333 | max_line_length: 34 | alphanum_fraction: 0.836066

hexsha: 0423991f614dbc92e8de0c9d23e2bce7822454ee | size: 1,147 | ext: py | lang: Python
repo_path: sdk/python/kfp/components/executor_main.py | repo_name: TheDutchDevil/pipelines | repo_head_hexsha: a5ba3f0fcd98ffd60f98bce964927ab63382d5d7 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-12-20T11:26:00.000Z to 2020-12-20T11:26:00.000Z) | max_issues_count: null | max_forks_count: 1 (2022-01-11T17:02:22.000Z to 2022-01-11T17:02:22.000Z)
content:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp.components.executor import Executor
def executor_main():
import argparse
import json
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--executor_input', type=str)
parser.add_argument('--function_to_execute', type=str)
args, _ = parser.parse_known_args()
executor_input = json.loads(args.executor_input)
function_to_execute = globals()[args.function_to_execute]
executor = Executor(executor_input=executor_input,
function_to_execute=function_to_execute)
executor.execute()
avg_line_length: 34.757576 | max_line_length: 74 | alphanum_fraction: 0.760244
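
The executor entry point above resolves `--function_to_execute` by name through `globals()`, so the target function must already be defined in the same module scope when `executor_main()` runs. A minimal sketch of that contract (the function name and the payload shape are hypothetical, not taken from the file above):

```python
import argparse
import json

def my_component() -> None:
    # Hypothetical user function; executor_main() would find it via globals().
    print("component body runs here")

# Hypothetical command line: --executor_input carries a JSON string,
# --function_to_execute carries a name looked up in the module globals.
argv = [
    "--executor_input", json.dumps({"inputs": {}}),
    "--function_to_execute", "my_component",
]

parser = argparse.ArgumentParser()
parser.add_argument("--executor_input", type=str)
parser.add_argument("--function_to_execute", type=str)
args, _ = parser.parse_known_args(argv)

executor_input = json.loads(args.executor_input)
function_to_execute = globals()[args.function_to_execute]
function_to_execute()  # the real entry point hands both values to Executor(...).execute()
```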

hexsha: ff2045ae64e8037c741c01140738e4bb6bc6924b | size: 7,335 | ext: py | lang: Python
repo_path: ckanext/datastore/view.py | repo_name: sabinem/ckan | repo_head_hexsha: dfad5d98a644a76939a57872073ef27dc7c68d86 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-07-09T02:10:18.000Z to 2020-07-09T02:10:18.000Z) | max_issues_count: 3 (2020-03-24T17:56:04.000Z to 2021-02-02T22:16:31.000Z) | max_forks_count: null
content:
# encoding: utf-8
from itertools import izip_longest
from flask import Blueprint, make_response
from flask.views import MethodView
import ckan.lib.navl.dictization_functions as dict_fns
from ckan.logic import (
tuplize_dict,
parse_params,
)
from ckan.plugins.toolkit import (
ObjectNotFound, NotAuthorized, get_action, get_validator, _, request,
abort, render, c, h
)
from ckanext.datastore.logic.schema import (
list_of_strings_or_string,
json_validator,
unicode_or_json_validator,
)
from ckanext.datastore.writer import (
csv_writer,
tsv_writer,
json_writer,
xml_writer,
)
int_validator = get_validator(u'int_validator')
boolean_validator = get_validator(u'boolean_validator')
ignore_missing = get_validator(u'ignore_missing')
OneOf = get_validator(u'OneOf')
default = get_validator(u'default')
unicode_only = get_validator(u'unicode_only')
DUMP_FORMATS = u'csv', u'tsv', u'json', u'xml'
PAGINATE_BY = 32000
datastore = Blueprint(u'datastore', __name__)
def dump_schema():
return {
u'offset': [default(0), int_validator],
u'limit': [ignore_missing, int_validator],
u'format': [default(u'csv'), OneOf(DUMP_FORMATS)],
u'bom': [default(False), boolean_validator],
u'filters': [ignore_missing, json_validator],
u'q': [ignore_missing, unicode_or_json_validator],
u'distinct': [ignore_missing, boolean_validator],
u'plain': [ignore_missing, boolean_validator],
u'language': [ignore_missing, unicode_only],
u'fields': [ignore_missing, list_of_strings_or_string],
u'sort': [default(u'_id'), list_of_strings_or_string],
}
def dump(resource_id):
data, errors = dict_fns.validate(request.args.to_dict(), dump_schema())
if errors:
abort(
400, u'\n'.join(
u'{0}: {1}'.format(k, u' '.join(e)) for k, e in errors.items()
)
)
response = make_response()
response.headers[u'content-type'] = u'application/octet-stream'
try:
dump_to(
resource_id,
response.stream,
fmt=data[u'format'],
offset=data[u'offset'],
limit=data.get(u'limit'),
options={u'bom': data[u'bom']},
sort=data[u'sort'],
search_params={
k: v
for k, v in data.items()
if k in [
u'filters', u'q', u'distinct', u'plain', u'language',
u'fields'
]
},
)
except ObjectNotFound:
abort(404, _(u'DataStore resource not found'))
return response
class DictionaryView(MethodView):
def _prepare(self, id, resource_id):
try:
# resource_edit_base template uses these
pkg_dict = get_action(u'package_show')(None, {u'id': id})
resource = get_action(u'resource_show')(None, {u'id': resource_id})
rec = get_action(u'datastore_search')(
None, {
u'resource_id': resource_id,
u'limit': 0
}
)
return {
u'pkg_dict': pkg_dict,
u'resource': resource,
u'fields': [
f for f in rec[u'fields'] if not f[u'id'].startswith(u'_')
]
}
except (ObjectNotFound, NotAuthorized):
abort(404, _(u'Resource not found'))
def get(self, id, resource_id):
u'''Data dictionary view: show field labels and descriptions'''
data_dict = self._prepare(id, resource_id)
# global variables for backward compatibility
c.pkg_dict = data_dict[u'pkg_dict']
c.resource = data_dict[u'resource']
return render(u'datastore/dictionary.html', data_dict)
def post(self, id, resource_id):
u'''Data dictionary view: edit field labels and descriptions'''
data_dict = self._prepare(id, resource_id)
fields = data_dict[u'fields']
data = dict_fns.unflatten(tuplize_dict(parse_params(request.form)))
info = data.get(u'info')
if not isinstance(info, list):
info = []
info = info[:len(fields)]
get_action(u'datastore_create')(
None, {
u'resource_id': resource_id,
u'force': True,
u'fields': [{
u'id': f[u'id'],
u'type': f[u'type'],
u'info': fi if isinstance(fi, dict) else {}
} for f, fi in izip_longest(fields, info)]
}
)
h.flash_success(
_(
u'Data Dictionary saved. Any type overrides will '
u'take effect when the resource is next uploaded '
u'to DataStore'
)
)
return h.redirect_to(
u'datastore.dictionary', id=id, resource_id=resource_id
)
def dump_to(
resource_id, output, fmt, offset, limit, options, sort, search_params
):
if fmt == u'csv':
writer_factory = csv_writer
records_format = u'csv'
elif fmt == u'tsv':
writer_factory = tsv_writer
records_format = u'tsv'
elif fmt == u'json':
writer_factory = json_writer
records_format = u'lists'
elif fmt == u'xml':
writer_factory = xml_writer
records_format = u'objects'
def start_writer(fields):
bom = options.get(u'bom', False)
return writer_factory(output, fields, resource_id, bom)
def result_page(offs, lim):
return get_action(u'datastore_search')(
None,
dict({
u'resource_id': resource_id,
u'limit': PAGINATE_BY
if limit is None else min(PAGINATE_BY, lim),
u'offset': offs,
u'sort': sort,
u'records_format': records_format,
u'include_total': False,
}, **search_params)
)
result = result_page(offset, limit)
if result[u'limit'] != limit:
# `limit` (from PAGINATE_BY) must have been more than
# ckan.datastore.search.rows_max, so datastore_search responded with a
# limit matching ckan.datastore.search.rows_max. So we need to paginate
# by that amount instead, otherwise we'll have gaps in the records.
paginate_by = result[u'limit']
else:
paginate_by = PAGINATE_BY
with start_writer(result[u'fields']) as wr:
while True:
if limit is not None and limit <= 0:
break
records = result[u'records']
wr.write_records(records)
if records_format == u'objects' or records_format == u'lists':
if len(records) < paginate_by:
break
elif not records:
break
offset += paginate_by
if limit is not None:
limit -= paginate_by
if limit <= 0:
break
result = result_page(offset, limit)
datastore.add_url_rule(u'/datastore/dump/<resource_id>', view_func=dump)
datastore.add_url_rule(
u'/dataset/<id>/dictionary/<resource_id>',
view_func=DictionaryView.as_view(str(u'dictionary'))
)
avg_line_length: 31.080508 | max_line_length: 79 | alphanum_fraction: 0.57396
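
The view above registers `dump` at `/datastore/dump/<resource_id>` and validates query parameters with `dump_schema()` (format limited to csv, tsv, json or xml). A minimal client-side sketch, assuming a running CKAN instance; the host and resource id are placeholders:

```python
import requests  # third-party HTTP client, not part of the view above

base = "https://ckan.example.org"                     # placeholder host
resource_id = "00000000-0000-0000-0000-000000000000"  # placeholder resource

# Query parameters mirror dump_schema(): format is one of DUMP_FORMATS,
# offset/limit are integers, sort defaults to _id.
resp = requests.get(
    f"{base}/datastore/dump/{resource_id}",
    params={"format": "json", "offset": 0, "limit": 100, "sort": "_id"},
    timeout=30,
)
resp.raise_for_status()
print(resp.headers.get("content-type"))  # the view sets application/octet-stream
print(resp.text[:200])
```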

hexsha: 8c33846566502fb907c05b2a4a21a923d929cbe8 | size: 5,170 | ext: py | lang: Python
repo_path: sdk/AsposeEmailCloudSdk/models/contact_save_request.py | repo_name: aspose-email-cloud/aspose-email-cloud-python | repo_head_hexsha: c5c13839cbbbfa5b6617bd1aedf3cf30cd664227 | licenses: ["MIT"]
max_stars_count: 1 (2020-02-26T13:19:06.000Z to 2020-02-26T13:19:06.000Z) | max_issues_count: null | max_forks_count: null
content:
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="ContactSaveRequest.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
from typing import List, Set, Dict, Tuple, Optional
from datetime import datetime
from AsposeEmailCloudSdk.models.contact_dto import ContactDto
from AsposeEmailCloudSdk.models.storage_file_location import StorageFileLocation
from AsposeEmailCloudSdk.models.storage_model_of_contact_dto import StorageModelOfContactDto
class ContactSaveRequest(StorageModelOfContactDto):
"""Contact save to storage request
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'storage_file': 'StorageFileLocation',
'value': 'ContactDto',
'format': 'str'
}
attribute_map = {
'storage_file': 'storageFile',
'value': 'value',
'format': 'format'
}
def __init__(self, storage_file: StorageFileLocation = None, value: ContactDto = None, format: str = None):
"""
Contact save to storage request
:param storage_file:
:type storage_file: StorageFileLocation
:param value:
:type value: ContactDto
:param format: Enumerates contact formats. Enum, available values: VCard, WebDav, Msg
:type format: str
"""
super(ContactSaveRequest, self).__init__()
self._format = None
if storage_file is not None:
self.storage_file = storage_file
if value is not None:
self.value = value
if format is not None:
self.format = format
@property
def format(self) -> str:
"""
Enumerates contact formats. Enum, available values: VCard, WebDav, Msg
:return: The format of this ContactSaveRequest.
:rtype: str
"""
return self._format
@format.setter
def format(self, format: str):
"""
Enumerates contact formats. Enum, available values: VCard, WebDav, Msg
:param format: The format of this ContactSaveRequest.
:type: str
"""
if format is None:
raise ValueError("Invalid value for `format`, must not be `None`")
self._format = format
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContactSaveRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 35.170068 | max_line_length: 111 | alphanum_fraction: 0.602708
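
A short usage sketch for the model above; it only touches attributes defined in this file, and assumes the SDK package is installed so the module is importable:

```python
from AsposeEmailCloudSdk.models.contact_save_request import ContactSaveRequest

# 'format' accepts one of the documented enum values: VCard, WebDav, Msg.
req = ContactSaveRequest(format="VCard")
print(req.to_dict())  # plain dict built from swagger_types: storage_file, value, format
print(req)            # __repr__ -> to_str() -> pprint.pformat(to_dict())
```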

hexsha: 25d534bc2b05ef04837c44040413711e57f4be9c | size: 129,380 | ext: py | lang: Python
repo_path: kivy/uix/textinput.py | repo_name: outdooracorn/kivy | repo_head_hexsha: a55ad400a52e038f110df10d8f3c1e822b90f76c | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 1 (2021-09-01T22:57:26.000Z to 2021-09-01T22:57:26.000Z) | max_forks_count: null
content:
'''
Text Input
==========
.. versionadded:: 1.0.4
.. image:: images/textinput-mono.jpg
.. image:: images/textinput-multi.jpg
The :class:`TextInput` widget provides a box for editable plain text.
Unicode, multiline, cursor navigation, selection and clipboard features
are supported.
The :class:`TextInput` uses two different coordinate systems:
* (x, y) - coordinates in pixels, mostly used for rendering on screen.
* (col, row) - cursor index in characters / lines, used for selection
and cursor movement.
Usage example
-------------
To create a multiline :class:`TextInput` (the 'enter' key adds a new line)::
from kivy.uix.textinput import TextInput
textinput = TextInput(text='Hello world')
To create a singleline :class:`TextInput`, set the :class:`TextInput.multiline`
property to False (the 'enter' key will defocus the TextInput and emit an
:meth:`TextInput.on_text_validate` event)::
def on_enter(instance, value):
print('User pressed enter in', instance)
textinput = TextInput(text='Hello world', multiline=False)
textinput.bind(on_text_validate=on_enter)
The textinput's text is stored in its :attr:`TextInput.text` property. To run a
callback when the text changes::
def on_text(instance, value):
print('The widget', instance, 'have:', value)
textinput = TextInput()
textinput.bind(text=on_text)
You can set the :class:`focus <kivy.uix.behaviors.FocusBehavior>` to a
Textinput, meaning that the input box will be highlighted and keyboard focus
will be requested::
textinput = TextInput(focus=True)
The textinput is defocused if the 'escape' key is pressed, or if another
widget requests the keyboard. You can bind a callback to the focus property to
get notified of focus changes::
def on_focus(instance, value):
if value:
print('User focused', instance)
else:
print('User defocused', instance)
textinput = TextInput()
textinput.bind(focus=on_focus)
See :class:`~kivy.uix.behaviors.FocusBehavior`, from which the
:class:`TextInput` inherits, for more details.
Selection
---------
The selection is automatically updated when the cursor position changes.
You can get the currently selected text from the
:attr:`TextInput.selection_text` property.
Filtering
---------
You can control which text can be added to the :class:`TextInput` by
overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted
or inserted by any other means into the :class:`TextInput` is passed through
this function. By overwriting it you can reject or change unwanted characters.
For example, to write only in capitalized characters::
class CapitalInput(TextInput):
def insert_text(self, substring, from_undo=False):
s = substring.upper()
return super().insert_text(s, from_undo=from_undo)
Or to only allow floats (0 - 9 and a single period)::
class FloatInput(TextInput):
pat = re.compile('[^0-9]')
def insert_text(self, substring, from_undo=False):
pat = self.pat
if '.' in self.text:
s = re.sub(pat, '', substring)
else:
s = '.'.join(
re.sub(pat, '', s)
for s in substring.split('.', 1)
)
return super().insert_text(s, from_undo=from_undo)
Default shortcuts
-----------------
=============== ========================================================
Shortcuts Description
--------------- --------------------------------------------------------
Left Move cursor to left
Right Move cursor to right
Up Move cursor to up
Down Move cursor to down
Home Move cursor at the beginning of the line
End Move cursor at the end of the line
PageUp Move cursor to 3 lines before
PageDown Move cursor to 3 lines after
Backspace Delete the selection or character before the cursor
Del Delete the selection of character after the cursor
Shift + <dir> Start a text selection. Dir can be Up, Down, Left or
Right
Control + c Copy selection
Control + x Cut selection
Control + v Paste clipboard content
Control + a Select all the content
Control + z undo
Control + r redo
=============== ========================================================
.. note::
To enable Emacs-style keyboard shortcuts, you can use
:class:`~kivy.uix.behaviors.emacs.EmacsBehavior`.
'''
import re
import sys
import math
from os import environ
from weakref import ref
from itertools import chain, islice
from kivy.animation import Animation
from kivy.base import EventLoop
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.config import Config
from kivy.core.window import Window
from kivy.metrics import inch
from kivy.utils import boundary, platform
from kivy.uix.behaviors import FocusBehavior
from kivy.core.text import Label, DEFAULT_FONT
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix, Callback
from kivy.graphics.context_instructions import Transform
from kivy.graphics.texture import Texture
from kivy.uix.widget import Widget
from kivy.uix.bubble import Bubble
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from kivy.properties import StringProperty, NumericProperty, \
BooleanProperty, AliasProperty, OptionProperty, \
ListProperty, ObjectProperty, VariableListProperty, ColorProperty
__all__ = ('TextInput', )
if 'KIVY_DOC' in environ:
def triggered(*_, **__):
def decorator_func(func):
def decorated_func(*args, **kwargs):
return func(*args, **kwargs)
return decorated_func
return decorator_func
else:
from kivy.clock import triggered
Cache_register = Cache.register
Cache_append = Cache.append
Cache_get = Cache.get
Cache_remove = Cache.remove
Cache_register('textinput.label', timeout=60.)
Cache_register('textinput.width', timeout=60.)
FL_IS_LINEBREAK = 0x01
FL_IS_WORDBREAK = 0x02
FL_IS_NEWLINE = FL_IS_LINEBREAK | FL_IS_WORDBREAK
# late binding
Clipboard = None
CutBuffer = None
MarkupLabel = None
_platform = platform
# for reloading, we need to keep a list of textinput to retrigger the rendering
_textinput_list = []
# cache the result
_is_osx = sys.platform == 'darwin'
# When we are generating documentation, Config doesn't exist
_is_desktop = False
if Config:
_is_desktop = Config.getboolean('kivy', 'desktop')
# register an observer to clear the textinput cache when OpenGL will reload
if 'KIVY_DOC' not in environ:
def _textinput_clear_cache(*l):
Cache_remove('textinput.label')
Cache_remove('textinput.width')
for wr in _textinput_list[:]:
textinput = wr()
if textinput is None:
_textinput_list.remove(wr)
else:
textinput._trigger_refresh_text()
textinput._refresh_hint_text()
from kivy.graphics.context import get_context
get_context().add_reload_observer(_textinput_clear_cache, True)
class Selector(ButtonBehavior, Image):
# Internal class for managing the selection Handles.
window = ObjectProperty()
target = ObjectProperty()
matrix = ObjectProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.matrix = self.target.get_window_matrix()
with self.canvas.before:
Callback(self.update_transform)
PushMatrix()
self.transform = Transform()
with self.canvas.after:
PopMatrix()
def update_transform(self, cb):
matrix = self.target.get_window_matrix()
if self.matrix != matrix:
self.matrix = matrix
self.transform.identity()
self.transform.transform(self.matrix)
def transform_touch(self, touch):
matrix = self.matrix.inverse()
touch.apply_transform_2d(
lambda x, y: matrix.transform_point(x, y, 0)[:2]
)
def on_touch_down(self, touch):
if self.parent is not EventLoop.window:
return
try:
touch.push()
self.transform_touch(touch)
self._touch_diff = self.top - touch.y
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
return super().on_touch_down(touch)
finally:
touch.pop()
class TextInputCutCopyPaste(Bubble):
# Internal class used for showing the little bubble popup when
# copy/cut/paste happen.
textinput = ObjectProperty(None)
''' Holds a reference to the TextInput this Bubble belongs to.
'''
but_cut = ObjectProperty(None)
but_copy = ObjectProperty(None)
but_paste = ObjectProperty(None)
but_selectall = ObjectProperty(None)
matrix = ObjectProperty(None)
_check_parent_ev = None
def __init__(self, **kwargs):
self.mode = 'normal'
super().__init__(**kwargs)
self._check_parent_ev = Clock.schedule_interval(self._check_parent, .5)
self.matrix = self.textinput.get_window_matrix()
with self.canvas.before:
Callback(self.update_transform)
PushMatrix()
self.transform = Transform()
with self.canvas.after:
PopMatrix()
def update_transform(self, cb):
m = self.textinput.get_window_matrix()
if self.matrix != m:
self.matrix = m
self.transform.identity()
self.transform.transform(self.matrix)
def transform_touch(self, touch):
matrix = self.matrix.inverse()
touch.apply_transform_2d(
lambda x, y: matrix.transform_point(x, y, 0)[:2])
def on_touch_down(self, touch):
try:
touch.push()
self.transform_touch(touch)
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
return super().on_touch_down(touch)
finally:
touch.pop()
def on_touch_up(self, touch):
try:
touch.push()
self.transform_touch(touch)
for child in self.content.children:
if ref(child) in touch.grab_list:
touch.grab_current = child
break
return super().on_touch_up(touch)
finally:
touch.pop()
def on_textinput(self, instance, value):
global Clipboard
if value and not Clipboard and not _is_desktop:
value._ensure_clipboard()
def _check_parent(self, dt):
# this is a prevention to get the Bubble staying on the screen, if the
# attached textinput is not on the screen anymore.
parent = self.textinput
while parent is not None:
if parent == parent.parent:
break
parent = parent.parent
if parent is None:
self._check_parent_ev.cancel()
if self.textinput:
self.textinput._hide_cut_copy_paste()
def on_parent(self, instance, value):
parent = self.textinput
mode = self.mode
if parent:
self.clear_widgets()
if mode == 'paste':
# show only paste on long touch
self.but_selectall.opacity = 1
widget_list = [self.but_selectall, ]
if not parent.readonly:
widget_list.append(self.but_paste)
elif parent.readonly:
# show only copy for read only text input
widget_list = (self.but_copy, )
else:
# normal mode
widget_list = (self.but_cut, self.but_copy, self.but_paste)
for widget in widget_list:
self.add_widget(widget)
def do(self, action):
textinput = self.textinput
if action == 'cut':
textinput._cut(textinput.selection_text)
elif action == 'copy':
textinput.copy()
elif action == 'paste':
textinput.paste()
elif action == 'selectall':
textinput.select_all()
self.mode = ''
anim = Animation(opacity=0, d=.333)
anim.bind(on_complete=lambda *args:
self.on_parent(self, self.parent))
anim.start(self.but_selectall)
return
self.hide()
def hide(self):
parent = self.parent
if not parent:
return
anim = Animation(opacity=0, d=.225)
anim.bind(on_complete=lambda *args: parent.remove_widget(self))
anim.start(self)
class TextInput(FocusBehavior, Widget):
'''TextInput class. See module documentation for more information.
:Events:
`on_text_validate`
Fired only in multiline=False mode when the user hits 'enter'.
This will also unfocus the textinput.
`on_double_tap`
Fired when a double tap happens in the text input. The default
behavior selects the text around the cursor position. More info at
:meth:`on_double_tap`.
`on_triple_tap`
Fired when a triple tap happens in the text input. The default
behavior selects the line around the cursor position. More info at
:meth:`on_triple_tap`.
`on_quad_touch`
Fired when four fingers are touching the text input. The default
behavior selects the whole text. More info at
:meth:`on_quad_touch`.
.. warning::
When changing a :class:`TextInput` property that requires re-drawing,
e.g. modifying the :attr:`text`, the updates occur on the next
clock cycle and not instantly. This might cause any changes to the
:class:`TextInput` that occur between the modification and the next
cycle to be ignored, or to use previous values. For example, after
a update to the :attr:`text`, changing the cursor in the same clock
frame will move it using the previous text and will likely end up in an
incorrect position. The solution is to schedule any updates to occur
on the next clock cycle using
:meth:`~kivy.clock.ClockBase.schedule_once`.
.. Note::
Selection is cancelled when TextInput is focused. If you need to
show selection when TextInput is focused, you should delay
(use Clock.schedule) the call to the functions for selecting
text (select_all, select_text).
.. versionchanged:: 1.10.0
`background_disabled_active` has been removed.
.. versionchanged:: 1.9.0
:class:`TextInput` now inherits from
:class:`~kivy.uix.behaviors.FocusBehavior`.
:attr:`~kivy.uix.behaviors.FocusBehavior.keyboard_mode`,
:meth:`~kivy.uix.behaviors.FocusBehavior.show_keyboard`,
:meth:`~kivy.uix.behaviors.FocusBehavior.hide_keyboard`,
:meth:`~kivy.uix.behaviors.FocusBehavior.focus`,
and :attr:`~kivy.uix.behaviors.FocusBehavior.input_type`
have been removed since they are now inherited
from :class:`~kivy.uix.behaviors.FocusBehavior`.
.. versionchanged:: 1.7.0
`on_double_tap`, `on_triple_tap` and `on_quad_touch` events added.
.. versionchanged:: 2.1.0
:attr:`~kivy.uix.behaviors.FocusBehavior.keyboard_suggestions`
is now inherited from :class:`~kivy.uix.behaviors.FocusBehavior`.
'''
__events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap',
'on_quad_touch')
_resolved_base_dir = None
def __init__(self, **kwargs):
self._update_graphics_ev = Clock.create_trigger(
self._update_graphics, -1)
self.is_focusable = kwargs.get('is_focusable', True)
self._cursor = [0, 0]
self._selection = False
self._selection_finished = True
self._selection_touch = None
self.selection_text = u''
self._selection_from = None
self._selection_to = None
self._selection_callback = None
self._handle_left = None
self._handle_right = None
self._handle_middle = None
self._bubble = None
self._lines_flags = []
self._lines_labels = []
self._lines_rects = []
self._hint_text_flags = []
self._hint_text_labels = []
self._hint_text_rects = []
self._label_cached = None
self._line_options = None
self._keyboard_mode = Config.get('kivy', 'keyboard_mode')
self._command_mode = False
self._command = ''
self.reset_undo()
self._touch_count = 0
self._ctrl_l = False
self._ctrl_r = False
self._alt_l = False
self._alt_r = False
self._refresh_text_from_property_ev = None
self._long_touch_ev = None
self._do_blink_cursor_ev = Clock.create_trigger(
self._do_blink_cursor, .5, interval=True)
self._refresh_line_options_ev = None
# [from; to) range of lines being partially or fully rendered
# in TextInput's viewport
self._visible_lines_range = 0, 0
self.interesting_keys = {
8: 'backspace',
13: 'enter',
127: 'del',
271: 'enter',
273: 'cursor_up',
274: 'cursor_down',
275: 'cursor_right',
276: 'cursor_left',
278: 'cursor_home',
279: 'cursor_end',
280: 'cursor_pgup',
281: 'cursor_pgdown',
303: 'shift_L',
304: 'shift_R',
305: 'ctrl_L',
306: 'ctrl_R',
308: 'alt_L',
307: 'alt_R'
}
super().__init__(**kwargs)
fbind = self.fbind
refresh_line_options = self._trigger_refresh_line_options
update_text_options = self._update_text_options
trigger_update_graphics = self._trigger_update_graphics
fbind('font_size', refresh_line_options)
fbind('font_name', refresh_line_options)
fbind('font_context', refresh_line_options)
fbind('font_family', refresh_line_options)
fbind('base_direction', refresh_line_options)
fbind('text_language', refresh_line_options)
def handle_readonly(instance, value):
if value and (not _is_desktop or not self.allow_copy):
self.is_focusable = False
if (not (value or self.disabled) or _is_desktop and
self._keyboard_mode == 'system'):
self._editable = True
else:
self._editable = False
fbind('padding', update_text_options)
fbind('tab_width', update_text_options)
fbind('font_size', update_text_options)
fbind('font_name', update_text_options)
fbind('size', update_text_options)
fbind('password', update_text_options)
fbind('password_mask', update_text_options)
fbind('pos', trigger_update_graphics)
fbind('halign', trigger_update_graphics)
fbind('readonly', handle_readonly)
fbind('focus', self._on_textinput_focused)
handle_readonly(self, self.readonly)
handles = self._trigger_position_handles = Clock.create_trigger(
self._position_handles)
self._trigger_show_handles = Clock.create_trigger(
self._show_handles, .05)
self._trigger_cursor_reset = Clock.create_trigger(
self._reset_cursor_blink)
self._trigger_update_cutbuffer = Clock.create_trigger(
self._update_cutbuffer)
refresh_line_options()
self._trigger_refresh_text()
fbind('pos', handles)
fbind('size', handles)
# when the gl context is reloaded, trigger the text rendering again.
_textinput_list.append(ref(self, TextInput._reload_remove_observer))
if platform == 'linux':
self._ensure_clipboard()
def on_text_validate(self):
pass
def cursor_index(self, cursor=None):
'''Return the cursor index in the text/value.
'''
if not cursor:
cursor = self.cursor
try:
lines = self._lines
if not lines:
return 0
flags = self._lines_flags
index, cursor_row = cursor
for _, line, flag in zip(
range(min(cursor_row, len(lines))),
lines,
flags
):
index += len(line)
if flag & FL_IS_LINEBREAK:
index += 1
if flags[cursor_row] & FL_IS_LINEBREAK:
index += 1
return index
except IndexError:
return 0
def cursor_offset(self):
'''Get the cursor x offset on the current line.
'''
offset = 0
row = int(self.cursor_row)
col = int(self.cursor_col)
lines = self._lines
if col and row < len(lines):
offset = self._get_text_width(
lines[row][:col],
self.tab_width,
self._label_cached
)
return offset
def get_cursor_from_index(self, index):
'''Return the (col, row) of the cursor from text index.
'''
index = boundary(index, 0, len(self.text))
if index <= 0:
return 0, 0
flags = self._lines_flags
lines = self._lines
if not lines:
return 0, 0
i = 0
for row, line in enumerate(lines):
count = i + len(line)
if flags[row] & FL_IS_LINEBREAK:
count += 1
i += 1
if count >= index:
return index - i, row
i = count
return int(index), int(row)
def select_text(self, start, end):
''' Select a portion of text displayed in this TextInput.
.. versionadded:: 1.4.0
:Parameters:
`start`
Index of textinput.text from where to start selection
`end`
Index of textinput.text till which the selection should be
displayed
'''
if end < start:
raise Exception('end must be superior to start')
text_length = len(self.text)
self._selection_from = boundary(start, 0, text_length)
self._selection_to = boundary(end, 0, text_length)
self._selection_finished = True
self._update_selection(True)
self._update_graphics_selection()
def select_all(self):
''' Select all of the text displayed in this TextInput.
.. versionadded:: 1.4.0
'''
self.select_text(0, len(self.text))
re_indent = re.compile(r'^(\s*|)')
def _auto_indent(self, substring):
index = self.cursor_index()
if index > 0:
_text = self.text
line_start = _text.rfind('\n', 0, index)
if line_start > -1:
line = _text[line_start + 1:index]
indent = self.re_indent.match(line).group()
substring += indent
return substring
def insert_text(self, substring, from_undo=False):
'''Insert new text at the current cursor position. Override this
function in order to pre-process text for input validation.
'''
if self.readonly or not substring or not self._lines:
return
if isinstance(substring, bytes):
substring = substring.decode('utf8')
if self.replace_crlf:
substring = substring.replace(u'\r\n', u'\n')
self._hide_handles(EventLoop.window)
if not from_undo and self.multiline and self.auto_indent \
and substring == u'\n':
substring = self._auto_indent(substring)
mode = self.input_filter
if mode not in (None, 'int', 'float'):
substring = mode(substring, from_undo)
if not substring:
return
col, row = self.cursor
cindex = self.cursor_index()
text = self._lines[row]
len_str = len(substring)
new_text = text[:col] + substring + text[col:]
if mode is not None:
if mode == 'int':
if not re.match(self._insert_int_pat, new_text):
return
elif mode == 'float':
if not re.match(self._insert_float_pat, new_text):
return
self._set_line_text(row, new_text)
wrap = (self._get_text_width(
new_text,
self.tab_width,
self._label_cached) > (self.width - self.padding[0] -
self.padding[2]))
if len_str > 1 or substring == u'\n' or wrap:
# Avoid refreshing text on every keystroke.
# Allows for faster typing of text when the amount of text in
# TextInput gets large.
(
start, finish, lines, lineflags, len_lines
) = self._get_line_from_cursor(row, new_text)
# calling trigger here could lead to wrong cursor positioning
# and repeating of text when keys are added rapidly in a automated
# fashion. From Android Keyboard for example.
self._refresh_text_from_property(
'insert', start, finish, lines, lineflags, len_lines
)
self.cursor = self.get_cursor_from_index(cindex + len_str)
# handle undo and redo
self._set_unredo_insert(cindex, cindex + len_str, substring, from_undo)
def _get_line_from_cursor(self, start, new_text):
# get current paragraph from cursor position
finish = start
lines = self._lines
linesflags = self._lines_flags
if start and not linesflags[start]:
start -= 1
new_text = u''.join((lines[start], new_text))
try:
while not linesflags[finish + 1]:
new_text = u''.join((new_text, lines[finish + 1]))
finish += 1
except IndexError:
pass
lines, lineflags = self._split_smart(new_text)
len_lines = max(1, len(lines))
return start, finish, lines, lineflags, len_lines
def _set_unredo_insert(self, ci, sci, substring, from_undo):
# handle undo and redo
if from_undo:
return
self._undo.append({
'undo_command': ('insert', ci, sci),
'redo_command': (ci, substring)
})
# reset redo when undo is appended to
self._redo = []
def reset_undo(self):
'''Reset undo and redo lists from memory.
.. versionadded:: 1.3.0
'''
self._redo = self._undo = []
def do_redo(self):
'''Do redo operation.
.. versionadded:: 1.3.0
This action re-does any command that has been un-done by
do_undo/ctrl+z. This function is automatically called when
`ctrl+r` keys are pressed.
'''
try:
x_item = self._redo.pop()
undo_type = x_item['undo_command'][0]
_get_cusror_from_index = self.get_cursor_from_index
if undo_type == 'insert':
cindex, substring = x_item['redo_command']
self.cursor = _get_cusror_from_index(cindex)
self.insert_text(substring, True)
elif undo_type == 'bkspc':
self.cursor = _get_cusror_from_index(x_item['redo_command'])
self.do_backspace(from_undo=True)
elif undo_type == 'shiftln':
direction, rows, cursor = x_item['redo_command'][1:]
self._shift_lines(direction, rows, cursor, True)
else:
# delsel
cindex, scindex = x_item['redo_command']
self._selection_from = cindex
self._selection_to = scindex
self._selection = True
self.delete_selection(True)
self.cursor = _get_cusror_from_index(cindex)
self._undo.append(x_item)
except IndexError:
# reached at top of undo list
pass
def do_undo(self):
'''Do undo operation.
.. versionadded:: 1.3.0
This action un-does any edits that have been made since the last
call to reset_undo().
This function is automatically called when `ctrl+z` keys are pressed.
'''
try:
x_item = self._undo.pop()
undo_type = x_item['undo_command'][0]
self.cursor = self.get_cursor_from_index(x_item['undo_command'][1])
if undo_type == 'insert':
cindex, scindex = x_item['undo_command'][1:]
self._selection_from = cindex
self._selection_to = scindex
self._selection = True
self.delete_selection(True)
elif undo_type == 'bkspc':
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
elif undo_type == 'shiftln':
direction, rows, cursor = x_item['undo_command'][1:]
self._shift_lines(direction, rows, cursor, True)
else:
# delsel
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
self._redo.append(x_item)
except IndexError:
# reached at top of undo list
pass
def do_backspace(self, from_undo=False, mode='bkspc'):
'''Do backspace operation from the current cursor position.
This action might do several things:
- removing the current selection if available.
- removing the previous char and move the cursor back.
- do nothing, if we are at the start.
'''
# IME system handles its own backspaces
if self.readonly or self._ime_composition:
return
col, row = self.cursor
_lines = self._lines
text = _lines[row]
cursor_index = self.cursor_index()
text_last_line = _lines[row - 1]
if col == 0 and row == 0:
return
_lines_flags = self._lines_flags
start = row
if col == 0:
substring = u'\n' if _lines_flags[row] else u' '
new_text = text_last_line + text
self._set_line_text(row - 1, new_text)
self._delete_line(row)
start = row - 1
else:
# ch = text[col-1]
substring = text[col - 1]
new_text = text[:col - 1] + text[col:]
self._set_line_text(row, new_text)
# refresh just the current line instead of the whole text
start, finish, lines, lineflags, len_lines = (
self._get_line_from_cursor(start, new_text)
)
# avoid trigger refresh, leads to issue with
# keys/text send rapidly through code.
self._refresh_text_from_property(
'del', start, finish, lines, lineflags, len_lines
)
self.cursor = self.get_cursor_from_index(cursor_index - 1)
# handle undo and redo
self._set_undo_redo_bkspc(
cursor_index,
cursor_index - 1,
substring, from_undo)
def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo):
# handle undo and redo for backspace
if from_undo:
return
self._undo.append({
'undo_command': ('bkspc', new_index, substring),
'redo_command': ol_index})
# reset redo when undo is appended to
self._redo = []
_re_whitespace = re.compile(r'\s+')
def _move_cursor_word_left(self, index=None):
pos = index or self.cursor_index()
if pos == 0:
return self.cursor
lines = self._lines
col, row = self.get_cursor_from_index(pos)
if col == 0:
row -= 1
col = len(lines[row])
while True:
matches = list(self._re_whitespace.finditer(lines[row], 0, col))
if not matches:
if col == 0:
if row == 0:
return 0, 0
row -= 1
col = len(lines[row])
continue
return 0, row
match = matches[-1]
mpos = match.end()
if mpos == col:
if len(matches) > 1:
match = matches[-2]
mpos = match.end()
else:
if match.start() == 0:
if row == 0:
return 0, 0
row -= 1
col = len(lines[row])
continue
return 0, row
col = mpos
return col, row
def _move_cursor_word_right(self, index=None):
pos = index or self.cursor_index()
col, row = self.get_cursor_from_index(pos)
lines = self._lines
mrow = len(lines) - 1
if row == mrow and col == len(lines[row]):
return col, row
if col == len(lines[row]):
row += 1
col = 0
while True:
matches = list(self._re_whitespace.finditer(lines[row], col))
if not matches:
if col == len(lines[row]):
if row == mrow:
return col, row
row += 1
col = 0
continue
return len(lines[row]), row
match = matches[0]
mpos = match.start()
if mpos == col:
if len(matches) > 1:
match = matches[1]
mpos = match.start()
else:
if match.end() == len(lines[row]):
if row == mrow:
return col, row
row += 1
col = 0
continue
return len(lines[row]), row
col = mpos
return col, row
def _expand_range(self, ifrom, ito=None):
if ito is None:
ito = ifrom
rfrom = self.get_cursor_from_index(ifrom)[1]
rtcol, rto = self.get_cursor_from_index(ito)
rfrom, rto = self._expand_rows(rfrom, rto + 1 if rtcol else rto)
return (self.cursor_index((0, rfrom)),
self.cursor_index((0, rto)))
def _expand_rows(self, rfrom, rto=None):
if rto is None or rto == rfrom:
rto = rfrom + 1
lines = self._lines
flags = list(reversed(self._lines_flags))
while rfrom > 0 and not (flags[rfrom - 1] & FL_IS_NEWLINE):
rfrom -= 1
rmax = len(lines) - 1
while 0 < rto < rmax and not (flags[rto - 1] & FL_IS_NEWLINE):
rto += 1
return max(0, rfrom), min(rmax, rto)
def _shift_lines(
self, direction, rows=None, old_cursor=None, from_undo=False
):
if self._selection_callback:
if from_undo:
self._selection_callback.cancel()
else:
return
lines = self._lines
flags = list(reversed(self._lines_flags))
labels = self._lines_labels
rects = self._lines_rects
orig_cursor = self.cursor
sel = None
if old_cursor is not None:
self.cursor = old_cursor
if not rows:
sindex = self.selection_from
eindex = self.selection_to
if (sindex or eindex) and sindex != eindex:
sindex, eindex = tuple(sorted((sindex, eindex)))
sindex, eindex = self._expand_range(sindex, eindex)
else:
sindex, eindex = self._expand_range(self.cursor_index())
srow = self.get_cursor_from_index(sindex)[1]
erow = self.get_cursor_from_index(eindex)[1]
sel = sindex, eindex
if direction < 0 and srow > 0:
psrow, perow = self._expand_rows(srow - 1)
rows = ((srow, erow), (psrow, perow))
elif direction > 0 and erow < len(lines) - 1:
psrow, perow = self._expand_rows(erow)
rows = ((srow, erow), (psrow, perow))
else:
(srow, erow), (psrow, perow) = rows
if direction < 0:
m1srow, m1erow = psrow, perow
m2srow, m2erow = srow, erow
cdiff = psrow - perow
xdiff = srow - erow
else:
m1srow, m1erow = srow, erow
m2srow, m2erow = psrow, perow
cdiff = perow - psrow
xdiff = erow - srow
self._lines_flags = list(reversed(chain(
flags[:m1srow],
flags[m2srow:m2erow],
flags[m1srow:m1erow],
flags[m2erow:],
)))
self._lines[:] = (
lines[:m1srow]
+ lines[m2srow:m2erow]
+ lines[m1srow:m1erow]
+ lines[m2erow:]
)
self._lines_labels = (
labels[:m1srow]
+ labels[m2srow:m2erow]
+ labels[m1srow:m1erow]
+ labels[m2erow:]
)
self._lines_rects = (
rects[:m1srow]
+ rects[m2srow:m2erow]
+ rects[m1srow:m1erow]
+ rects[m2erow:]
)
self._trigger_update_graphics()
csrow = srow + cdiff
cerow = erow + cdiff
sel = (
self.cursor_index((0, csrow)),
self.cursor_index((0, cerow))
)
self.cursor = self.cursor_col, self.cursor_row + cdiff
if not from_undo:
undo_rows = ((srow + cdiff, erow + cdiff),
(psrow - xdiff, perow - xdiff))
self._undo.append({
'undo_command': ('shiftln', direction * -1, undo_rows,
self.cursor),
'redo_command': ('shiftln', direction, rows, orig_cursor),
})
self._redo = []
if sel:
def cb(dt):
self.select_text(*sel)
self._selection_callback = None
self._selection_callback = Clock.schedule_once(cb)
@property
def pgmove_speed(self):
"""how much vertical distance hitting pg_up or pg_down will move
"""
return int(
self.height
/ (self.line_height + self.line_spacing) - 1
)
def _move_cursor_up(self, col, row, control=False, alt=False):
if self.multiline and control:
self.scroll_y = max(0, self.scroll_y - self.line_height)
elif not self.readonly and self.multiline and alt:
self._shift_lines(-1)
return
else:
row = max(row - 1, 0)
col = min(len(self._lines[row]), col)
return col, row
def _move_cursor_down(self, col, row, control, alt):
if self.multiline and control:
maxy = self.minimum_height - self.height
self.scroll_y = max(
0,
min(maxy, self.scroll_y + self.line_height)
)
elif not self.readonly and self.multiline and alt:
self._shift_lines(1)
return
else:
row = min(row + 1, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
return col, row
def do_cursor_movement(self, action, control=False, alt=False):
'''Move the cursor relative to its current position.
Action can be one of :
- cursor_left: move the cursor to the left
- cursor_right: move the cursor to the right
- cursor_up: move the cursor on the previous line
- cursor_down: move the cursor on the next line
- cursor_home: move the cursor at the start of the current line
- cursor_end: move the cursor at the end of current line
- cursor_pgup: move one "page" before
- cursor_pgdown: move one "page" after
In addition, the behavior of certain actions can be modified:
- control + cursor_left: move the cursor one word to the left
- control + cursor_right: move the cursor one word to the right
- control + cursor_up: scroll up one line
- control + cursor_down: scroll down one line
- control + cursor_home: go to beginning of text
- control + cursor_end: go to end of text
- alt + cursor_up: shift line(s) up
- alt + cursor_down: shift line(s) down
.. versionchanged:: 1.9.1
'''
if not self._lines:
return
col, row = self.cursor
if action == 'cursor_up':
result = self._move_cursor_up(col, row, control, alt)
if result:
col, row = result
else:
return
elif action == 'cursor_down':
result = self._move_cursor_down(col, row, control, alt)
if result:
col, row = result
else:
return
elif action == 'cursor_home':
col = 0
if control:
row = 0
elif action == 'cursor_end':
if control:
row = len(self._lines) - 1
col = len(self._lines[row])
elif action == 'cursor_pgup':
row = max(0, row - self.pgmove_speed)
col = min(len(self._lines[row]), col)
elif action == 'cursor_pgdown':
row = min(row + self.pgmove_speed, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
elif (
self._selection and self._selection_finished
and self._selection_from < self._selection_to
and action == 'cursor_left'
):
current_selection_to = self._selection_to
while self._selection_from != current_selection_to:
current_selection_to -= 1
if col:
col -= 1
else:
row -= 1
col = len(self._lines[row])
elif (
self._selection and self._selection_finished
and self._selection_from > self._selection_to
and action == 'cursor_right'
):
current_selection_to = self._selection_to
while self._selection_from != current_selection_to:
current_selection_to += 1
if len(self._lines[row]) > col:
col += 1
else:
row += 1
col = 0
elif action == 'cursor_left':
if not self.password and control:
col, row = self._move_cursor_word_left()
else:
if col == 0:
if row:
row -= 1
col = len(self._lines[row])
else:
col, row = col - 1, row
elif action == 'cursor_right':
if not self.password and control:
col, row = self._move_cursor_word_right()
else:
if col == len(self._lines[row]):
if row < len(self._lines) - 1:
col = 0
row += 1
else:
col, row = col + 1, row
dont_move_cursor = control and action in ['cursor_up', 'cursor_down']
if dont_move_cursor:
self._trigger_update_graphics()
else:
self.cursor = col, row
def get_cursor_from_xy(self, x, y):
'''Return the (col, row) of the cursor from an (x, y) position.
'''
padding_left, padding_top, padding_right, padding_bottom = self.padding
lines = self._lines
dy = self.line_height + self.line_spacing
cursor_x = x - self.x
scroll_y = self.scroll_y
scroll_x = self.scroll_x
scroll_y = scroll_y / dy if scroll_y > 0 else 0
cursor_y = (self.top - padding_top + scroll_y * dy) - y
cursor_y = int(boundary(
round(cursor_y / dy - 0.5),
0,
len(lines) - 1
))
get_text_width = self._get_text_width
tab_width = self.tab_width
label_cached = self._label_cached
# Offset for horizontal text alignment
xoff = 0
halign = self.halign
base_dir = self.base_direction or self._resolved_base_dir
auto_halign_r = halign == 'auto' and base_dir and 'rtl' in base_dir
if halign == 'center':
viewport_width = self.width - padding_left - padding_right
xoff = int((viewport_width - self._get_row_width(cursor_y)) / 2)
elif halign == 'right' or auto_halign_r:
viewport_width = self.width - padding_left - padding_right
xoff = viewport_width - self._get_row_width(cursor_y)
for i in range(0, len(lines[cursor_y])):
line_y = lines[cursor_y]
if cursor_x + scroll_x < (
xoff
+ get_text_width(line_y[:i], tab_width, label_cached)
+ get_text_width(line_y[i], tab_width, label_cached) * 0.6
+ padding_left
):
cursor_x = i
break
return int(cursor_x), int(cursor_y)
#
# Selection control
#
def cancel_selection(self):
'''Cancel current selection (if any).
'''
self._selection_from = self._selection_to = self.cursor_index()
self._selection = False
self._selection_finished = True
self._selection_touch = None
self.selection_text = u''
self._trigger_update_graphics()
def delete_selection(self, from_undo=False):
'''Delete the current text selection (if any).
'''
if self.readonly:
return
self._hide_handles(EventLoop.window)
scroll_x = self.scroll_x
scroll_y = self.scroll_y
cc, cr = self.cursor
if not self._selection:
return
text = self.text
a, b = sorted((self._selection_from, self._selection_to))
self.cursor = (start_col, start_row) = self.get_cursor_from_index(a)
end_col, end_row = self.get_cursor_from_index(b)
cur_line = (
self._lines[start_row][:start_col]
+ self._lines[end_row][end_col:]
)
lines, lineflags = self._split_smart(cur_line)
if start_row == end_row:
self._set_line_text(start_row, cur_line)
else:
self._refresh_text_from_property(
'del', start_row, end_row, lines, lineflags, len(lines)
)
self.scroll_x = scroll_x
self.scroll_y = scroll_y
# handle undo and redo for delete selection
self._set_unredo_delsel(a, b, text[a:b], from_undo)
self.cancel_selection()
def _set_unredo_delsel(self, a, b, substring, from_undo):
# handle undo and redo for backspace
if from_undo:
return
self._undo.append({
'undo_command': ('delsel', a, substring),
'redo_command': (a, b)})
# reset redo when undo is appended to
self._redo = []
def _update_selection(self, finished=False):
'''Update selection text and order of from/to if finished is True.
Can be called multiple times until finished is True.
'''
a, b = int(self._selection_from), int(self._selection_to)
if a > b:
a, b = b, a
self._selection_finished = finished
_selection_text = self.text[a:b]
self.selection_text = ("" if not self.allow_copy else
((self.password_mask * (b - a)) if
self.password else _selection_text))
if not finished:
self._selection = True
else:
self._selection = bool(len(_selection_text))
self._selection_touch = None
if a == 0:
# update graphics only on new line
# allows smoother scrolling, noticeably
# faster when dealing with large text.
self._update_graphics_selection()
# self._trigger_update_graphics()
#
# Touch control
#
def long_touch(self, dt):
self._long_touch_ev = None
if self._selection_to == self._selection_from:
pos = self.to_local(*self._long_touch_pos, relative=False)
self._show_cut_copy_paste(
pos, EventLoop.window, mode='paste')
def on_double_tap(self):
'''This event is dispatched when a double tap happens
inside TextInput. The default behavior is to select the
word around the current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = int(self.cursor_index())
cc = int(self.cursor_col)
line = self._lines[self.cursor_row]
len_line = len(line)
start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1)
end = line[cc:].find(u' ')
end = end if end > - 1 else (len_line - cc)
Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end))
def on_triple_tap(self):
'''This event is dispatched when a triple tap happens
inside TextInput. The default behavior is to select the
line around current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
sindex, eindex = self._expand_range(ci)
Clock.schedule_once(lambda dt: self.select_text(sindex, eindex))
def on_quad_touch(self):
'''This event is dispatched when four fingers are touching
inside TextInput. The default behavior is to select all text.
Override this to provide different behavior. Alternatively,
you can bind to this event to provide additional functionality.
'''
Clock.schedule_once(lambda dt: self.select_all())
def on_touch_down(self, touch):
if self.disabled:
return
touch_pos = touch.pos
if not self.collide_point(*touch_pos):
return False
if super().on_touch_down(touch):
return True
if self.focus:
self._trigger_cursor_reset()
# Check for scroll wheel
if 'button' in touch.profile and touch.button.startswith('scroll'):
# TODO: implement 'scrollleft' and 'scrollright'
scroll_type = touch.button[6:]
if scroll_type == 'down':
if self.multiline:
if self.scroll_y > 0:
self.scroll_y -= self.line_height
self._trigger_update_graphics()
else:
if self.scroll_x > 0:
self.scroll_x -= self.line_height
self._trigger_update_graphics()
if scroll_type == 'up':
if self.multiline:
viewport_height = self.height\
- self.padding[1] - self.padding[3]
text_height = len(self._lines) * (self.line_height
+ self.line_spacing)
if viewport_height < text_height - self.scroll_y:
self.scroll_y += self.line_height
self._trigger_update_graphics()
else:
if (self.scroll_x + self.width <
self._lines_rects[-1].texture.size[0]):
self.scroll_x += self.line_height
self._trigger_update_graphics()
return True
touch.grab(self)
self._touch_count += 1
if touch.is_double_tap:
self.dispatch('on_double_tap')
if touch.is_triple_tap:
self.dispatch('on_triple_tap')
if self._touch_count == 4:
self.dispatch('on_quad_touch')
self._hide_cut_copy_paste(EventLoop.window)
# schedule long touch for paste
self._long_touch_pos = touch.pos
self._long_touch_ev = Clock.schedule_once(self.long_touch, .5)
self.cursor = self.get_cursor_from_xy(*touch_pos)
if not self._selection_touch:
self.cancel_selection()
self._selection_touch = touch
self._selection_from = self._selection_to = self.cursor_index()
self._update_selection()
if CutBuffer and 'button' in touch.profile and \
touch.button == 'middle':
self.insert_text(CutBuffer.get_cutbuffer())
return True
return True
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
if not self.focus:
touch.ungrab(self)
if self._selection_touch is touch:
self._selection_touch = None
return False
if self._selection_touch is touch:
self.cursor = self.get_cursor_from_xy(touch.x, touch.y)
self._selection_to = self.cursor_index()
self._update_selection()
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
self._touch_count -= 1
# schedule long touch for paste
if self._long_touch_ev is not None:
self._long_touch_ev.cancel()
self._long_touch_ev = None
if not self.focus:
return False
if self._selection_touch is touch:
self._selection_to = self.cursor_index()
self._update_selection(True)
# show Bubble
win = EventLoop.window
if self._selection_to != self._selection_from:
self._show_cut_copy_paste(touch.pos, win)
elif self.use_handles:
self._hide_handles()
handle_middle = self._handle_middle
if handle_middle is None:
self._handle_middle = handle_middle = Selector(
source=self.handle_image_middle,
window=win,
target=self,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_middle.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
if not self._handle_middle.parent and self.text:
EventLoop.window.add_widget(handle_middle, canvas='after')
self._position_handles(mode='middle')
return True
def _handle_pressed(self, instance):
self._hide_cut_copy_paste()
from_, to_ = self._selection_from, self.selection_to
if from_ > to_:
self._selection_from, self._selection_to = to_, from_
def _handle_released(self, instance):
if self._selection_from == self.selection_to:
return
self._update_selection()
self._show_cut_copy_paste(
(
instance.right
if instance is self._handle_left
else instance.x,
instance.top + self.line_height
),
EventLoop.window
)
def _handle_move(self, instance, touch):
if touch.grab_current != instance:
return
get_cursor = self.get_cursor_from_xy
handle_right = self._handle_right
handle_left = self._handle_left
handle_middle = self._handle_middle
try:
touch.push()
touch.apply_transform_2d(self.to_widget)
x, y = touch.pos
finally:
touch.pop()
cursor = get_cursor(
x,
y + instance._touch_diff + (self.line_height / 2)
)
if instance != touch.grab_current:
return
if instance == handle_middle:
self.cursor = cursor
self._position_handles(mode='middle')
return
cindex = self.cursor_index(cursor=cursor)
if instance == handle_left:
self._selection_from = cindex
elif instance == handle_right:
self._selection_to = cindex
self._trigger_update_graphics()
self._trigger_position_handles()
def _position_handles(self, *args, **kwargs):
if not self.text:
return
mode = kwargs.get('mode', 'both')
lh = self.line_height
handle_middle = self._handle_middle
if handle_middle:
hp_mid = self.cursor_pos
pos = self.to_local(*hp_mid, relative=True)
handle_middle.x = pos[0] - handle_middle.width / 2
handle_middle.top = pos[1] - lh
if mode[0] == 'm':
return
group = self.canvas.get_group('selection')
if not group:
return
EventLoop.window.remove_widget(self._handle_middle)
handle_left = self._handle_left
if not handle_left:
return
hp_left = group[2].pos
handle_left.pos = self.to_local(*hp_left, relative=True)
handle_left.x -= handle_left.width
handle_left.y -= handle_left.height
handle_right = self._handle_right
last_rect = group[-1]
hp_right = last_rect.pos[0], last_rect.pos[1]
x, y = self.to_local(*hp_right, relative=True)
handle_right.x = x + last_rect.size[0]
handle_right.y = y - handle_right.height
def _hide_handles(self, win=None):
win = win or EventLoop.window
if win is None:
return
win.remove_widget(self._handle_right)
win.remove_widget(self._handle_left)
win.remove_widget(self._handle_middle)
def _show_handles(self, dt):
if not self.use_handles or not self.text:
return
win = EventLoop.window
handle_right = self._handle_right
handle_left = self._handle_left
if self._handle_left is None:
self._handle_left = handle_left = Selector(
source=self.handle_image_left,
target=self,
window=win,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_left.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
self._handle_right = handle_right = Selector(
source=self.handle_image_right,
target=self,
window=win,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_right.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
else:
if self._handle_left.parent:
self._position_handles()
return
if not self.parent:
return
self._trigger_position_handles()
if self.selection_from != self.selection_to:
self._handle_left.opacity = self._handle_right.opacity = 0
win.add_widget(self._handle_left, canvas='after')
win.add_widget(self._handle_right, canvas='after')
anim = Animation(opacity=1, d=.4)
anim.start(self._handle_right)
anim.start(self._handle_left)
def _show_cut_copy_paste(
self, pos, win, parent_changed=False, mode='', pos_in_window=False, *l
):
"""Show a bubble with cut copy and paste buttons"""
if not self.use_bubble:
return
bubble = self._bubble
if bubble is None:
self._bubble = bubble = TextInputCutCopyPaste(textinput=self)
self.fbind('parent', self._show_cut_copy_paste, pos, win, True)
def hide_(*args):
return self._hide_cut_copy_paste(win)
self.bind(
focus=hide_,
cursor_pos=hide_,
)
else:
win.remove_widget(bubble)
if not self.parent:
return
if parent_changed:
return
        # convert the touch position to window coordinates
lh, ls = self.line_height, self.line_spacing
x, y = pos
t_pos = (x, y) if pos_in_window else self.to_window(x, y)
bubble_size = bubble.size
bubble_hw = bubble_size[0] / 2.
win_size = win.size
bubble_pos = (t_pos[0], t_pos[1] + inch(.25))
if (bubble_pos[0] - bubble_hw) < 0:
# bubble beyond left of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (bubble_hw, (t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_left'
else:
bubble_pos = (bubble_hw, bubble_pos[1])
bubble.arrow_pos = 'bottom_left'
elif (bubble_pos[0] + bubble_hw) > win_size[0]:
# bubble beyond right of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (
win_size[0] - bubble_hw,
(t_pos[1]) - (lh + ls + inch(.25))
)
bubble.arrow_pos = 'top_right'
else:
bubble_pos = (win_size[0] - bubble_hw, bubble_pos[1])
bubble.arrow_pos = 'bottom_right'
else:
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (
bubble_pos[0],
(t_pos[1]) - (lh + ls + inch(.25))
)
bubble.arrow_pos = 'top_mid'
else:
bubble.arrow_pos = 'bottom_mid'
bubble_pos = self.to_widget(*bubble_pos, relative=True)
bubble.center_x = bubble_pos[0]
if bubble.arrow_pos[0] == 't':
bubble.top = bubble_pos[1]
else:
bubble.y = bubble_pos[1]
bubble.mode = mode
Animation.cancel_all(bubble)
bubble.opacity = 0
win.add_widget(bubble, canvas='after')
Animation(opacity=1, d=.225).start(bubble)
def _hide_cut_copy_paste(self, win=None):
bubble = self._bubble
if not bubble:
return
bubble.hide()
#
# Private
#
@staticmethod
def _reload_remove_observer(wr):
"""called when the textinput is deleted"""
if wr in _textinput_list:
_textinput_list.remove(wr)
def _on_textinput_focused(self, instance, value, *largs):
win = EventLoop.window
self.cancel_selection()
self._hide_cut_copy_paste(win)
if value:
if (
not (self.readonly or self.disabled)
or _is_desktop
and self._keyboard_mode == 'system'
):
self._trigger_cursor_reset()
self._editable = True
else:
self._editable = False
else:
self._do_blink_cursor_ev.cancel()
self._hide_handles(win)
def _ensure_clipboard(self):
global Clipboard, CutBuffer
if not Clipboard:
from kivy.core.clipboard import Clipboard, CutBuffer
def cut(self):
''' Copy current selection to clipboard then delete it from TextInput.
.. versionadded:: 1.8.0
'''
self._cut(self.selection_text)
def _cut(self, data):
self._ensure_clipboard()
Clipboard.copy(data)
self.delete_selection()
def copy(self, data=''):
        ''' Copy the value provided in argument `data` into the current
        clipboard. If data is not a string, it will be converted to a string.
        If no data is provided then the current selection, if present, is
        copied.
.. versionadded:: 1.8.0
'''
self._ensure_clipboard()
if data:
return Clipboard.copy(data)
if self.selection_text:
return Clipboard.copy(self.selection_text)
def paste(self):
''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
into the :class:`~kivy.uix.textinput.TextInput` at current cursor
position.
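        A minimal illustration (``ti`` is assumed to be an existing
        TextInput instance)::

            ti.copy('hello')   # put 'hello' on the clipboard
            ti.paste()         # insert it at the current cursor position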
.. versionadded:: 1.8.0
'''
self._ensure_clipboard()
data = Clipboard.paste()
self.delete_selection()
self.insert_text(data)
def _update_cutbuffer(self, *args):
CutBuffer.set_cutbuffer(self.selection_text)
def _get_text_width(self, text, tab_width, _label_cached):
"""Return the width of a text, according to the current line options"""
kw = self._get_line_options()
try:
cid = u'{}\0{}\0{}'.format(text, self.password, kw)
except UnicodeDecodeError:
cid = '{}\0{}\0{}'.format(text, self.password, kw)
width = Cache_get('textinput.width', cid)
if width:
return width
if not _label_cached:
_label_cached = self._label_cached
text = text.replace('\t', ' ' * tab_width)
if not self.password:
width = _label_cached.get_extents(text)[0]
else:
width = _label_cached.get_extents(
self.password_mask * len(text))[0]
Cache_append('textinput.width', cid, width)
return width
def on_cursor_blink(self, instance, value):
"""trigger blink event reset to switch blinking while focused"""
self._reset_cursor_blink()
def _do_blink_cursor(self, dt):
if not self.cursor_blink:
            # cursor_blink was just disabled: cancel any pending blink
            # event and keep the cursor visible
if self._do_blink_cursor_ev.is_triggered:
self._do_blink_cursor_ev.cancel()
# don't blink, make cursor visible
self._cursor_blink = False
return
# Callback for blinking the cursor.
self._cursor_blink = not self._cursor_blink
def _reset_cursor_blink(self, *args):
self._do_blink_cursor_ev.cancel()
self._cursor_blink = False
self._do_blink_cursor_ev()
def on_cursor(self, instance, value):
"""
When the cursor is moved, reset cursor blinking to keep it showing,
and update all the graphics.
"""
if self.focus:
self._trigger_cursor_reset()
self._trigger_update_graphics()
def _delete_line(self, idx):
"""Delete current line, and fix cursor position"""
assert(idx < len(self._lines))
self._lines_flags.pop(idx)
self._lines_labels.pop(idx)
self._lines.pop(idx)
self.cursor = self.cursor
def _set_line_text(self, line_num, text):
"""Set current line with other text than the default one."""
self._lines_labels[line_num] = self._create_line_label(text)
self._lines[line_num] = text
def _trigger_refresh_line_options(self, *largs):
if self._refresh_line_options_ev is not None:
self._refresh_line_options_ev.cancel()
else:
self._refresh_line_options_ev = Clock.create_trigger(
self._refresh_line_options, 0)
self._refresh_line_options_ev()
def _refresh_line_options(self, *largs):
self._line_options = None
self._get_line_options()
self._refresh_text_from_property()
self._refresh_hint_text()
self.cursor = self.get_cursor_from_index(len(self.text))
def _trigger_refresh_text(self, *largs):
if len(largs) and largs[0] == self:
largs = ()
if self._refresh_text_from_property_ev is not None:
self._refresh_text_from_property_ev.cancel()
self._refresh_text_from_property_ev = Clock.schedule_once(
lambda dt: self._refresh_text_from_property(*largs))
def _update_text_options(self, *largs):
Cache_remove('textinput.width')
self._trigger_refresh_text()
def _refresh_text_from_trigger(self, dt, *largs):
self._refresh_text_from_property(*largs)
def _refresh_text_from_property(self, *largs):
self._refresh_text(self.text, *largs)
def _refresh_text(self, text, *largs):
"""
Refresh all the lines from a new text.
By using cache in internal functions, this method should be fast.
"""
mode = 'all'
if len(largs) > 1:
mode, start, finish, _lines, _lines_flags, len_lines = largs
# start = max(0, start)
cursor = None
else:
cursor = self.cursor_index()
_lines, self._lines_flags = self._split_smart(text)
_lines_labels = []
_line_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x)
_lines_labels.append(lbl)
_line_rects.append(Rectangle(size=lbl.size))
if mode == 'all':
self._lines_labels = _lines_labels
self._lines_rects = _line_rects
self._lines[:] = _lines
elif mode == 'del':
if finish > start:
self._insert_lines(start,
finish if start == finish else (finish + 1),
len_lines, _lines_flags,
_lines, _lines_labels, _line_rects)
elif mode == 'insert':
self._insert_lines(
start,
finish if (start == finish and not len_lines)
else (finish + 1),
len_lines, _lines_flags, _lines, _lines_labels,
_line_rects)
min_line_ht = self._label_cached.get_extents('_')[1]
        # with markup, the texture can have a height of `1`
self.line_height = max(_lines_labels[0].height, min_line_ht)
# self.line_spacing = 2
        # the text may have changed, so the cursor may no longer be at the
        # same place as before; try to restore it to the right position
row = self.cursor_row
self.cursor = self.get_cursor_from_index(
self.cursor_index() if cursor is None else cursor
)
        # if the cursor ended up on a different row, reset the horizontal
        # scroll, otherwise the effect is ugly
if self.cursor_row != row:
self.scroll_x = 0
# with the new text don't forget to update graphics again
self._trigger_update_graphics()
def _insert_lines(self, start, finish, len_lines, _lines_flags,
_lines, _lines_labels, _line_rects):
self_lines_flags = self._lines_flags
_lins_flags = []
_lins_flags.extend(self_lines_flags[:start])
if len_lines:
# if not inserting at first line then
if start:
                # make sure the line flags are restored for the first line;
                # _split_smart assumes the first line is not a new line
_lines_flags[0] = self_lines_flags[start]
_lins_flags.extend(_lines_flags)
_lins_flags.extend(self_lines_flags[finish:])
self._lines_flags = _lins_flags
_lins_lbls = []
_lins_lbls.extend(self._lines_labels[:start])
if len_lines:
_lins_lbls.extend(_lines_labels)
_lins_lbls.extend(self._lines_labels[finish:])
self._lines_labels = _lins_lbls
_lins_rcts = []
_lins_rcts.extend(self._lines_rects[:start])
if len_lines:
_lins_rcts.extend(_line_rects)
_lins_rcts.extend(self._lines_rects[finish:])
self._lines_rects = _lins_rcts
_lins = []
_lins.extend(self._lines[:start])
if len_lines:
_lins.extend(_lines)
_lins.extend(self._lines[finish:])
self._lines[:] = _lins
def _trigger_update_graphics(self, *largs):
self._update_graphics_ev.cancel()
self._update_graphics_ev()
def _update_graphics(self, *largs):
"""
Update all the graphics according to the current internal values.
"""
        # This is a little bit complex, because we have to:
# - handle scroll_x
# - handle padding
# - create rectangle for the lines matching the viewport
# - crop the texture coordinates to match the viewport
# This is the first step of graphics, the second is the selection.
self.canvas.clear()
line_height = self.line_height
dy = line_height + self.line_spacing
# adjust view if the cursor is going outside the bounds
scroll_x = self.scroll_x
scroll_y = self.scroll_y
# draw labels
if (
not self._lines
or (not self._lines[0] and len(self._lines) == 1)
):
rects = self._hint_text_rects
labels = self._hint_text_labels
lines = self._hint_text_lines
else:
rects = self._lines_rects
labels = self._lines_labels
lines = self._lines
padding_left, padding_top, padding_right, padding_bottom = self.padding
x = self.x + padding_left
y = self.top - padding_top + scroll_y
miny = self.y + padding_bottom
maxy = self.top - padding_top
halign = self.halign
base_dir = self.base_direction
auto_halign_r = halign == 'auto' and base_dir and 'rtl' in base_dir
fst_visible_ln = None
viewport_pos = scroll_x, 0
for line_num, value in enumerate(lines):
if miny < y < maxy + dy:
if fst_visible_ln is None:
fst_visible_ln = line_num
y = self._draw_line(
value,
line_num,
labels[line_num],
viewport_pos,
line_height,
miny,
maxy,
x,
y,
base_dir,
halign,
rects,
auto_halign_r,
)
elif y <= miny:
line_num -= 1
break
y -= dy
if fst_visible_ln is not None:
self._visible_lines_range = (fst_visible_ln, line_num + 1)
else:
self._visible_lines_range = 0, 0
self._update_graphics_selection()
def _draw_line(
self,
value,
line_num,
texture,
viewport_pos,
line_height,
miny,
maxy,
x,
y,
base_dir,
halign,
rects,
auto_halign_r,
):
size = list(texture.size)
texcoords = texture.tex_coords[:]
# compute coordinate
padding_left, padding_top, padding_right, padding_bottom = self.padding
viewport_width = self.width - padding_left - padding_right
viewport_height = self.height - padding_top - padding_bottom
texture_width, texture_height = size
original_height, original_width = tch, tcw = texcoords[1:3]
# adjust size/texcoord according to viewport
if viewport_pos:
tcx, tcy = viewport_pos
tcx = tcx / texture_width * original_width
tcy = tcy / texture_height * original_height
else:
tcx, tcy = 0, 0
if texture_width * (1 - tcx) < viewport_width:
tcw = tcw - tcx
texture_width = tcw * texture_width
elif viewport_width < texture_width:
tcw = (viewport_width / texture_width) * tcw
texture_width = viewport_width
if viewport_height < texture_height:
tch = (viewport_height / texture_height) * tch
texture_height = viewport_height
# cropping
if y > maxy:
viewport_height = (maxy - y + line_height)
tch = (viewport_height / line_height) * original_height
tcy = original_height - tch
texture_height = viewport_height
if y - line_height < miny:
diff = miny - (y - line_height)
y += diff
viewport_height = line_height - diff
tch = (viewport_height / line_height) * original_height
texture_height = viewport_height
if tcw < 0:
# nothing to show
return y
top_left_corner = tcx, tcy + tch
top_right_corner = tcx + tcw, tcy + tch
bottom_right_corner = tcx + tcw, tcy
bottom_left_corner = tcx, tcy
texcoords = (
top_left_corner
+ top_right_corner
+ bottom_right_corner
+ bottom_left_corner
)
# Horizontal alignment
xoffset = 0
if not base_dir:
base_dir = self._resolved_base_dir = Label.find_base_direction(value) # noqa
if base_dir and halign == 'auto':
auto_halign_r = 'rtl' in base_dir
if halign == 'center':
xoffset = int((viewport_width - texture_width) / 2.)
elif halign == 'right' or auto_halign_r:
xoffset = max(0, int(viewport_width - texture_width))
# add rectangle
rect = rects[line_num]
rect.pos = int(xoffset + x), int(y - line_height)
rect.size = texture_width, texture_height
rect.texture = texture
rect.tex_coords = texcoords
# useful to debug rectangle sizes
# self.canvas.add(Color(0, .5, 0, .5, mode='rgba'))
# self.canvas.add(Rectangle(pos=rect.pos, size=rect.size))
# self.canvas.add(Color())
self.canvas.add(rect)
return y
def _update_graphics_selection(self):
if not self._selection:
return
# local references to avoid dot lookups later
padding_left, padding_top, padding_right, padding_bottom = self.padding
rects = self._lines_rects
label_cached = self._label_cached
lines = self._lines
tab_width = self.tab_width
top = self.top
get_text_width = self._get_text_width
get_cursor_from_index = self.get_cursor_from_index
draw_selection = self._draw_selection
canvas_add = self.canvas.add
selection_color = self.selection_color
# selection borders
a, b = sorted((self._selection_from, self._selection_to))
selection_start_col, selection_start_row = get_cursor_from_index(a)
selection_end_col, selection_end_row = get_cursor_from_index(b)
dy = self.line_height + self.line_spacing
x = self.x
y = top - padding_top + self.scroll_y - selection_start_row * dy
width = self.width
miny = self.y + padding_bottom
maxy = top - padding_top + dy
self.canvas.remove_group('selection')
first_visible_line = math.floor(self.scroll_y / dy)
last_visible_line = math.ceil((self.scroll_y + maxy - miny) / dy)
width_minus_padding = width - (padding_right + padding_left)
for line_num, rect in enumerate(
islice(
rects,
max(selection_start_row, first_visible_line),
min(selection_end_row + 1, last_visible_line),
),
start=selection_start_row
):
draw_selection(
rect.pos,
rect.size,
line_num,
(selection_start_col, selection_start_row),
(selection_end_col, selection_end_row),
lines,
get_text_width,
tab_width,
label_cached,
width_minus_padding,
padding_left,
padding_right,
x,
canvas_add,
selection_color
)
y -= dy
self._position_handles('both')
def _draw_selection(
self,
pos,
size,
line_num,
selection_start,
selection_end,
lines,
get_text_width,
tab_width,
label_cached,
width_minus_padding,
padding_left,
padding_right,
x,
canvas_add,
selection_color
):
selection_start_col, selection_start_row = selection_start
selection_end_col, selection_end_row = selection_end
# Draw the current selection on the widget.
if not selection_start_row <= line_num <= selection_end_row:
return
x, y = pos
w, h = size
beg = x
end = x + w
if line_num == selection_start_row:
line = lines[line_num]
beg -= self.scroll_x
beg += get_text_width(
line[:selection_start_col],
tab_width,
label_cached
)
if line_num == selection_end_row:
line = lines[line_num]
end = (x - self.scroll_x) + get_text_width(
line[:selection_end_col],
tab_width,
label_cached
)
beg = boundary(beg, x, x + width_minus_padding)
end = boundary(end, x, x + width_minus_padding)
if beg == end:
return
canvas_add(Color(*selection_color, group='selection'))
canvas_add(
Rectangle(
pos=(beg, y),
size=(end - beg, h),
group='selection'
)
)
def on_size(self, instance, value):
        # if the size changes, we might end up with invalid scrolling or an
        # invalid text split; the text may need to be re-laid out once
        # size_hint has been resolved.
self._trigger_refresh_text()
self._refresh_hint_text()
self.scroll_x = self.scroll_y = 0
def _get_row_width(self, row):
# Get the pixel width of the given row.
_labels = self._lines_labels
if row < len(_labels):
return _labels[row].width
return 0
def _get_cursor_pos(self):
# return the current cursor x/y from the row/col
dy = self.line_height + self.line_spacing
padding_left = self.padding[0]
padding_top = self.padding[1]
padding_right = self.padding[2]
left = self.x + padding_left
top = self.top - padding_top
y = top + self.scroll_y
y -= self.cursor_row * dy
# Horizontal alignment
halign = self.halign
viewport_width = self.width - padding_left - padding_right
cursor_offset = self.cursor_offset()
base_dir = self.base_direction or self._resolved_base_dir
auto_halign_r = halign == 'auto' and base_dir and 'rtl' in base_dir
if halign == 'center':
row_width = self._get_row_width(self.cursor_row)
x = (
left
+ (viewport_width - row_width) // 2
+ cursor_offset
- self.scroll_x
)
elif halign == 'right' or auto_halign_r:
row_width = self._get_row_width(self.cursor_row)
x = (
left
+ viewport_width
- row_width
+ cursor_offset
- self.scroll_x
)
else:
x = left + cursor_offset - self.scroll_x
return x, y
def _get_cursor_visual_height(self):
# Return the height of the cursor's visible part
_, cy = map(int, self.cursor_pos)
max_y = self.top - self.padding[1]
min_y = self.y + self.padding[3]
lh = self.line_height
if cy > max_y:
return lh - min(lh, cy - max_y)
else:
return min(lh, max(0, cy - min_y))
def _get_cursor_visual_pos(self):
# Return the position of the cursor's top visible point
cx, cy = map(int, self.cursor_pos)
max_y = self.top - self.padding[3]
return [cx, min(max_y, cy)]
def _get_line_options(self):
# Get or create line options, to be used for Label creation
if self._line_options is None:
self._line_options = kw = {
'font_size': self.font_size,
'font_name': self.font_name,
'font_context': self.font_context,
'font_family': self.font_family,
'text_language': self.text_language,
'base_direction': self.base_direction,
'anchor_x': 'left',
'anchor_y': 'top',
'padding_x': 0,
'padding_y': 0,
'padding': (0, 0)
}
self._label_cached = Label(**kw)
return self._line_options
def _create_line_label(self, text, hint=False):
# Create a label from a text, using line options
ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width)
if self.password and not hint: # Don't replace hint_text with *
ntext = self.password_mask * len(ntext)
kw = self._get_line_options()
cid = '%s\0%s' % (ntext, str(kw))
texture = Cache_get('textinput.label', cid)
if texture is None:
            # FIXME: right now we can't render a very long line...
            # if we move to a "VBO" version as a fallback, we won't need to
            # do this. Try to find the maximum amount of text we can handle.
label = None
label_len = len(ntext)
ld = None
# check for blank line
if not ntext:
texture = Texture.create(size=(1, 1))
Cache_append('textinput.label', cid, texture)
return texture
while True:
try:
label = Label(text=ntext[:label_len], **kw)
label.refresh()
if ld is not None and ld > 2:
ld //= 2
label_len += ld
else:
break
except:
                    # an exception happens when we try to render too much
                    # text; reduce it...
if ld is None:
ld = len(ntext)
ld //= 2
if ld < 2 and label_len:
label_len -= 1
label_len -= ld
continue
# ok, we found it.
texture = label.texture
Cache_append('textinput.label', cid, texture)
return texture
def _tokenize(self, text):
# Tokenize a text string from some delimiters
if text is None:
return
delimiters = u' ,\'".;:\n\r\t'
oldindex = 0
for index, char in enumerate(text):
if char not in delimiters:
continue
if oldindex != index:
yield text[oldindex:index]
yield text[index:index + 1]
oldindex = index + 1
yield text[oldindex:]
def _split_smart(self, text):
"""
Do a "smart" split. If not multiline, or if wrap is set,
we are not doing smart split, just a split on line break.
Otherwise, we are trying to split as soon as possible, to prevent
overflow on the widget.
"""
        # depending on the options, split the text into lines or words
if not self.multiline or not self.do_wrap:
lines = text.split(u'\n')
lines_flags = [0] + [FL_IS_LINEBREAK] * (len(lines) - 1)
return lines, lines_flags
# no autosize, do wordwrap.
x = flags = 0
line = []
lines = []
lines_flags = []
_join = u''.join
lines_append, lines_flags_append = lines.append, lines_flags.append
padding_left = self.padding[0]
padding_right = self.padding[2]
width = self.width - padding_left - padding_right
text_width = self._get_text_width
_tab_width, _label_cached = self.tab_width, self._label_cached
# try to add each word on current line.
for word in self._tokenize(text):
is_newline = (word == u'\n')
w = text_width(word, _tab_width, _label_cached)
# if we have more than the width, or if it's a newline,
# push the current line, and create a new one
if (x + w > width and line) or is_newline:
lines_append(_join(line))
lines_flags_append(flags)
flags = 0
line = []
x = 0
if is_newline:
flags |= FL_IS_LINEBREAK
elif width >= 1 and w > width:
while w > width:
split_width = split_pos = 0
# split the word
for c in word:
cw = self._get_text_width(
c, self.tab_width, self._label_cached
)
if split_width + cw > width:
break
split_width += cw
split_pos += 1
if split_width == split_pos == 0:
# can't fit the word in, give up
break
lines_append(word[:split_pos])
lines_flags_append(flags)
flags = FL_IS_WORDBREAK
word = word[split_pos:]
w -= split_width
x = w
line.append(word)
else:
x += w
line.append(word)
if line or flags & FL_IS_LINEBREAK:
lines_append(_join(line))
lines_flags_append(flags)
return lines, lines_flags
def _key_down(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
# handle deletion
if (
self._selection
and internal_action in (None, 'del', 'backspace', 'enter')
and (internal_action != 'enter' or self.multiline)
):
self.delete_selection()
elif internal_action == 'del':
# Move cursor one char to the right. If that was successful,
# do a backspace (effectively deleting char right of cursor)
cursor = self.cursor
self.do_cursor_movement('cursor_right')
if cursor != self.cursor:
self.do_backspace(mode='del')
elif internal_action == 'backspace':
self.do_backspace()
# handle action keys and text insertion
if internal_action is None:
self.insert_text(displayed_str)
elif internal_action in ('shift', 'shift_L', 'shift_R'):
if not self._selection:
self._selection_from = self._selection_to = self.cursor_index()
self._selection = True
self._selection_finished = False
elif internal_action == 'ctrl_L':
self._ctrl_l = True
elif internal_action == 'ctrl_R':
self._ctrl_r = True
elif internal_action == 'alt_L':
self._alt_l = True
elif internal_action == 'alt_R':
self._alt_r = True
elif internal_action.startswith('cursor_'):
cc, cr = self.cursor
self.do_cursor_movement(
internal_action,
self._ctrl_l or self._ctrl_r,
self._alt_l or self._alt_r
)
if self._selection and not self._selection_finished:
self._selection_to = self.cursor_index()
self._update_selection()
else:
self.cancel_selection()
elif internal_action == 'enter':
if self.multiline:
self.insert_text(u'\n')
else:
self.dispatch('on_text_validate')
if self.text_validate_unfocus:
self.focus = False
elif internal_action == 'escape':
self.focus = False
def _key_up(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action in ('shift', 'shift_L', 'shift_R'):
if self._selection:
self._update_selection(True)
elif internal_action == 'ctrl_L':
self._ctrl_l = False
elif internal_action == 'ctrl_R':
self._ctrl_r = False
elif internal_action == 'alt_L':
self._alt_l = False
elif internal_action == 'alt_R':
self._alt_r = False
def keyboard_on_key_down(self, window, keycode, text, modifiers):
key, _ = keycode
win = EventLoop.window
# This allows *either* ctrl *or* cmd, but not both.
modifiers = set(modifiers) - {'capslock', 'numlock'}
is_shortcut = (
modifiers == {'ctrl'}
or _is_osx and modifiers == {'meta'}
)
is_interesting_key = key in self.interesting_keys.keys()
if (
not self.write_tab
and super().keyboard_on_key_down(window, keycode, text, modifiers)
):
return True
if text and is_shortcut and not is_interesting_key:
self._handle_shortcut(key)
elif self._editable and text and not is_interesting_key:
self._hide_handles(win)
self._hide_cut_copy_paste(win)
win.remove_widget(self._handle_middle)
# check for command modes
# we use \x01INFO\x02 to get info from IME on mobiles
# pygame seems to pass \x01 as the unicode for ctrl+a
# checking for modifiers ensures conflict resolution.
first_char = ord(text[0])
if not modifiers and first_char == 1:
self._command_mode = True
self._command = ''
if not modifiers and first_char == 2:
self._command_mode = False
self._command = self._command[1:]
if self._command_mode:
self._command += text
return
_command = self._command
if _command and first_char == 2:
self._handle_command(_command)
return
else:
if EventLoop.window.managed_textinput:
# we expect to get managed key input via on_textinput
return
if self._selection:
self.delete_selection()
self.insert_text(text)
# self._recalc_size()
return
if is_interesting_key:
self._hide_cut_copy_paste(win)
self._hide_handles(win)
if key == 27: # escape
self.focus = False
return True
elif key == 9: # tab
self.delete_selection()
self.insert_text(u'\t')
return True
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_down(key)
def _handle_command(self, command):
from_undo = True
command, data = command.split(':')
self._command = ''
if self._selection:
self.delete_selection()
if command == 'DEL':
count = int(data)
if not count:
self.delete_selection(from_undo=True)
end = self.cursor_index()
self._selection_from = max(end - count, 0)
self._selection_to = end
self._selection = True
self.delete_selection(from_undo=True)
return
elif command == 'INSERT':
self.insert_text(data, from_undo)
elif command == 'INSERTN':
from_undo = False
self.insert_text(data, from_undo)
elif command == 'SELWORD':
self.dispatch('on_double_tap')
elif command == 'SEL':
if data == '0':
Clock.schedule_once(lambda dt: self.cancel_selection())
elif command == 'CURCOL':
self.cursor = int(data), self.cursor_row
def _handle_shortcut(self, key):
# actions that can be done in readonly
if key == ord('a'): # select all
self.select_all()
elif key == ord('c'): # copy selection
self.copy()
if not self._editable:
return
# actions that can be done only if editable
if key == ord('x'): # cut selection
self._cut(self.selection_text)
elif key == ord('v'): # paste clipboard content
self.paste()
elif key == ord('z'): # undo
self.do_undo()
elif key == ord('r'): # redo
self.do_redo()
def keyboard_on_key_up(self, window, keycode):
key = keycode[0]
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_up(key)
def keyboard_on_textinput(self, window, text):
if self._selection:
self.delete_selection()
self.insert_text(text, False)
# current IME composition in progress by the IME system, or '' if nothing
_ime_composition = StringProperty('')
# cursor position of last IME event
_ime_cursor = ListProperty(None, allownone=True)
def _bind_keyboard(self):
super()._bind_keyboard()
Window.bind(on_textedit=self.window_on_textedit)
def _unbind_keyboard(self):
super()._unbind_keyboard()
Window.unbind(on_textedit=self.window_on_textedit)
def window_on_textedit(self, window, ime_input):
text_lines = self._lines or ['']
if self._ime_composition:
pcc, pcr = self._ime_cursor
text = text_lines[pcr]
len_ime = len(self._ime_composition)
if text[pcc - len_ime:pcc] == self._ime_composition: # always?
remove_old_ime_text = text[:pcc - len_ime] + text[pcc:]
ci = self.cursor_index()
self._refresh_text_from_property(
"insert",
*self._get_line_from_cursor(pcr, remove_old_ime_text)
)
self.cursor = self.get_cursor_from_index(ci - len_ime)
if ime_input:
if self._selection:
self.delete_selection()
cc, cr = self.cursor
text = text_lines[cr]
new_text = text[:cc] + ime_input + text[cc:]
self._refresh_text_from_property(
"insert", *self._get_line_from_cursor(cr, new_text)
)
self.cursor = self.get_cursor_from_index(
self.cursor_index() + len(ime_input)
)
self._ime_composition = ime_input
self._ime_cursor = self.cursor
def on__hint_text(self, instance, value):
self._refresh_hint_text()
def _refresh_hint_text(self):
_lines, self._hint_text_flags = self._split_smart(self.hint_text)
_hint_text_labels = []
_hint_text_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x, hint=True)
_hint_text_labels.append(lbl)
_hint_text_rects.append(Rectangle(size=lbl.size))
self._hint_text_lines[:] = _lines
self._hint_text_labels = _hint_text_labels
self._hint_text_rects = _hint_text_rects
# Remember to update graphics
self._trigger_update_graphics()
#
# Properties
#
_lines = ListProperty([])
_hint_text_lines = ListProperty([])
_editable = BooleanProperty(True)
_insert_int_pat = re.compile(u'^-?[0-9]*$')
_insert_float_pat = re.compile(u'^-?[0-9]*\\.?[0-9]*$')
_cursor_blink = BooleanProperty(False)
_cursor_visual_pos = AliasProperty(
_get_cursor_visual_pos, None, bind=['cursor_pos']
)
_cursor_visual_height = AliasProperty(
_get_cursor_visual_height, None, bind=['cursor_pos']
)
readonly = BooleanProperty(False)
'''If True, the user will not be able to change the content of a textinput.
.. versionadded:: 1.3.0
:attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
text_validate_unfocus = BooleanProperty(True)
    '''If True, the :meth:`TextInput.on_text_validate` event will unfocus the
    widget and therefore make it stop listening to the keyboard. When
    disabled, the :meth:`TextInput.on_text_validate` event can be fired
    multiple times because the TextInput keeps its focus.
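    A minimal illustration (the callback name is just an example)::

        def on_enter(instance):
            print('enter pressed in', instance)

        ti = TextInput(multiline=False, text_validate_unfocus=False)
        ti.bind(on_text_validate=on_enter)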
.. versionadded:: 1.10.1
:attr:`text_validate_unfocus` is
a :class:`~kivy.properties.BooleanProperty` and defaults to True.
'''
multiline = BooleanProperty(True)
    '''If True, the widget will be able to show multiple lines of text. If
    False, the "enter" keypress will defocus the textinput instead of adding
    a new line.
:attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
do_wrap = BooleanProperty(True)
'''If True, and the text is multiline, then lines larger than the width of
the widget will wrap around to the next line, avoiding the need for
    horizontal scrolling. Disabling this option ensures one line is always
    displayed as one line.
    :attr:`do_wrap` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to True.
    .. versionadded:: 2.1.0
'''
password = BooleanProperty(False)
'''If True, the widget will display its characters as the character
set in :attr:`password_mask`.
.. versionadded:: 1.2.0
:attr:`password` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
password_mask = StringProperty('*')
'''Sets the character used to mask the text when :attr:`password` is True.
.. versionadded:: 1.10.0
:attr:`password_mask` is a :class:`~kivy.properties.StringProperty` and
defaults to `'*'`.
'''
cursor_blink = BooleanProperty(True)
'''This property is used to set whether the graphic cursor should blink
or not.
.. versionchanged:: 1.10.1
`cursor_blink` has been refactored to enable switching the blinking
on/off and the previous behavior has been moved to a private
`_cursor_blink` property. The previous default value `False` has been
changed to `True`.
:attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
def _get_cursor(self):
return self._cursor
def _set_cursor(self, pos):
if not self._lines:
self._trigger_refresh_text()
return
l = self._lines
cr = boundary(pos[1], 0, len(l) - 1)
cc = boundary(pos[0], 0, len(l[cr]))
cursor = cc, cr
# adjust scrollview to ensure that the cursor will be always inside our
# viewport.
self._adjust_viewport(cc, cr)
if self._cursor == cursor:
return
self._cursor = cursor
return True
@triggered(timeout=-1)
def _adjust_viewport(self, cc, cr):
padding_left = self.padding[0]
padding_right = self.padding[2]
viewport_width = self.width - padding_left - padding_right
sx = self.scroll_x
offset = self.cursor_offset()
# if offset is outside the current bounds, readjust
if offset - sx >= viewport_width:
self.scroll_x = offset - viewport_width
elif offset < sx + 1:
self.scroll_x = offset
# do the same for Y
# this algo try to center the cursor as much as possible
dy = self.line_height + self.line_spacing
offsety = cr * dy
padding_top = self.padding[1]
padding_bottom = self.padding[3]
viewport_height = self.height - padding_top - padding_bottom - dy
sy = self.scroll_y
if offsety > viewport_height + sy:
self.scroll_y = offsety - viewport_height
elif offsety < sy:
self.scroll_y = offsety
cursor = AliasProperty(_get_cursor, _set_cursor)
'''Tuple of (col, row) values indicating the current cursor position.
You can set a new (col, row) if you want to move the cursor. The scrolling
area will be automatically updated to ensure that the cursor is
visible inside the viewport.
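    For example, to move the cursor to the start of the third line
    (``ti`` is assumed to be an existing TextInput instance)::

        ti.cursor = (0, 2)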
:attr:`cursor` is an :class:`~kivy.properties.AliasProperty`.
'''
def _get_cursor_col(self):
return self._cursor[0]
cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', ))
'''Current column of the cursor.
:attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to
cursor[0], read-only.
'''
def _get_cursor_row(self):
return self._cursor[1]
cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', ))
'''Current row of the cursor.
:attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to
cursor[1], read-only.
'''
cursor_pos = AliasProperty(_get_cursor_pos, None,
bind=('cursor', 'padding', 'pos', 'size',
'focus', 'scroll_x', 'scroll_y',
'line_height', 'line_spacing'),
cache=False)
'''Current position of the cursor, in (x, y).
:attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`,
read-only.
'''
cursor_color = ColorProperty([1, 0, 0, 1])
'''Current color of the cursor, in (r, g, b, a) format.
.. versionadded:: 1.9.0
:attr:`cursor_color` is a :class:`~kivy.properties.ColorProperty` and
defaults to [1, 0, 0, 1].
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
cursor_width = NumericProperty('1sp')
'''Current width of the cursor.
.. versionadded:: 1.10.0
:attr:`cursor_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to '1sp'.
'''
line_height = NumericProperty(1)
    '''Height of a line. This property is automatically computed from
    :attr:`font_name` and :attr:`font_size`. Setting line_height manually has
    no effect.
.. note::
:attr:`line_height` is the height of a single line of text.
Use :attr:`minimum_height`, which also includes padding, to
get the height required to display the text properly.
:attr:`line_height` is a :class:`~kivy.properties.NumericProperty`,
read-only.
'''
tab_width = NumericProperty(4)
'''By default, each tab will be replaced by four spaces on the text
input widget. You can set a lower or higher value.
:attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 4.
'''
padding_x = VariableListProperty([0, 0], length=2, deprecated=True)
'''Horizontal padding of the text: [padding_left, padding_right].
padding_x also accepts a one argument form [padding_horizontal].
:attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_x(self, instance, value):
self.padding[0] = value[0]
self.padding[2] = value[1]
padding_y = VariableListProperty([0, 0], length=2, deprecated=True)
'''Vertical padding of the text: [padding_top, padding_bottom].
padding_y also accepts a one argument form [padding_vertical].
:attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_y(self, instance, value):
self.padding[1] = value[0]
self.padding[3] = value[1]
padding = VariableListProperty([6, 6, 6, 6])
'''Padding of the text: [padding_left, padding_top, padding_right,
padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
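    For example, both forms below give 10 pixels of padding on every side
    (an illustrative value)::

        TextInput(padding=[10])
        TextInput(padding=[10, 10, 10, 10])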
.. versionchanged:: 1.7.0
Replaced AliasProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [6, 6, 6, 6].
'''
halign = OptionProperty('auto', options=['left', 'center', 'right',
'auto'])
'''Horizontal alignment of the text.
:attr:`halign` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'auto'. Available options are : auto, left, center and right.
Auto will attempt to autodetect horizontal alignment for RTL text (Pango
only), otherwise it behaves like `left`.
.. versionadded:: 1.10.1
'''
scroll_x = NumericProperty(0)
'''X scrolling value of the viewport. The scrolling is automatically
updated when the cursor is moved or text changed. If there is no
user input, the scroll_x and scroll_y properties may be changed.
:attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
scroll_y = NumericProperty(0)
'''Y scrolling value of the viewport. See :attr:`scroll_x` for more
information.
:attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
selection_color = ColorProperty([0.1843, 0.6549, 0.8313, .5])
'''Current color of the selection, in (r, g, b, a) format.
.. warning::
The color should always have an "alpha" component less than 1
since the selection is drawn after the text.
:attr:`selection_color` is a :class:`~kivy.properties.ColorProperty` and
defaults to [0.1843, 0.6549, 0.8313, .5].
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_active`. Can be used for a custom background.
.. versionadded:: 1.4.1
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to (4, 4, 4, 4).
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput')
'''Background image of the TextInput when it's not in focus.
.. versionadded:: 1.4.1
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled')
'''Background image of the TextInput when disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled'.
'''
background_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_active')
'''Background image of the TextInput when it's in focus.
.. versionadded:: 1.4.1
:attr:`background_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_active'.
'''
background_color = ColorProperty([1, 1, 1, 1])
'''Current color of the background, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`background_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to [1, 1, 1, 1] (white).
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
foreground_color = ColorProperty([0, 0, 0, 1])
'''Current color of the foreground, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`foreground_color` is a :class:`~kivy.properties.ColorProperty`
and defaults to [0, 0, 0, 1] (black).
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
disabled_foreground_color = ColorProperty([0, 0, 0, .5])
'''Current color of the foreground when disabled, in (r, g, b, a) format.
.. versionadded:: 1.8.0
:attr:`disabled_foreground_color` is a
:class:`~kivy.properties.ColorProperty` and
    defaults to [0, 0, 0, .5] (50% transparent black).
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
use_bubble = BooleanProperty(not _is_desktop)
'''Indicates whether the cut/copy/paste bubble is used.
.. versionadded:: 1.7.0
:attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
use_handles = BooleanProperty(not _is_desktop)
'''Indicates whether the selection handles are displayed.
.. versionadded:: 1.8.0
:attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
def get_sel_from(self):
return self._selection_from
selection_from = AliasProperty(get_sel_from, None)
'''If a selection is in progress or complete, this property will represent
the cursor index where the selection started.
.. versionchanged:: 1.4.0
:attr:`selection_from` is an :class:`~kivy.properties.AliasProperty`
and defaults to None, readonly.
'''
def get_sel_to(self):
return self._selection_to
selection_to = AliasProperty(get_sel_to, None)
'''If a selection is in progress or complete, this property will represent
    the cursor index where the selection ends.
.. versionchanged:: 1.4.0
:attr:`selection_to` is an :class:`~kivy.properties.AliasProperty` and
defaults to None, readonly.
'''
selection_text = StringProperty(u'')
'''Current content selection.
:attr:`selection_text` is a :class:`~kivy.properties.StringProperty`
and defaults to '', readonly.
'''
def on_selection_text(self, instance, value):
if value:
if self.use_handles:
self._trigger_show_handles()
if CutBuffer and not self.password:
self._trigger_update_cutbuffer()
def _get_text(self):
flags = self._lines_flags
lines = self._lines
len_lines = len(lines)
less_flags = len(flags) < len_lines
if less_flags:
flags.append(1)
text = ''.join(
('\n' if (flags[i] & FL_IS_LINEBREAK) else '') + lines[i]
for i in range(len_lines)
)
if less_flags:
flags.pop()
return text
def _set_text(self, text):
if isinstance(text, bytes):
text = text.decode('utf8')
if self.replace_crlf:
text = text.replace(u'\r\n', u'\n')
if self.text != text:
self._refresh_text(text)
self.cursor = self.get_cursor_from_index(len(text))
text = AliasProperty(_get_text, _set_text, bind=('_lines',), cache=True)
'''Text of the widget.
Creation of a simple hello world::
widget = TextInput(text='Hello world')
If you want to create the widget with an unicode string, use::
widget = TextInput(text=u'My unicode string')
:attr:`text` is an :class:`~kivy.properties.AliasProperty`.
'''
font_name = StringProperty(DEFAULT_FONT)
'''Filename of the font to use. The path can be absolute or relative.
Relative paths are resolved by the :func:`~kivy.resources.resource_find`
function.
.. warning::
Depending on your text provider, the font file may be ignored. However,
you can mostly use this without problems.
If the font used lacks the glyphs for the particular language/symbols
you are using, you will see '[]' blank box characters instead of the
actual glyphs. The solution is to use a font that has the glyphs you
need to display. For example, to display |unicodechar|, use a font like
freesans.ttf that has the glyph.
.. |unicodechar| image:: images/unicode-char.png
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'Roboto'. This value is taken
from :class:`~kivy.config.Config`.
'''
font_size = NumericProperty('15sp')
'''Font size of the text in pixels.
:attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to 15 :attr:`~kivy.metrics.sp`.
'''
font_context = StringProperty(None, allownone=True)
'''Font context. `None` means the font is used in isolation, so you are
guaranteed to be drawing with the TTF file resolved by :attr:`font_name`.
Specifying a value here will load the font file into a named context,
enabling fallback between all fonts in the same context. If a font
context is set, you are not guaranteed that rendering will actually use
the specified TTF file for all glyphs (Pango will pick the one it
thinks is best).
If Kivy is linked against a system-wide installation of FontConfig,
you can load the system fonts by specifying a font context starting
with the special string `system://`. This will load the system
fontconfig configuration, and add your application-specific fonts on
    top of it (this imposes a significant risk of family name collision,
Pango may not use your custom font file, but pick one from the system)
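    A minimal illustration (the context name is an arbitrary example)::

        TextInput(font_context='system://myapp',
                  text='rendered with system fonts as fallback')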
.. note::
This feature requires the Pango text provider.
.. versionadded:: 1.10.1
:attr:`font_context` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
font_family = StringProperty(None, allownone=True)
'''Font family, this is only applicable when using :attr:`font_context`
option. The specified font family will be requested, but note that it may
not be available, or there could be multiple fonts registered with the
same family. The value can be a family name (string) available in the
font context (for example a system font in a `system://` context, or a
custom font file added using :class:`kivy.core.text.FontContextManager`).
If set to `None`, font selection is controlled by the :attr:`font_name`
setting.
.. note::
If using :attr:`font_name` to reference a custom font file, you
should leave this as `None`. The family name is managed automatically
in this case.
.. note::
This feature requires the Pango text provider.
.. versionadded:: 1.10.1
:attr:`font_family` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
base_direction = OptionProperty(
None,
options=['ltr', 'rtl', 'weak_rtl', 'weak_ltr', None],
allownone=True
)
'''Base direction of text, this impacts horizontal alignment when
:attr:`halign` is `auto` (the default). Available options are: None,
"ltr" (left to right), "rtl" (right to left) plus "weak_ltr" and
"weak_rtl".
.. note::
This feature requires the Pango text provider.
.. note::
Weak modes are currently not implemented in Kivy text layout, and
have the same effect as setting strong mode.
.. versionadded:: 1.10.1
:attr:`base_direction` is an :class:`~kivy.properties.OptionProperty` and
defaults to None (autodetect RTL if possible, otherwise LTR).
'''
text_language = StringProperty(None, allownone=True)
    '''Language of the text; if None, Pango will determine it from the locale.
This is an RFC-3066 format language tag (as a string), for example
"en_US", "zh_CN", "fr" or "ja". This can impact font selection, metrics
and rendering. For example, the same bytes of text can look different
for `ur` and `ar` languages, though both use Arabic script.
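    A short illustration (the language tag is just an example)::

        TextInput(text_language='ja')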
.. note::
This feature requires the Pango text provider.
.. versionadded:: 1.10.1
:attr:`text_language` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
_hint_text = StringProperty('')
def _set_hint_text(self, value):
if isinstance(value, bytes):
value = value.decode('utf8')
self._hint_text = value
def _get_hint_text(self):
return self._hint_text
hint_text = AliasProperty(
_get_hint_text, _set_hint_text, bind=('_hint_text', ))
'''Hint text of the widget, shown if text is ''.
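    A minimal example::

        TextInput(hint_text='Type your name here')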
.. versionadded:: 1.6.0
.. versionchanged:: 1.10.0
The property is now an AliasProperty and byte values are decoded to
strings. The hint text will stay visible when the widget is focused.
:attr:`hint_text` a :class:`~kivy.properties.AliasProperty` and defaults
to ''.
'''
hint_text_color = ColorProperty([0.5, 0.5, 0.5, 1.0])
'''Current color of the hint_text text, in (r, g, b, a) format.
.. versionadded:: 1.6.0
:attr:`hint_text_color` is a :class:`~kivy.properties.ColorProperty` and
defaults to [0.5, 0.5, 0.5, 1.0] (grey).
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
auto_indent = BooleanProperty(False)
'''Automatically indent multiline text.
.. versionadded:: 1.7.0
:attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
replace_crlf = BooleanProperty(True)
'''Automatically replace CRLF with LF.
.. versionadded:: 1.9.1
:attr:`replace_crlf` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
allow_copy = BooleanProperty(True)
'''Decides whether to allow copying the text.
.. versionadded:: 1.8.0
:attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
def _get_min_height(self):
return (
len(self._lines) * (self.line_height + self.line_spacing)
+ self.padding[1]
+ self.padding[3]
)
minimum_height = AliasProperty(
_get_min_height,
bind=(
'_lines', 'line_spacing', 'padding', 'font_size', 'font_name',
'password', 'font_context', 'hint_text', 'line_height'
),
cache=True
)
'''Minimum height of the content inside the TextInput.
.. versionadded:: 1.8.0
:attr:`minimum_height` is a readonly
:class:`~kivy.properties.AliasProperty`.
.. warning::
        :attr:`minimum_height` is calculated based on :attr:`width`, therefore
code like this will lead to an infinite loop::
<FancyTextInput>:
height: self.minimum_height
width: self.height
'''
line_spacing = NumericProperty(0)
'''Space taken up between the lines.
.. versionadded:: 1.8.0
:attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
input_filter = ObjectProperty(None, allownone=True)
''' Filters the input according to the specified mode, if not None. If
None, no filtering is applied.
.. versionadded:: 1.9.0
:attr:`input_filter` is an :class:`~kivy.properties.ObjectProperty` and
defaults to `None`. Can be one of `None`, `'int'` (string), or `'float'`
(string), or a callable. If it is `'int'`, it will only accept numbers.
If it is `'float'` it will also accept a single period. Finally, if it is
a callable it will be called with two parameters; the string to be added
and a bool indicating whether the string is a result of undo (True). The
callable should return a new substring that will be used instead.
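    A minimal illustration of a callable filter (the function below is purely
    an example, not part of the API)::

        def lowercase_only(substring, from_undo):
            return substring.lower()

        TextInput(input_filter=lowercase_only)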
'''
handle_image_middle = StringProperty(
'atlas://data/images/defaulttheme/selector_middle')
'''Image used to display the middle handle on the TextInput for cursor
positioning.
.. versionadded:: 1.8.0
:attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/selector_middle'.
'''
def on_handle_image_middle(self, instance, value):
if self._handle_middle:
self._handle_middle.source = value
handle_image_left = StringProperty(
'atlas://data/images/defaulttheme/selector_left')
'''Image used to display the Left handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/selector_left'.
'''
def on_handle_image_left(self, instance, value):
if self._handle_left:
self._handle_left.source = value
handle_image_right = StringProperty(
'atlas://data/images/defaulttheme/selector_right')
'''Image used to display the Right handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_right` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/selector_right'.
'''
def on_handle_image_right(self, instance, value):
if self._handle_right:
self._handle_right.source = value
write_tab = BooleanProperty(True)
'''Whether the tab key should move focus to the next widget or if it should
enter a tab in the :class:`TextInput`. If `True` a tab will be written,
otherwise, focus will move to the next widget.
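    For example, to let the Tab key move focus out of a single-line field
    (illustrative)::

        TextInput(write_tab=False, multiline=False)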
.. versionadded:: 1.9.0
:attr:`write_tab` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
if __name__ == '__main__':
from textwrap import dedent
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
KV = dedent(r'''
#:set font_size '20dp'
BoxLayout:
orientation: 'vertical'
padding: '20dp'
spacing: '10dp'
TextInput:
font_size: font_size
size_hint_y: None
height: self.minimum_height
multiline: False
text: 'monoline'
TextInput:
size_hint_y: None
font_size: font_size
height: self.minimum_height
multiline: False
password: True
password_mask: '•'
text: 'password'
TextInput:
font_size: font_size
size_hint_y: None
height: self.minimum_height
multiline: False
readonly: True
text: 'readonly'
TextInput:
font_size: font_size
size_hint_y: None
height: self.minimum_height
multiline: False
disabled: True
text: 'disabled'
TextInput:
font_size: font_size
hint_text: 'normal with hint text'
TextInput:
font_size: font_size
text: 'default'
TextInput:
font_size: font_size
text: 'bubble & handles'
use_bubble: True
use_handles: True
TextInput:
font_size: font_size
text: 'no wrap'
do_wrap: False
TextInput:
font_size: font_size
text: 'multiline\nreadonly'
disabled: app.time % 5 < 2.5
''')
class TextInputApp(App):
time = NumericProperty()
def build(self):
Clock.schedule_interval(self.update_time, 0)
return Builder.load_string(KV)
def update_time(self, dt):
self.time += dt
TextInputApp().run()
| 34.047368 | 89 | 0.579927 |

884ec6b3cf25d0d478fd34d4bc12c55ed0c331f2 | 4,013 | py | Python | ceilometer/plugin.py | dreamhost/ceilometer | f9fda50f0bf09ab7bd55df0a17b2ec8e31ef3b71 | ["Apache-2.0"] | 1 | 2021-11-22T11:00:53.000Z | 2021-11-22T11:00:53.000Z | ceilometer/plugin.py | dreamhost/ceilometer | f9fda50f0bf09ab7bd55df0a17b2ec8e31ef3b71 | ["Apache-2.0"] | null | null | null | ceilometer/plugin.py | dreamhost/ceilometer | f9fda50f0bf09ab7bd55df0a17b2ec8e31ef3b71 | ["Apache-2.0"] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for plugins.
"""
import abc
import collections
ExchangeTopics = collections.namedtuple('ExchangeTopics',
['exchange', 'topics'])
class PluginBase(object):
"""Base class for all plugins.
"""
def is_enabled(self):
"""Return boolean indicating whether this plugin should
be enabled and used by the caller.
"""
return True
class NotificationBase(PluginBase):
"""Base class for plugins that support the notification API."""
__metaclass__ = abc.ABCMeta
def is_enabled(self):
return True
@abc.abstractmethod
def get_event_types(self):
"""Return a sequence of strings defining the event types to be
given to this plugin."""
@abc.abstractmethod
def get_exchange_topics(self, conf):
"""Return a sequence of ExchangeTopics defining the exchange and
topics to be connected for this plugin.
:param conf: Configuration.
"""
@abc.abstractmethod
def process_notification(self, message):
"""Return a sequence of Counter instances for the given message.
:param message: Message to process."""
def notification_to_metadata(self, event):
"""Transform a payload dict to a metadata dict."""
metadata = dict([(k, event['payload'].get(k))
for k in self.metadata_keys])
metadata['event_type'] = event['event_type']
metadata['host'] = event['publisher_id']
return metadata
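# Illustrative sketch (not part of the original module): a minimal notification
# plugin built on the classes above. The event type, exchange and topic names
# are assumptions used only for this example.
class InstanceCreatedNotification(NotificationBase):
    """Example plugin that listens for instance-creation notifications."""

    metadata_keys = ['display_name', 'instance_type']

    def get_event_types(self):
        # Only handle the final "create" event emitted by the compute service.
        return ['compute.instance.create.end']

    def get_exchange_topics(self, conf):
        # Listen on the (assumed) 'nova' exchange, 'notifications.info' topic.
        return [ExchangeTopics(exchange='nova',
                               topics=['notifications.info'])]

    def process_notification(self, message):
        # A real plugin would turn the message into Counter instances; this
        # sketch returns an empty sequence.
        return []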
class PollsterBase(PluginBase):
"""Base class for plugins that support the polling API."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_counter_names(self):
"""Return a sequence of Counter names supported by the pollster."""
@abc.abstractmethod
def get_counters(self, manager, instance):
"""Return a sequence of Counter instances from polling the
resources."""
class PublisherBase(PluginBase):
"""Base class for plugins that publish the sampler."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def publish_counters(self, context, counters, source):
"Publish counters into final conduit."
class TransformerBase(PluginBase):
"""Base class for plugins that transform the counter."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def handle_sample(self, context, counter, source):
"""Transform a counter.
:param context: Passed from the data collector.
:param counter: A counter.
:param source: Passed from data collector.
"""
def flush(self, context, source):
"""Flush counters cached previously.
:param context: Passed from the data collector.
:param source: Source of counters that are being published."""
return []
def __init__(self, **kwargs):
"""Setup transformer.
        Each time a transformer is involved in a pipeline, a new transformer
        instance is created and chained into the pipeline, i.e. a transformer
        instance exists per pipeline. This helps if the transformer needs to
        keep some cache and per-pipeline information.
:param kwargs: The parameters that are defined in pipeline config file.
"""
super(TransformerBase, self).__init__()
| 30.172932
| 79
| 0.668826
|
26efffc5f6357278704ad4a3cc98a2c4c7b19b3f
| 21,998
|
py
|
Python
|
iota/components/iota_utils.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iota/components/iota_utils.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iota/components/iota_utils.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division, print_function, absolute_import
from past.builtins import range
'''
Author : Lyubimov, A.Y.
Created : 12/19/2016
Last Changed: 11/05/2018
Description : Module with basic utilities of broad applications in IOTA
'''
import os
import sys
from collections import Counter
import wx
from cctbx import miller
assert miller
from libtbx import easy_pickle as ep, easy_run
# for Py3 compatibility
from io import BytesIO
try:
  from itertools import izip as zip
except ImportError:
pass
# For testing
import time
assert time
# Platform-specific stuff
# TODO: Will need to test this on Windows at some point
if wx.Platform == '__WXGTK__':
plot_font_size = 10
norm_font_size = 10
button_font_size = 12
LABEL_SIZE = 14
CAPTION_SIZE = 12
python = 'python'
elif wx.Platform == '__WXMAC__':
plot_font_size = 9
norm_font_size = 12
button_font_size = 14
LABEL_SIZE = 14
CAPTION_SIZE = 12
python = "Python"
elif (wx.Platform == '__WXMSW__'):
plot_font_size = 9
norm_font_size = 9
button_font_size = 11
LABEL_SIZE = 11
CAPTION_SIZE = 9
python = "Python" # TODO: make sure it's right!
# --------------------------- Miscellaneous Utils ---------------------------- #
def noneset(value):
if value == '':
return 'None'
elif 'none' in str(value).lower():
return "None"
elif value is None:
return "None"
else:
return value
def makenone(value):
if str(value).lower() in ('none', ''):
return None
else:
return str(value)
class UnicodeCharacters():
def __init__(self):
self.alpha = u'\N{GREEK SMALL LETTER ALPHA}'.encode('utf-8')
self.beta = u'\N{GREEK SMALL LETTER BETA}'.encode('utf-8')
self.gamma = u'\N{GREEK SMALL LETTER GAMMA}'.encode('utf-8')
self.sigma = u'\N{GREEK SMALL LETTER SIGMA}'.encode('utf-8')
class WxFlags():
def __init__(self):
self.stack = wx.TOP | wx.RIGHT | wx.LEFT
self.expand = wx.TOP | wx.RIGHT | wx.LEFT | wx.EXPAND
class Capturing(list):
""" Class used to capture stdout from cctbx.xfel objects. Saves output in
appendable list for potential logging.
"""
def __enter__(self):
self._stdout = sys.stdout
self._stderr = sys.stderr
sys.stdout = self._stringio_stdout = BytesIO()
sys.stderr = self._stringio_stderr = BytesIO()
return self
def __exit__(self, *args):
self.extend(self._stringio_stdout.getvalue().splitlines())
sys.stdout = self._stdout
self.extend(self._stringio_stderr.getvalue().splitlines())
sys.stderr = self._stderr
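# Illustrative usage sketch for Capturing (the called function is a
# placeholder): anything written to stdout/stderr inside the block is
# collected in the list itself.
#
#     with Capturing() as output:
#         some_cctbx_call_that_prints()
#     log_lines = list(output)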
def convert_phil_to_text(phil, phil_file=None, att_level=0):
""" Reads in a PHIL object and converts it to plain text; optionally writes
out to text file if filepath is provided
:param phil: PHIL object
:param phil_file: absolute filepath for text file with parameters
:return: PHIL text string
"""
with Capturing() as output:
phil.show(attributes_level=att_level)
txt_out = ''
for one_output in output:
txt_out += one_output + '\n'
if phil_file:
with open(phil_file, 'w') as pf:
pf.write(txt_out)
return txt_out
def get_mpi_rank_and_size():
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
return rank, size
def main_log(logfile, entry, print_tag=False):
""" Write main log (so that I don't have to repeat this every time). All this
is necessary so that I don't have to use the Python logger module, which
creates a lot of annoying crosstalk with other cctbx.xfel modules.
"""
if logfile is not None:
with open(logfile, 'a') as lf:
lf.write('{}\n'.format(entry))
if print_tag:
print (entry)
def set_base_dir(dirname=None, sel_flag=False, out_dir=None):
""" Generates a base folder for converted pickles and/or grid search and/or
integration results; creates subfolder numbered one more than existing
"""
if out_dir is None and dirname is not None:
path = os.path.abspath(os.path.join(os.curdir, dirname))
elif out_dir is not None and dirname is None:
path = os.path.abspath(out_dir)
elif out_dir is None and dirname is None:
path = os.path.abspath(os.curdir)
else:
path = os.path.join(os.path.abspath(out_dir), dirname)
if os.path.isdir(path):
dirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]
dirnums = [int(d) for d in dirs if d.isdigit()]
if len(dirnums) > 0:
n_top = max(dirnums)
if sel_flag:
new_path = "{}/{:03d}".format(path, n_top)
else:
new_path = "{}/{:03d}".format(path, n_top + 1)
else:
new_path = "{}/{:03d}".format(path, 1)
else:
new_path = "{}/001".format(path)
return new_path
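# Illustrative sketch (paths are assumptions): if '/proc_dir' already holds
# numbered subfolders 001 and 002, set_base_dir(out_dir='/proc_dir') returns
# '/proc_dir/003', while sel_flag=True keeps the newest existing folder,
# '/proc_dir/002'.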
def find_base_dir(dirname):
""" Function to determine the current folder name """
def check_dirname(path, subdirname):
if os.path.isdir(os.path.join(path, subdirname)):
try:
int(subdirname)
return True
except ValueError:
return False
else:
return False
path = os.path.abspath(os.path.join(os.curdir, dirname))
if os.path.isdir(path):
if len(os.listdir(path)) > 0:
dirs = [int(i) for i in os.listdir(path) if check_dirname(path, i)]
found_path = "{}/{:03d}".format(path, max(dirs))
else:
found_path = path
else:
found_path = os.curdir
return found_path
def make_image_path(raw_img, input_base, base_path):
""" Makes path for output images """
path = os.path.dirname(raw_img)
relpath = os.path.relpath(path, input_base)
if relpath == '.':
dest_folder = base_path
else:
dest_folder = os.path.join(base_path, relpath)
return os.path.normpath(dest_folder)
# return dest_folder
def make_filename(path, prefix=None, suffix=None, new_ext=None):
bname = os.path.basename(path)
ext = bname.split(os.extsep)[-1]
if ext.isdigit():
filename = bname
else:
fn_list = bname.split('.')
filename = '.'.join(fn_list[0:-1])
if prefix:
filename = "{}_{}".format(prefix, filename)
if suffix:
filename = "{}_{}".format(filename, suffix)
if new_ext:
filename = "{}.{}".format(filename, new_ext)
return filename
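# Illustrative sketch (the path is an assumption):
#     make_filename('/data/img_0001.cbf', prefix='int', new_ext='pickle')
# returns 'int_img_0001.pickle'.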
def iota_exit(silent=False):
if not silent:
from iota import iota_version, now
print ('\n\nIOTA version {0}'.format(iota_version))
print ('{}\n'.format(now))
sys.exit()
# ------------------------------ Input Finder -------------------------------- #
class InputFinder():
def __init__(self):
self.images = ['cbf', 'img', 'corr', 'mccd', 'marccd', 'mar']
self.datafiles = ['mtz', 'hkl']
self.sequences = ['seq', 'fasta']
self.pickles = ['pickle', 'int']
self.texts = ['txt', 'lst', 'phil', 'param']
self.pickle_type = None
def get_file_list(self, path,
as_string=False,
ignore_ext=None,
ext_only=None,
last=None,
min_back=None):
""" Runs the 'find' command to recursively get a list of filepaths. Has
    a few advantages over os.walk():
1. Faster (by quite a lot when lots of files involved)
2. Automatically recursive
3. Automatically returns absolute paths
4. Can be further modified in command line (size of file, wildcards, etc.)
:param min_back:
:param as_string: boolean, if true will return file list as a string, if false, as list
:param ignore_ext: will ignore extensions as supplied
:param ext_only: will only find files with these extensions
:param path: path to all data (top of folder tree)
:param last: path to last file in a previously-generated input list (
useful when using this to look for new files in the same folder)
:return filepaths: list of absolute file paths
"""
if last is not None:
newer_than = '-newer {}'.format(last)
else:
newer_than = ''
if min_back is not None:
mmin = '-mmin {}'.format(min_back)
else:
mmin = ''
command = 'find {} -type f {} {}'.format(path, newer_than, mmin)
filepaths = easy_run.fully_buffered(command).stdout_lines
if ignore_ext is not None:
filepaths = [path for path in filepaths if not path.endswith(ignore_ext)]
elif ext_only is not None:
filepaths = [path for path in filepaths if path.endswith(ext_only)]
filepaths = [path for path in filepaths if not
os.path.basename(path).startswith('.')]
if as_string:
return '\n'.join(filepaths)
return filepaths
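  # Illustrative usage sketch (the folder path is an assumption): list every
  # .cbf file under a data tree as absolute paths.
  #
  #     finder = InputFinder()
  #     images = finder.get_file_list('/data/run_01', ext_only='cbf')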
def read_files_with_oswalk(self, path):
"""os.walk() is typically slower than using 'find', but I am keeping it
here just in case (and for easy switching)
:param path:
:return filelist: a list of absolute paths of files
"""
filelist = []
for root, folder, files in os.walk(path):
filepaths = [os.path.join(os.path.abspath(path), f) for f in files]
filelist.extend(filepaths)
return filelist
def identify_file_type(self, filepath):
""" This will attempt to identify the filetype using several consequtive
methods
:param filepath: input filepath
:return filetype: identified type of file
"""
filetype = self.test_extension(filepath)
if filetype == 'unidentified':
filetype = self.test_file(filepath)
if filetype == 'text':
filetype = self.test_text(filepath)
return filetype
def test_extension(self, filepath):
# Check extensions
filetype = 'unidentified'
filename = os.path.basename(filepath)
ext = filename.split(os.extsep)[1:] # NOTE: may end up with multiple extensions
for e in ext:
e = e.lower().replace(' ', '')
if e in self.images or e.isdigit():
filetype = 'raw image'
elif e in self.sequences:
filetype = 'sequence'
elif e in self.pickles:
if self.pickle_type is None:
self.pickle_type = self.test_pickle(filepath)
filetype = self.pickle_type
elif e == 'int':
filetype = 'image object'
elif e in self.texts:
filetype = 'text'
elif e == 'mtz':
filetype = 'data (MTZ)'
elif e == 'hkl':
filetype = 'data (HKL)'
elif e == 'pdb':
filetype = 'coordinates'
else:
filetype = 'unidentified'
return filetype
def test_file(self, filepath):
filename = os.path.basename(filepath)
ext = filename.split(os.extsep)[1:] # NOTE: may end up with multiple extensions
# Check type using 'file' command (slow but do once per extension)
raw_type = \
easy_run.fully_buffered('file {}'.format(filepath)).stdout_lines
raw_type = raw_type[0].split(':')[1]
if '8086 relocatable' in raw_type.lower():
self.pickle_type = self.test_pickle(filepath)
filetype = self.pickle_type
      self.pickles.append('.'.join(ext))
elif 'mar area detector image' in raw_type.lower():
filetype = 'raw image'
self.images.append('.'.join(ext))
elif raw_type.lower() == ' data':
filetype = 'raw image'
self.images.append('.'.join(ext))
elif 'text' in raw_type.lower():
filetype = 'text'
self.texts.append('.'.join(ext))
else:
filetype = 'unidentified'
return filetype
def test_text(self, path):
""" Test the contents of a text file for it being a
1. List of paths (i.e. an input file)
2. A PHIL type file
:param path: path to file
:return: filetype determined from testing a text file
"""
with open(path, 'r') as tf:
contents = tf.readlines()
# TODO: Find a better way to catch an old-timey format
# if '\r' in contents[0]:
# contents = contents[0].split('\r')
contents = [i.replace('\n', '') for i in contents][:1000]
content_test = [os.path.isfile(i) for i in contents]
try:
if Counter(content_test).most_common(1)[0][0]:
return 'file list'
else:
return self.test_phil(path)
except IndexError:
return 'unidentified'
def test_pickle(self, path):
# Test if pickle, and if so, if it's an image or processed pickle
pickle = ep.load(path)
try:
if 'DATA' in pickle:
return 'image pickle'
elif 'observations' in pickle:
return 'processed pickle'
else:
return 'pickle'
except TypeError:
if hasattr(pickle, 'process'):
return 'image object'
else:
return 'pickle'
def test_phil(self, filepath):
""" Tests incoming PHIL file to try and determine what it's for """
import iotbx.phil as ip
try:
test_phil = ip.parse(open(filepath).read())
# Test if IOTA parameter file
from iota.components.iota_input import master_phil as iota_phil
new_phil, unused = iota_phil.fetch(sources=[test_phil],
track_unused_definitions=True)
if len(unused) == 0:
return 'IOTA settings'
# Test if PRIME parameter file
from prime.postrefine.mod_input import master_phil as prime_phil
new_phil, unused = prime_phil.fetch(sources=[test_phil],
track_unused_definitions=True)
if len(unused) == 0:
return 'PRIME settings'
# Test if LABELIT target file
from labelit.phil_preferences import iotbx_defs, libtbx_defs
labelit_phil = ip.parse(input_string=iotbx_defs + libtbx_defs,
process_includes=True)
new_phil, unused = labelit_phil.fetch(sources=[test_phil],
track_unused_definitions=True)
if len(unused) == 0:
return 'LABELIT target'
# Test if DIALS target file
from dials.command_line.stills_process import control_phil_str, \
dials_phil_str
dials_phil = ip.parse(control_phil_str + dials_phil_str,
process_includes=True)
new_phil, unused = dials_phil.fetch(sources=[test_phil],
track_unused_definitions=True)
if len(unused) == 0:
return 'DIALS target'
else:
return 'text'
except Exception:
return 'text'
def get_input(self, path, filter=True, filter_type='image', last=None,
min_back=None):
""" Obtain list of files (or single file) from any input; obtain file type in input
:param filter:
:param filter_type:
:param last:
:param min_back:
:param path: path to input file(s) or folder(s)
:return: input_list: list of input file(s) (could be just one file)
input_type: type of input file(s)
"""
input_list = None
input_type = None
suffix = 'file'
if os.path.isfile(path):
input_type = self.identify_file_type(path)
if input_type == 'file list':
with open(path, 'r') as f:
input_list = [i.rstrip('\n') for i in f.readlines()]
suffix = 'list'
else:
input_list = [os.path.abspath(path)]
elif os.path.isdir(path):
input_list = self.get_file_list(path, last=last, min_back=min_back)
suffix = "folder"
if input_list is None:
return [], None
if len(input_list) > 0:
input_pairs = [(filepath, self.identify_file_type(filepath)) for
filepath in input_list]
if filter:
input_pairs = [i for i in input_pairs if filter_type in i[1]]
      input_pairs = [i for i in input_pairs if '_tmp' not in i[0]]
if len(input_pairs) > 0:
input_list = [i[0] for i in input_pairs]
input_types = [i[1] for i in input_pairs]
consensus_type = Counter(input_types).most_common(1)[0][0]
input_type = '{} {}'.format(consensus_type, suffix)
else:
return [], None
# sort input by filename and ensure type is str and not unicode
input_list = list(map(str, sorted(input_list, key=lambda i: i)))
return input_list, input_type
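  # Illustrative usage sketch (the path is an assumption): for a folder of raw
  # images this returns the sorted file list plus a consensus type such as
  # 'raw image folder'.
  #
  #     input_list, input_type = InputFinder().get_input('/data/run_01')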
def get_folder_type(self, path):
if os.path.isdir(path):
file_list = self.get_file_list(path)
input_types = [self.identify_file_type(f) for f in file_list]
folder_type = '{} folder'.format(Counter(input_types).most_common(1)[0][0])
return folder_type
else:
return 'unknown'
def get_file_type(self, path):
if os.path.isfile(path):
file_type = self.identify_file_type(path)
if file_type == 'file list':
with open(path, 'r') as f:
input_list = [i.rstrip('\n') for i in f.readlines()]
input_types = [self.identify_file_type(f) for f in input_list]
consensus_type = Counter(input_types).most_common(1)[0][0]
input_type = '{} list'.format(consensus_type)
else:
input_type = file_type
return input_type
else:
return 'unknown'
def make_input_list(self, input_entries,
filter=False,
filter_type=None,
last=None,
min_back=None):
""" Makes input list from multiple entries
:param filter:
:param filter_type:
:param last:
:param min_back:
:param input_entries: a list of input paths
:return: input list: a list of input files
"""
input_list = []
for path in input_entries:
if path is not None:
filepaths, _ = self.get_input(path, filter=filter,
filter_type=filter_type,
last=last,
min_back=min_back)
input_list.extend(filepaths)
return input_list
class ObjectFinder(object):
""" A class for finding pickled IOTA image objects and reading in their
contents; outputs a list of Python objects containing information about
individual images, including a list of integrated intensities """
def __init__(self):
""" Constructor """
self.ginp = InputFinder()
def find_objects(self, obj_folder, read_object_files=None,
find_old=False, finished_only=False):
""" Seek and import IOTA image objects
:param finished_only:
:param obj_folder: path to objects (which can be in subfolders)
:param read_object_files: list of already-read-in objects
:param find_old: find all objects in folder, regardless of other settings
:return: list of image objects
"""
if find_old:
min_back = None
else:
min_back = -1
# Find objects and filter out already-read objects if any
object_files = self.ginp.get_file_list(obj_folder,
ext_only='int',
min_back=min_back)
if read_object_files is not None:
new_object_files = list(set(object_files) - set(read_object_files))
else:
new_object_files = object_files
# For backwards compatibility, read and append observations to objects
new_objects = [self.read_object_file(i) for i in new_object_files]
new_finished_objects = [i for i in new_objects if
i is not None and i.status == 'final']
if finished_only:
return new_finished_objects
else:
return new_objects
def read_object_file(self, filepath):
""" Load pickled image object; if necessary, extract observations from
the image pickle associated with object, and append to object
:param filepath: path to image object file
:return: read-in (and modified) image object
"""
try:
object = ep.load(filepath)
if object.final['final'] is not None:
pickle_path = object.final['final']
if os.path.isfile(pickle_path):
pickle = ep.load(pickle_path)
object.final['observations'] = pickle['observations'][0]
return object
    except Exception as e:
print ('OBJECT_IMPORT_ERROR for {}: {}'.format(filepath, e))
return None
# ---------------------------------- Other ----------------------------------- #
class RadAverageCalculator(object):
def __init__(self, image=None, datablock=None):
if (image is None and datablock is None):
print ('ERROR: Need image or datablock for Radial Average Calculator')
return
if datablock is None:
from dxtbx.datablock import DataBlockFactory
self.datablock = DataBlockFactory.from_filenames([image])[0]
else:
self.datablock = datablock
def make_radial_average(self, num_bins=None, hires=None, lowres=None):
from dials.algorithms.background import RadialAverage
imageset = self.datablock.extract_imagesets()[0]
beam = imageset.get_beam()
detector = imageset.get_detector()
scan_range = (0, len(imageset))
summed_data = None
summed_mask = None
for i in range(*scan_range):
data = imageset.get_raw_data(i)
mask = imageset.get_mask(i)
assert isinstance(data, tuple)
assert isinstance(mask, tuple)
if summed_data is None:
summed_mask = mask
summed_data = data
else:
summed_data = [ sd + d for sd, d in zip(summed_data, data) ]
summed_mask = [ sm & m for sm, m in zip(summed_mask, mask) ]
if num_bins is None:
num_bins = int(sum(sum(p.get_image_size()) for p in detector) / 50)
if lowres is not None:
vmin = (1 / lowres) ** 2
else:
vmin = 0
if hires is not None:
vmax = (1 / hires) ** 2
else:
vmax = (1 / detector.get_max_resolution(beam.get_s0())) ** 2
# Compute the radial average
radial_average = RadialAverage(beam, detector, vmin, vmax, num_bins)
for d, m in zip(summed_data, summed_mask):
radial_average.add(d.as_double() / (scan_range[1] - scan_range[0]), m)
mean = radial_average.mean()
reso = radial_average.inv_d2()
return mean, reso
class IOTATermination(Exception):
def __init__(self, termination):
Exception.__init__(self, termination)
class InputError(Exception):
def __init__(self, termination):
Exception.__init__(self, termination)
| 32.067055
| 91
| 0.63533
|
001e4ec353acf96921d541f21b1ae72cbc2b1dad
| 12,334
|
py
|
Python
|
tests/volterra/test_tools.py
|
d-bouvier/pyvi
|
6b38bfaed75f84f6bf2ef43b11535510ee1c0490
|
[
"BSD-3-Clause"
] | 16
|
2018-06-24T03:42:56.000Z
|
2022-03-31T08:31:01.000Z
|
tests/volterra/test_tools.py
|
d-bouvier/pyvi
|
6b38bfaed75f84f6bf2ef43b11535510ee1c0490
|
[
"BSD-3-Clause"
] | null | null | null |
tests/volterra/test_tools.py
|
d-bouvier/pyvi
|
6b38bfaed75f84f6bf2ef43b11535510ee1c0490
|
[
"BSD-3-Clause"
] | 3
|
2019-03-21T01:18:39.000Z
|
2021-12-02T00:50:20.000Z
|
# -*- coding: utf-8 -*-
"""
Test script for pyvi/volterra/tools.py
Notes
-----
Developed for Python 3.6
@author: Damien Bouvier (Damien.Bouvier@ircam.fr)
"""
#==============================================================================
# Importations
#==============================================================================
import unittest
import itertools
import numpy as np
from pyvi.volterra.tools import (kernel_nb_coeff, series_nb_coeff, vec2kernel,
vec2series, kernel2vec)
from pyvi.utilities.mathbox import binomial
#==============================================================================
# Test Class
#==============================================================================
class KernelNbCoeffTest(unittest.TestCase):
def setUp(self):
self.Nmax = 5
self.Mmax = 20
self.iter_obj = itertools.product(range(1, self.Nmax+1),
range(self.Mmax))
def test_nb_coeff_symmetric_form(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = kernel_nb_coeff(N, M, form='sym')
self.assertEqual(nb_coeff, binomial(M + N - 1, N))
def test_nb_coeff_triangular_form(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = kernel_nb_coeff(N, M, form='tri')
self.assertEqual(nb_coeff, binomial(M + N - 1, N))
def test_nb_coeff_raw_form(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = kernel_nb_coeff(N, M, form=None)
self.assertEqual(nb_coeff, M**N)
class SeriesNbCoeffTest(unittest.TestCase):
def setUp(self):
self.Nmax = 5
self.Mmax = 5
self.M_list = [10, 0, 3, 0, 2]
self.M_list_results = [('sym', 26), ('tri', 26), (None, 69)]
self.form_list = [None, None, 'sym', 'tri', 'sym']
self.iter_obj = itertools.product(range(1, self.Nmax+1),
range(self.Mmax))
def test_nb_coeff_symmetric_form(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = series_nb_coeff(N, M, form='sym')
self.assertEqual(nb_coeff, binomial(M + N, N) - 1)
def test_nb_coeff_triangular_form(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = series_nb_coeff(N, M, form='tri')
self.assertEqual(nb_coeff, binomial(M + N, N) - 1)
def test_nb_coeff_raw_form(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = series_nb_coeff(N, M, form=None)
self.assertEqual(nb_coeff, sum([M**n for n in range(1, N+1)]))
def test_form_as_list(self):
M = self.Mmax
N = self.Nmax
val = binomial(M + N, N) - 1 + binomial(M, 2)
nb_coeff = series_nb_coeff(N, M, form=self.form_list)
self.assertEqual(nb_coeff, val)
def test_M_as_list(self):
for form, val in self.M_list_results:
with self.subTest(i=form):
nb_coeff = series_nb_coeff(len(self.M_list), self.M_list,
form=form)
self.assertEqual(nb_coeff, val)
def test_out_by_order_type(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = series_nb_coeff(N, M, out_by_order=True)
self.assertIsInstance(nb_coeff, list)
def test_out_by_order_length(self):
for N, M in self.iter_obj:
with self.subTest(i=(N, M)):
nb_coeff = series_nb_coeff(N, M, out_by_order=True)
self.assertEqual(len(nb_coeff), N)
class Vec2KernelTest(unittest.TestCase):
def setUp(self):
self.M = 4
self.h_vec = {2: np.arange(1, binomial(self.M + 1, 2)+1),
3: np.arange(1, binomial(self.M + 2, 3)+1)}
self.h_tri = {2: np.array([[1, 2, 3, 4],
[0, 5, 6, 7],
[0, 0, 8, 9],
[0, 0, 0, 10]]),
3: np.array([[[1, 2, 3, 4],
[0, 5, 6, 7],
[0, 0, 8, 9],
[0, 0, 0, 10]],
[[0, 0, 0, 0],
[0, 11, 12, 13],
[0, 0, 14, 15],
[0, 0, 0, 16]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 17, 18],
[0, 0, 0, 19]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 20]]])}
self.h_sym = {2: np.array([[1, 1, 1.5, 2],
[1, 5, 3, 3.5],
[1.5, 3, 8, 4.5],
[2, 3.5, 4.5, 10]]),
3: np.array([[[1., 2/3, 1, 4/3],
[2/3, 5/3, 1, 7/6],
[1, 1, 8/3, 1.5],
[4/3, 7/6, 1.5, 10/3]],
[[2/3, 5/3, 1, 7/6],
[5/3, 11, 4, 13/3],
[1, 4, 14/3, 2.5],
[7/6, 13/3, 2.5, 16/3]],
[[1, 1, 8/3, 1.5],
[1, 4, 14/3, 2.5],
[8/3, 14/3, 17, 6],
[1.5, 2.5, 6, 19/3]],
[[4/3, 7/6, 1.5, 10/3],
[7/6, 13/3, 2.5, 16/3],
[1.5, 2.5, 6, 19/3],
[10/3, 16/3, 19/3, 20]]])}
def test_triangular_form(self):
for n in [2, 3]:
with self.subTest(i=n):
result = vec2kernel(self.h_vec[n], n, self.M, form='tri')
self.assertTrue(np.all(result == self.h_tri[n]))
def test_symmetric_form(self):
for n in [2, 3]:
with self.subTest(i=n):
result = vec2kernel(self.h_vec[n], n, self.M, form='sym')
self.assertTrue(np.all(result == self.h_sym[n]))
def test_None_form(self):
for n in [2, 3]:
with self.subTest(i=n):
result = vec2kernel(self.h_vec[n], n, self.M, form=None)
self.assertTrue(np.all(result == self.h_tri[n]))
def test_error_raised(self):
n = 2
self.assertRaises(ValueError, vec2kernel, self.h_vec[n], n+1, self.M)
class Vec2SeriesErrorTest(unittest.TestCase):
def test_error_raised_if_wrong_type(self):
f = list()
self.assertRaises(TypeError, vec2series, f, 3, 3)
class Vec2SeriesTest(Vec2KernelTest):
test_error_raised = property()
def setUp(self):
super().setUp()
self.N = 3
self.h_vec[1] = np.arange(1, self.M+1)
self.f = {1: self.h_vec[1], 2: self.h_vec[2], 3: self.h_vec[3]}
self.h_tri[1] = self.h_vec[1]
self.h_sym[1] = self.h_vec[1]
def test_triangular_form(self):
kernels = vec2series(self.h_vec, self.N, self.M, form='tri')
result = [np.all(h == self.h_tri[n]) for n, h in kernels.items()]
self.assertTrue(all(result))
def test_symmetric_form(self):
kernels = vec2series(self.h_vec, self.N, self.M, form='sym')
result = [np.all(h == self.h_sym[n]) for n, h in kernels.items()]
self.assertTrue(all(result))
def test_None_form(self):
kernels = vec2series(self.h_vec, self.N, self.M, form=None)
result = [np.all(h == self.h_tri[n]) for n, h in kernels.items()]
self.assertTrue(all(result))
class Vec2Series_F_AsVector_Test(Vec2SeriesTest):
def setUp(self):
super().setUp()
self.h_vec = np.concatenate([f for n, f in sorted(self.h_vec.items())],
axis=0)
class Vec2Series_M_AsList_Test(Vec2SeriesTest):
def setUp(self):
super().setUp()
self.M = [4, 3, 2]
self.h_vec = {1: np.arange(1, binomial(self.M[0], 1)+1),
2: np.arange(1, binomial(self.M[1]+1, 2)+1),
3: np.arange(1, binomial(self.M[2]+2, 3)+1)}
self.h_tri = {1: np.array([1, 2, 3, 4]),
2: np.array([[1, 2, 3],
[0, 4, 5],
[0, 0, 6]]),
3: np.array([[[1, 2],
[0, 3]],
[[0, 0],
[0, 4]]])}
self.h_sym = {1: np.array([1, 2, 3, 4]),
2: np.array([[1., 1, 3/2],
[1, 4, 5/2],
[3/2, 5/2, 6]]),
3: np.array([[[1., 2/3],
[2/3, 1]],
[[2/3, 1],
[1, 4]]])}
class Vec2Series_Form_AsList_Test(Vec2KernelTest):
test_triangular_form = property()
test_symmetric_form = property()
test_None_form = property()
def setUp(self):
super().setUp()
self.N = 3
self.form = ['sym', 'tri', None]
self.h_vec[1] = np.arange(1, self.M+1)
self.h = dict()
self.h[1] = self.h_vec[1]
self.h[2] = self.h_tri[2]
self.h[3] = self.h_tri[3]
def test_f_as_dict(self):
kernels = vec2series(self.h_vec, self.N, self.M, form=self.form)
result = [np.all(h == self.h[n]) for n, h in kernels.items()]
self.assertTrue(all(result))
class Kernel2VecTest(Vec2KernelTest):
def setUp(self):
super().setUp()
self.h_raw = {2: np.array([[1, 1, 3, 4],
[1, 5, 3, 7],
[0, 3, 8, 9],
[0, 0, 0, 10]]),
3: np.array([[[1, 2, 1, 4],
[0, 5, 6, 7],
[1, 0, 8, 9],
[0, 0, 0, 10]],
[[0, 0, 0, 0],
[0, 11, 12, 13],
[0, 0, 14, 15],
[0, 0, 0, 16]],
[[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 17, 18],
[0, 0, 0, 19]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 20]]])}
def test_triangular_form(self):
for n in [2, 3]:
with self.subTest(i=n):
result = kernel2vec(self.h_tri[n], form='tri')
self.assertTrue(np.all(result == self.h_vec[n]))
def test_symmetric_form(self):
for n in [2, 3]:
with self.subTest(i=n):
result = kernel2vec(self.h_sym[n], form='sym')
self.assertTrue(np.all(result == self.h_vec[n]))
def test_None_form(self):
for n in [2, 3]:
with self.subTest(i=n):
result = kernel2vec(self.h_raw[n], form=None)
self.assertTrue(np.all(result == self.h_vec[n]))
def test_error_raised(self):
h_not_squared = np.zeros((3, 3, 4))
self.assertRaises(ValueError, kernel2vec, h_not_squared)
#==============================================================================
# Main script
#==============================================================================
if __name__ == '__main__':
"""
Main script for testing.
"""
unittest.main()
| 37.26284
| 79
| 0.408059
|
6a324fb8ca9b0dd21e5f7d194541b6de8df01c66
| 2,928
|
py
|
Python
|
src/contrib/remove_disabled_photos.py
|
memento42429/metashape-scripts
|
6537719eb0fd52f649d410eb7e7fe8b6b6028302
|
[
"MIT"
] | 40
|
2017-10-17T16:18:21.000Z
|
2018-12-28T02:37:35.000Z
|
src/contrib/remove_disabled_photos.py
|
memento42429/metashape-scripts
|
6537719eb0fd52f649d410eb7e7fe8b6b6028302
|
[
"MIT"
] | 1
|
2018-07-11T06:41:38.000Z
|
2018-07-12T18:15:35.000Z
|
src/contrib/remove_disabled_photos.py
|
memento42429/metashape-scripts
|
6537719eb0fd52f649d410eb7e7fe8b6b6028302
|
[
"MIT"
] | 15
|
2017-11-22T11:16:16.000Z
|
2018-12-30T13:50:41.000Z
|
import datetime
import shutil
import Metashape
import os
import sys
from pathlib import Path
"""
Script for moving disabled photos, Metashape (v 1.8)
Matjaz Mori, CPA, October 2019
The script will create a new subdirectory in the photos directory,
move all the photos marked "Disabled" in the project into it, and remove the "Disabled" cameras from the Metashape project.
When using, it is advisable to monitor the Console (View -> Console).
"""
compatible_major_version = "1.8"
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
def remove_disabled_photos():
print (datetime.datetime.now())
doc = Metashape.app.document
chunk = doc.chunk
counter = 0
counter_fail = 0
counter_not_moved = 0
counter_errors = 0
counter_cameras = 0
    length = len(chunk.cameras)
    message = 'Starting to evaluate ' + str(length) + ' photos...'
print (message)
for camera in chunk.cameras:
if camera.enabled is True:
counter_not_moved = counter_not_moved + 1
continue # skipping enabled cameras
photo_path = Path(camera.photo.path)
photo_name = str(camera.label)
destination_dir = photo_path.parent / 'Disabled'
destination = destination_dir / photo_path.name
if not destination_dir.exists():
try:
destination_dir.mkdir()
print ("Successfully created the directory %s " % destination_dir)
except OSError:
print ('Error creating %s' % destination_dir)
counter_errors = counter_errors + 1
continue # we can't create directory - thus we can't move photo - thus we shouldn't delete it
try:
if photo_path.is_file():
print ('Moving %s ...' % photo_name)
shutil.move(str(photo_path), str(destination))
counter = counter + 1
counter_cameras = counter_cameras + 1
else:
print ('Photo %s does not exist!' % photo_name)
counter_cameras = counter_cameras + 1
counter_fail = counter_fail + 1
chunk.remove(camera)
except OSError:
counter_errors = counter_errors + 1
print ('Error %s!' % photo_name)
    message_end = 'Success, ' + str(counter) + ' photos moved, ' + str(counter_not_moved) + ' photos not moved.\nNumber of files unable to move: ' + str(counter_fail) + '\nNumber of cameras removed: ' + str(counter_cameras) + '\nNumber of unknown errors: ' + str(counter_errors)
print (message_end)
label = "Scripts/Remove disabled photos"
Metashape.app.addMenuItem(label, remove_disabled_photos)
print("To execute this script press {}".format(label))
| 35.277108
| 278
| 0.650273
|
29acbda764e1edfd5878167a1c23a822c4880b17
| 3,248
|
py
|
Python
|
mercury_agent/inspector/hwlib/mercury_id.py
|
jr0d/mercury-agent
|
12b75ecc951d3ab5cd15c5213df2412b108cf47c
|
[
"Apache-2.0"
] | null | null | null |
mercury_agent/inspector/hwlib/mercury_id.py
|
jr0d/mercury-agent
|
12b75ecc951d3ab5cd15c5213df2412b108cf47c
|
[
"Apache-2.0"
] | 4
|
2017-11-01T16:25:49.000Z
|
2018-08-22T13:50:23.000Z
|
mercury_agent/inspector/hwlib/mercury_id.py
|
jr0d/mercury-agent
|
12b75ecc951d3ab5cd15c5213df2412b108cf47c
|
[
"Apache-2.0"
] | 5
|
2017-10-19T12:40:15.000Z
|
2018-08-21T20:18:54.000Z
|
"""\
Functions for grabbing a MercuryID
MercuryID or mercury_id: a meta type prefix followed by a hash value
[meta] - 00 = hash generated by interface mac addresses
         01 = hash generated by the DMI product UUID
         02 = hash generated by chassis asset tag and serial number
         03 = hash generated by board asset tag and serial number
"""
import hashlib
import logging
from mercury.common.exceptions import MercuryIdException
LOG = logging.getLogger(__name__)
META_TYPE_MAC = '00'
META_TYPE_PRODUCT_UUID = '01'
META_TYPE_CHASSIS_ASSET_SERIAL = '02'
META_TYPE_BOARD_ASSET_SERIAL = '03'
def _build_hash(target, meta_type):
digest = hashlib.sha1(target).hexdigest()
return meta_type + digest
def get_embedded(inspected_interfaces):
embedded_interfaces = []
for interface in inspected_interfaces:
_biosdevname = interface['predictable_names'].get('biosdevname', '')
if _biosdevname:
if 'em' in _biosdevname:
embedded_interfaces.append(interface)
return embedded_interfaces
# methods
def dmi_methods(dmi):
product_uuid = dmi.get('product_uuid')
chassis_asset_tag = dmi.get('chassis_asset_tag')
chassis_serial = dmi.get('chassis_serial')
board_asset_tag = dmi.get('board_asset_tag')
board_serial = dmi.get('board_serial')
disqualify = 'To Be Filled By O.E.M.'
if product_uuid:
LOG.debug('Generating mercury ID using product_uuid: %s' % product_uuid)
return _build_hash(product_uuid, META_TYPE_PRODUCT_UUID)
if disqualify in [chassis_asset_tag, chassis_serial, board_asset_tag, board_serial]:
LOG.debug('Junk in DMI tables: \'%s\'' % disqualify)
return
if chassis_asset_tag and chassis_serial:
LOG.debug('Generating mercury ID using chassis asset information: tag=%s, asset=%s' % (
chassis_asset_tag, chassis_serial))
return _build_hash(chassis_asset_tag + chassis_serial, META_TYPE_CHASSIS_ASSET_SERIAL)
if board_asset_tag and board_serial:
LOG.debug('Generating mercury ID using board asset information: tag=%s, asset=%s' % (
board_asset_tag, board_serial))
return _build_hash(board_asset_tag + board_serial, META_TYPE_BOARD_ASSET_SERIAL)
def generate_mercury_id(inspected_dmi, inspected_interfaces):
mercury_id = dmi_methods(inspected_dmi)
if mercury_id:
return mercury_id
else:
meta_type = META_TYPE_MAC
embedded = get_embedded(inspected_interfaces)
if embedded:
LOG.debug('Generating mercury ID using embedded interfaces ')
inspected_interfaces = embedded
else:
LOG.debug('Generating mercury ID using all interfaces')
target = ''
for interface in inspected_interfaces:
address = interface.get('address') # mac address
if address:
target += address
if not target:
raise MercuryIdException('Could not generate MercuryId')
return _build_hash(target, meta_type)
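# Illustrative sketch (the DMI values are made up): a host exposing a
# product_uuid gets an id with the '01' meta prefix, e.g.
#
#     generate_mercury_id({'product_uuid': '422E6F5D-0000-0000'}, [])
#     # -> '01' + sha1 digest of the uuid string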
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
from mercury_agent.inspector.inspectors.dmi import dmi_inspector
from mercury_agent.inspector.inspectors.interfaces import interface_inspector
_dmi = dmi_inspector()
_interfaces = interface_inspector()
print(generate_mercury_id(_dmi, _interfaces))
| 31.533981
| 95
| 0.708436
|
00f8ccd1aae07cc23ef791fa583fa7fd132bb729
| 28,654
|
py
|
Python
|
spyder/plugins/outlineexplorer/widgets.py
|
sharmalabs/spyder
|
7d2201699b6df6d4e72c73379dca0510f643bef9
|
[
"MIT"
] | null | null | null |
spyder/plugins/outlineexplorer/widgets.py
|
sharmalabs/spyder
|
7d2201699b6df6d4e72c73379dca0510f643bef9
|
[
"MIT"
] | null | null | null |
spyder/plugins/outlineexplorer/widgets.py
|
sharmalabs/spyder
|
7d2201699b6df6d4e72c73379dca0510f643bef9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Outline explorer widgets."""
# Standard library imports
from __future__ import print_function
import os.path as osp
# Third party imports
from qtpy.compat import from_qvariant
from qtpy.QtCore import QSize, Qt, Signal, Slot
from qtpy.QtWidgets import (QHBoxLayout, QTreeWidgetItem, QVBoxLayout, QWidget,
QTreeWidgetItemIterator)
# Local imports
from spyder.config.base import _, STDOUT
from spyder.py3compat import to_text_string
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import (create_action, create_toolbutton,
set_item_user_text, create_plugin_layout)
from spyder.widgets.onecolumntree import OneColumnTree
class FileRootItem(QTreeWidgetItem):
def __init__(self, path, treewidget, is_python=True):
QTreeWidgetItem.__init__(self, treewidget, QTreeWidgetItem.Type)
self.path = path
self.setIcon(
0, ima.icon('python') if is_python else ima.icon('TextFileIcon'))
self.setToolTip(0, path)
set_item_user_text(self, path)
def set_path(self, path, fullpath):
self.path = path
self.set_text(fullpath)
def set_text(self, fullpath):
self.setText(0, self.path if fullpath else osp.basename(self.path))
class TreeItem(QTreeWidgetItem):
"""Class browser item base class"""
def __init__(self, name, line, parent, preceding):
if preceding is None:
QTreeWidgetItem.__init__(self, parent, QTreeWidgetItem.Type)
else:
if preceding is not parent:
# Preceding must be either the same as item's parent
# or have the same parent as item
while preceding.parent() is not parent:
preceding = preceding.parent()
if preceding is None:
break
if preceding is None:
QTreeWidgetItem.__init__(self, parent, QTreeWidgetItem.Type)
else:
QTreeWidgetItem.__init__(self, parent, preceding,
QTreeWidgetItem.Type)
self.setText(0, name)
parent_text = from_qvariant(parent.data(0, Qt.UserRole),
to_text_string)
set_item_user_text(self, parent_text+'/'+name)
self.line = line
def set_icon(self, icon):
self.setIcon(0, icon)
def setup(self):
self.setToolTip(0, _("Line %s") % str(self.line))
class ClassItem(TreeItem):
def setup(self):
self.set_icon(ima.icon('class'))
self.setToolTip(0, _("Class defined at line %s") % str(self.line))
class FunctionItem(TreeItem):
def is_method(self):
return isinstance(self.parent(), ClassItem)
def setup(self):
if self.is_method():
self.setToolTip(0, _("Method defined at line %s") % str(self.line))
name = to_text_string(self.text(0))
if name.startswith('__'):
self.set_icon(ima.icon('private2'))
elif name.startswith('_'):
self.set_icon(ima.icon('private1'))
else:
self.set_icon(ima.icon('method'))
else:
self.set_icon(ima.icon('function'))
self.setToolTip(0, _("Function defined at line %s"
) % str(self.line))
class CommentItem(TreeItem):
def __init__(self, name, line, parent, preceding):
name = name.lstrip("# ")
TreeItem.__init__(self, name, line, parent, preceding)
def setup(self):
self.set_icon(ima.icon('blockcomment'))
font = self.font(0)
font.setItalic(True)
self.setFont(0, font)
self.setToolTip(0, _("Line %s") % str(self.line))
class CellItem(TreeItem):
def __init__(self, name, line, parent, preceding):
TreeItem.__init__(self, name, line, parent, preceding)
def setup(self):
self.set_icon(ima.icon('cell'))
font = self.font(0)
font.setItalic(True)
self.setFont(0, font)
self.setToolTip(0, _("Cell starts at line %s") % str(self.line))
def get_item_children(item):
"""Return a sorted list of all the children items of 'item'."""
children = [item.child(index) for index in range(item.childCount())]
for child in children[:]:
others = get_item_children(child)
if others is not None:
children += others
return sorted(children, key=lambda child: child.line)
def item_at_line(root_item, line):
"""
Find and return the item of the outline explorer under which is located
the specified 'line' of the editor.
"""
previous_item = root_item
for item in get_item_children(root_item):
if item.line > line:
return previous_item
previous_item = item
else:
return item
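# Illustrative sketch (line numbers are made up): for a file whose outline has
# items at lines 1, 10 and 25, item_at_line(root, 17) returns the item defined
# at line 10, i.e. the last outline entry starting at or before that line.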
def remove_from_tree_cache(tree_cache, line=None, item=None):
if line is None:
for line, (_it, _level, _debug) in list(tree_cache.items()):
if _it is item:
break
item, _level, debug = tree_cache.pop(line)
try:
for child in [item.child(_i) for _i in range(item.childCount())]:
remove_from_tree_cache(tree_cache, item=child)
item.parent().removeChild(item)
except RuntimeError:
# Item has already been deleted
#XXX: remove this debug-related fragment of code
print("unable to remove tree item: ", debug, file=STDOUT)
class OutlineExplorerTreeWidget(OneColumnTree):
def __init__(self, parent, show_fullpath=False, show_all_files=True,
group_cells=True, show_comments=True,
sort_files_alphabetically=False):
self.show_fullpath = show_fullpath
self.show_all_files = show_all_files
self.group_cells = group_cells
self.show_comments = show_comments
self.sort_files_alphabetically = sort_files_alphabetically
OneColumnTree.__init__(self, parent)
self.freeze = False # Freezing widget to avoid any unwanted update
self.editor_items = {}
self.editor_tree_cache = {}
self.editor_ids = {}
self.ordered_editor_ids = []
self.current_editor = None
title = _("Outline")
self.set_title(title)
self.setWindowTitle(title)
self.setUniformRowHeights(True)
def get_actions_from_items(self, items):
"""Reimplemented OneColumnTree method"""
fromcursor_act = create_action(self, text=_('Go to cursor position'),
icon=ima.icon('fromcursor'),
triggered=self.go_to_cursor_position)
fullpath_act = create_action(self, text=_('Show absolute path'),
toggled=self.toggle_fullpath_mode)
fullpath_act.setChecked(self.show_fullpath)
allfiles_act = create_action(self, text=_('Show all files'),
toggled=self.toggle_show_all_files)
allfiles_act.setChecked(self.show_all_files)
comment_act = create_action(self, text=_('Show special comments'),
toggled=self.toggle_show_comments)
comment_act.setChecked(self.show_comments)
group_cells_act = create_action(self, text=_('Group code cells'),
toggled=self.toggle_group_cells)
group_cells_act.setChecked(self.group_cells)
sort_files_alphabetically_act = create_action(
self, text=_('Sort files alphabetically'),
toggled=self.toggle_sort_files_alphabetically)
sort_files_alphabetically_act.setChecked(
self.sort_files_alphabetically)
actions = [fullpath_act, allfiles_act, group_cells_act, comment_act,
sort_files_alphabetically_act, fromcursor_act]
return actions
@Slot(bool)
def toggle_fullpath_mode(self, state):
self.show_fullpath = state
self.setTextElideMode(Qt.ElideMiddle if state else Qt.ElideRight)
for index in range(self.topLevelItemCount()):
self.topLevelItem(index).set_text(fullpath=self.show_fullpath)
def __hide_or_show_root_items(self, item):
"""
show_all_files option is disabled: hide all root items except *item*
show_all_files option is enabled: do nothing
"""
for _it in self.get_top_level_items():
_it.setHidden(_it is not item and not self.show_all_files)
@Slot(bool)
def toggle_show_all_files(self, state):
self.show_all_files = state
if self.current_editor is not None:
editor_id = self.editor_ids[self.current_editor]
item = self.editor_items[editor_id]
self.__hide_or_show_root_items(item)
self.__sort_toplevel_items()
if self.show_all_files is False:
self.root_item_selected(
self.editor_items[self.editor_ids[self.current_editor]])
@Slot(bool)
def toggle_show_comments(self, state):
self.show_comments = state
self.update_all()
@Slot(bool)
def toggle_group_cells(self, state):
self.group_cells = state
self.update_all()
@Slot(bool)
def toggle_sort_files_alphabetically(self, state):
self.sort_files_alphabetically = state
self.update_all()
self.__sort_toplevel_items()
@Slot()
def go_to_cursor_position(self):
if self.current_editor is not None:
line = self.current_editor.get_cursor_line_number()
editor_id = self.editor_ids[self.current_editor]
root_item = self.editor_items[editor_id]
item = item_at_line(root_item, line)
self.setCurrentItem(item)
self.scrollToItem(item)
def clear(self):
"""Reimplemented Qt method"""
self.set_title('')
OneColumnTree.clear(self)
def set_current_editor(self, editor, update):
"""Bind editor instance"""
editor_id = editor.get_id()
if editor_id in list(self.editor_ids.values()):
item = self.editor_items[editor_id]
if not self.freeze:
self.scrollToItem(item)
self.root_item_selected(item)
self.__hide_or_show_root_items(item)
if update:
self.save_expanded_state()
tree_cache = self.editor_tree_cache[editor_id]
self.populate_branch(editor, item, tree_cache)
self.restore_expanded_state()
else:
root_item = FileRootItem(editor.fname, self, editor.is_python())
root_item.set_text(fullpath=self.show_fullpath)
tree_cache = self.populate_branch(editor, root_item)
self.__hide_or_show_root_items(root_item)
self.root_item_selected(root_item)
self.editor_items[editor_id] = root_item
self.editor_tree_cache[editor_id] = tree_cache
self.resizeColumnToContents(0)
if editor not in self.editor_ids:
self.editor_ids[editor] = editor_id
self.ordered_editor_ids.append(editor_id)
self.__sort_toplevel_items()
self.current_editor = editor
def file_renamed(self, editor, new_filename):
"""File was renamed, updating outline explorer tree"""
editor_id = editor.get_id()
if editor_id in list(self.editor_ids.values()):
root_item = self.editor_items[editor_id]
root_item.set_path(new_filename, fullpath=self.show_fullpath)
self.__sort_toplevel_items()
def update_all(self):
self.save_expanded_state()
for editor, editor_id in list(self.editor_ids.items()):
item = self.editor_items[editor_id]
tree_cache = self.editor_tree_cache[editor_id]
self.populate_branch(editor, item, tree_cache)
self.restore_expanded_state()
def remove_editor(self, editor):
if editor in self.editor_ids:
if self.current_editor is editor:
self.current_editor = None
editor_id = self.editor_ids.pop(editor)
if editor_id in self.ordered_editor_ids:
self.ordered_editor_ids.remove(editor_id)
if editor_id not in list(self.editor_ids.values()):
root_item = self.editor_items.pop(editor_id)
self.editor_tree_cache.pop(editor_id)
try:
self.takeTopLevelItem(self.indexOfTopLevelItem(root_item))
except RuntimeError:
# item has already been removed
pass
def set_editor_ids_order(self, ordered_editor_ids):
"""
Order the root file items in the Outline Explorer following the
provided list of editor ids.
"""
if self.ordered_editor_ids != ordered_editor_ids:
self.ordered_editor_ids = ordered_editor_ids
if self.sort_files_alphabetically is False:
self.__sort_toplevel_items()
def __sort_toplevel_items(self):
"""
Sort the root file items in alphabetical order if
'sort_files_alphabetically' is True, else order the items as
specified in the 'self.ordered_editor_ids' list.
"""
if self.show_all_files is False:
return
current_ordered_items = [self.topLevelItem(index) for index in
range(self.topLevelItemCount())]
if self.sort_files_alphabetically:
new_ordered_items = sorted(
current_ordered_items,
key=lambda item: osp.basename(item.path.lower()))
else:
new_ordered_items = [
self.editor_items.get(e_id) for e_id in
self.ordered_editor_ids if
self.editor_items.get(e_id) is not None]
if current_ordered_items != new_ordered_items:
selected_items = self.selectedItems()
self.save_expanded_state()
for index in range(self.topLevelItemCount()):
self.takeTopLevelItem(0)
for index, item in enumerate(new_ordered_items):
self.insertTopLevelItem(index, item)
self.restore_expanded_state()
self.clearSelection()
if selected_items:
selected_items[-1].setSelected(True)
def populate_branch(self, editor, root_item, tree_cache=None):
"""
Generates an outline of the editor's content and stores the result
in a cache.
"""
if tree_cache is None:
tree_cache = {}
# Removing cached items for which line is > total line nb
for _l in list(tree_cache.keys()):
if _l >= editor.get_line_count():
# Checking if key is still in tree cache in case one of its
# ancestors was deleted in the meantime (deleting all children):
if _l in tree_cache:
remove_from_tree_cache(tree_cache, line=_l)
ancestors = [(root_item, 0)]
cell_ancestors = [(root_item, 0)]
previous_item = None
previous_level = None
prev_cell_level = None
prev_cell_item = None
oe_data = editor.get_outlineexplorer_data()
for block_nb in range(editor.get_line_count()):
line_nb = block_nb+1
data = oe_data.get(block_nb)
level = None if data is None else data.fold_level
citem, clevel, _d = tree_cache.get(line_nb, (None, None, ""))
# Skip iteration if line is not the first line of a foldable block
if level is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Searching for class/function statements
not_class_nor_function = data.is_not_class_nor_function()
if not not_class_nor_function:
class_name = data.get_class_name()
if class_name is None:
func_name = data.get_function_name()
if func_name is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Skip iteration for if/else/try/for/etc foldable blocks.
if not_class_nor_function and not data.is_comment():
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
cname = to_text_string(citem.text(0))
cparent = citem.parent
# Blocks for Cell Groups.
if (data is not None and data.def_type == data.CELL and
self.group_cells):
preceding = (root_item if previous_item is None
else previous_item)
cell_level = data.cell_level
if prev_cell_level is not None:
if cell_level == prev_cell_level:
pass
elif cell_level > prev_cell_level:
cell_ancestors.append((prev_cell_item,
prev_cell_level))
else:
while (len(cell_ancestors) > 1 and
cell_level <= prev_cell_level):
cell_ancestors.pop(-1)
_item, prev_cell_level = cell_ancestors[-1]
parent, _level = cell_ancestors[-1]
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = CellItem(data.def_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
ancestors = [(item, 0)]
prev_cell_level = cell_level
prev_cell_item = item
previous_item = item
continue
# Blocks for Code Groups.
if previous_level is not None:
if level == previous_level:
pass
elif level > previous_level+4: # Invalid indentation
continue
elif level > previous_level:
ancestors.append((previous_item, previous_level))
else:
while len(ancestors) > 1 and level <= previous_level:
ancestors.pop(-1)
_item, previous_level = ancestors[-1]
parent, _level = ancestors[-1]
preceding = root_item if previous_item is None else previous_item
if not_class_nor_function and data.is_comment():
if not self.show_comments:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
if data.def_type == data.CELL:
item = CellItem(data.def_name, line_nb, parent, preceding)
else:
item = CommentItem(data.text, line_nb, parent, preceding)
elif class_name is not None:
if citem is not None:
if (class_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = ClassItem(class_name, line_nb, parent, preceding)
else:
if citem is not None:
if (func_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = FunctionItem(func_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
previous_level = level
previous_item = item
return tree_cache
def root_item_selected(self, item):
"""Root item has been selected: expanding it and collapsing others"""
if self.show_all_files:
return
for root_item in self.get_top_level_items():
if root_item is item:
self.expandItem(root_item)
else:
self.collapseItem(root_item)
def restore(self):
"""Reimplemented OneColumnTree method"""
if self.current_editor is not None:
self.collapseAll()
editor_id = self.editor_ids[self.current_editor]
self.root_item_selected(self.editor_items[editor_id])
def get_root_item(self, item):
"""Return the root item of the specified item."""
root_item = item
while isinstance(root_item.parent(), QTreeWidgetItem):
root_item = root_item.parent()
return root_item
def get_visible_items(self):
"""Return a list of all visible items in the treewidget."""
items = []
iterator = QTreeWidgetItemIterator(self)
while iterator.value():
item = iterator.value()
if not item.isHidden():
if item.parent():
if item.parent().isExpanded():
items.append(item)
else:
items.append(item)
iterator += 1
return items
def activated(self, item):
"""Double-click event"""
editor_item = self.editor_items.get(
self.editor_ids.get(self.current_editor))
line = 0
if item == editor_item:
line = 1
elif isinstance(item, TreeItem):
line = item.line
self.freeze = True
root_item = self.get_root_item(item)
if line:
self.parent().edit_goto.emit(root_item.path, line, item.text(0))
else:
self.parent().edit.emit(root_item.path)
self.freeze = False
parent = self.current_editor.parent()
for editor_id, i_item in list(self.editor_items.items()):
if i_item is root_item:
for editor, _id in list(self.editor_ids.items()):
if _id == editor_id and editor.parent() is parent:
self.current_editor = editor
break
break
def clicked(self, item):
"""Click event"""
if isinstance(item, FileRootItem):
self.root_item_selected(item)
self.activated(item)
class OutlineExplorerWidget(QWidget):
"""Class browser"""
edit_goto = Signal(str, int, str)
edit = Signal(str)
is_visible = Signal()
def __init__(self, parent=None, show_fullpath=True, show_all_files=True,
group_cells=True, show_comments=True,
sort_files_alphabetically=False,
options_button=None):
QWidget.__init__(self, parent)
self.treewidget = OutlineExplorerTreeWidget(
self,
show_fullpath=show_fullpath,
show_all_files=show_all_files,
group_cells=group_cells,
show_comments=show_comments,
sort_files_alphabetically=sort_files_alphabetically)
self.visibility_action = create_action(self,
_("Show/hide outline explorer"),
icon='outline_explorer_vis.png',
toggled=self.toggle_visibility)
self.visibility_action.setChecked(True)
btn_layout = QHBoxLayout()
for btn in self.setup_buttons():
btn.setAutoRaise(True)
btn.setIconSize(QSize(16, 16))
btn_layout.addWidget(btn)
if options_button:
btn_layout.addStretch()
btn_layout.addWidget(options_button, Qt.AlignRight)
layout = create_plugin_layout(btn_layout, self.treewidget)
self.setLayout(layout)
@Slot(bool)
def toggle_visibility(self, state):
self.setVisible(state)
current_editor = self.treewidget.current_editor
if current_editor is not None:
current_editor.give_focus()
if state:
self.is_visible.emit()
def setup_buttons(self):
"""Setup the buttons of the outline explorer widget toolbar."""
self.fromcursor_btn = create_toolbutton(
self, icon=ima.icon('fromcursor'), tip=_('Go to cursor position'),
triggered=self.treewidget.go_to_cursor_position)
buttons = [self.fromcursor_btn]
for action in [self.treewidget.collapse_all_action,
self.treewidget.expand_all_action,
self.treewidget.restore_action,
self.treewidget.collapse_selection_action,
self.treewidget.expand_selection_action]:
buttons.append(create_toolbutton(self))
buttons[-1].setDefaultAction(action)
return buttons
def set_current_editor(self, editor, update, clear):
if clear:
self.remove_editor(editor)
if editor is not None:
self.treewidget.set_current_editor(editor, update)
def remove_editor(self, editor):
self.treewidget.remove_editor(editor)
def get_options(self):
"""
Return outline explorer options
"""
return dict(
show_fullpath=self.treewidget.show_fullpath,
show_all_files=self.treewidget.show_all_files,
group_cells=self.treewidget.group_cells,
show_comments=self.treewidget.show_comments,
sort_files_alphabetically=(
self.treewidget.sort_files_alphabetically),
expanded_state=self.treewidget.get_expanded_state(),
scrollbar_position=self.treewidget.get_scrollbar_position(),
visibility=self.isVisible()
)
def update(self):
self.treewidget.update_all()
def file_renamed(self, editor, new_filename):
self.treewidget.file_renamed(editor, new_filename)
| 41.407514
| 81
| 0.567565
|
073cb02438dcb6a1e809ddd721e3e65f2b7ed3ad
| 1,204
|
py
|
Python
|
Examples/CommonOptions/AddWatermark.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python-samples-
|
18e8eb850c3ef392a3ba310d08cc40299f9ffb3c
|
[
"MIT"
] | null | null | null |
Examples/CommonOptions/AddWatermark.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python-samples-
|
18e8eb850c3ef392a3ba310d08cc40299f9ffb3c
|
[
"MIT"
] | null | null | null |
Examples/CommonOptions/AddWatermark.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python-samples-
|
18e8eb850c3ef392a3ba310d08cc40299f9ffb3c
|
[
"MIT"
] | 1
|
2019-05-09T13:07:21.000Z
|
2019-05-09T13:07:21.000Z
|
# Import modules
import groupdocs_conversion_cloud
from Common import Common
# This example demonstrates how to convert a word processing document into a PDF document while adding a watermark
class AddWatermark:
@classmethod
def Run(cls):
# Create necessary API instances
apiInstance = groupdocs_conversion_cloud.ConvertApi.from_config(Common.GetConfig())
# Prepare convert settings
settings = groupdocs_conversion_cloud.ConvertSettings()
settings.file_path = "WordProcessing/four-pages.docx"
settings.format = "pdf"
settings.convert_options = groupdocs_conversion_cloud.PdfConvertOptions()
watermark = groupdocs_conversion_cloud.WatermarkOptions()
watermark.text = "Sample watermark"
watermark.color = "Red"
watermark.width = 100
watermark.height = 100
watermark.background = True
settings.watermark_options = watermark
settings.output_path = "converted"
# Convert
result = apiInstance.convert_document(groupdocs_conversion_cloud.ConvertDocumentRequest(settings))
print("Document converted: " + result[0].url)
| 37.625
| 107
| 0.695183
|
5467e243caea3a0613d71dac7fb843feb8bf18e7
| 395
|
py
|
Python
|
src/blog_main/wsgi.py
|
SleepNoMore/django_blog_site
|
d23397e1595c488c424ed7eb46d1f844afd8178e
|
[
"MIT"
] | null | null | null |
src/blog_main/wsgi.py
|
SleepNoMore/django_blog_site
|
d23397e1595c488c424ed7eb46d1f844afd8178e
|
[
"MIT"
] | null | null | null |
src/blog_main/wsgi.py
|
SleepNoMore/django_blog_site
|
d23397e1595c488c424ed7eb46d1f844afd8178e
|
[
"MIT"
] | null | null | null |
"""
WSGI config for blog_main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_main.settings')
application = get_wsgi_application()
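# Hedged deployment example (the server command below is an assumption, not part of this project):
#   gunicorn blog_main.wsgi:application --bind 0.0.0.0:8000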
| 23.235294
| 78
| 0.787342
|
236d64951d3a9cf121b72c6b0093f05371d0d142
| 18,191
|
py
|
Python
|
second/data/kitti_dataset.py
|
lpj0822/pointpillars_train
|
43594b50fcb5124ab449f1596ab2a9713de18051
|
[
"MIT"
] | 1
|
2020-09-05T09:18:53.000Z
|
2020-09-05T09:18:53.000Z
|
second/data/kitti_dataset.py
|
lpj0822/pointpillars_train
|
43594b50fcb5124ab449f1596ab2a9713de18051
|
[
"MIT"
] | 1
|
2021-04-29T07:04:30.000Z
|
2021-04-29T07:04:30.000Z
|
second/data/kitti_dataset.py
|
lpj0822/pointpillars_train
|
43594b50fcb5124ab449f1596ab2a9713de18051
|
[
"MIT"
] | 1
|
2020-08-25T09:06:42.000Z
|
2020-08-25T09:06:42.000Z
|
from pathlib import Path
import pickle
import time
from functools import partial
import fire
import numpy as np
from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.data.dataset import Dataset, register_dataset
from second.utils.progress_bar import progress_bar_iter as prog_bar
@register_dataset
class KittiDataset(Dataset):
NumPointFeatures = 4
def __init__(self,
root_path,
info_path,
class_names=None,
prep_func=None,
num_point_features=None):
assert info_path is not None
with open(info_path, 'rb') as f:
infos = pickle.load(f)
self._root_path = Path(root_path)
self._kitti_infos = infos
print("remain number of infos:", len(self._kitti_infos))
self._class_names = class_names
self._prep_func = prep_func
def __len__(self):
return len(self._kitti_infos)
def convert_detection_to_kitti_annos(self, detection):
class_names = self._class_names
det_image_idxes = [det["metadata"]["image_idx"] for det in detection]
gt_image_idxes = [
info["image"]["image_idx"] for info in self._kitti_infos
]
annos = []
for i in range(len(detection)):
det_idx = det_image_idxes[i]
det = detection[i]
# info = self._kitti_infos[gt_image_idxes.index(det_idx)]
info = self._kitti_infos[i]
calib = info["calib"]
rect = calib["R0_rect"]
Trv2c = calib["Tr_velo_to_cam"]
P2 = calib["P2"]
final_box_preds = det["box3d_lidar"].detach().cpu().numpy()
label_preds = det["label_preds"].detach().cpu().numpy()
scores = det["scores"].detach().cpu().numpy()
if final_box_preds.shape[0] != 0:
final_box_preds[:, 2] -= final_box_preds[:, 5] / 2
box3d_camera = box_np_ops.box_lidar_to_camera(
final_box_preds, rect, Trv2c)
locs = box3d_camera[:, :3]
dims = box3d_camera[:, 3:6]
angles = box3d_camera[:, 6]
camera_box_origin = [0.5, 1.0, 0.5]
box_corners = box_np_ops.center_to_corner_box3d(
locs, dims, angles, camera_box_origin, axis=1)
box_corners_in_image = box_np_ops.project_to_image(
box_corners, P2)
# box_corners_in_image: [N, 8, 2]
minxy = np.min(box_corners_in_image, axis=1)
maxxy = np.max(box_corners_in_image, axis=1)
bbox = np.concatenate([minxy, maxxy], axis=1)
anno = kitti.get_start_result_anno()
num_example = 0
box3d_lidar = final_box_preds
for j in range(box3d_lidar.shape[0]):
image_shape = info["image"]["image_shape"]
if bbox[j, 0] > image_shape[1] or bbox[j, 1] > image_shape[0]:
continue
if bbox[j, 2] < 0 or bbox[j, 3] < 0:
continue
bbox[j, 2:] = np.minimum(bbox[j, 2:], image_shape[::-1])
bbox[j, :2] = np.maximum(bbox[j, :2], [0, 0])
anno["bbox"].append(bbox[j])
# convert center format to kitti format
# box3d_lidar[j, 2] -= box3d_lidar[j, 5] / 2
anno["alpha"].append(
-np.arctan2(-box3d_lidar[j, 1], box3d_lidar[j, 0]) +
box3d_camera[j, 6])
anno["dimensions"].append(box3d_camera[j, 3:6])
anno["location"].append(box3d_camera[j, :3])
anno["rotation_y"].append(box3d_camera[j, 6])
anno["name"].append(class_names[int(label_preds[j])])
anno["truncated"].append(0.0)
anno["occluded"].append(0)
anno["score"].append(scores[j])
num_example += 1
if num_example != 0:
anno = {n: np.stack(v) for n, v in anno.items()}
annos.append(anno)
else:
annos.append(kitti.empty_result_anno())
num_example = annos[-1]["name"].shape[0]
annos[-1]["metadata"] = det["metadata"]
return annos
def evaluation(self, detections, output_dir):
"""
detection
        When you want to evaluate your own dataset, you MUST set the correct
        z axis and box z center.
        If you want to evaluate with my KITTI eval function, you must
        provide annotations in the correct format.
ground_truth_annotations format:
{
bbox: [N, 4], if you fill fake data, MUST HAVE >25 HEIGHT!!!!!!
alpha: [N], you can use -10 to ignore it.
occluded: [N], you can use zero.
truncated: [N], you can use zero.
name: [N]
location: [N, 3] center of 3d box.
dimensions: [N, 3] dim of 3d box.
rotation_y: [N] angle.
}
        all fields must be filled, but some fields can be filled
        with zeros.
"""
if "annos" not in self._kitti_infos[0]:
return None
gt_annos = [info["annos"] for info in self._kitti_infos]
dt_annos = self.convert_detection_to_kitti_annos(detections)
# firstly convert standard detection to kitti-format dt annos
z_axis = 1 # KITTI camera format use y as regular "z" axis.
z_center = 1.0 # KITTI camera box's center is [0.5, 1, 0.5]
# for regular raw lidar data, z_axis = 2, z_center = 0.5.
result_official_dict = get_official_eval_result(
gt_annos,
dt_annos,
self._class_names,
z_axis=z_axis,
z_center=z_center)
result_coco = get_coco_eval_result(
gt_annos,
dt_annos,
self._class_names,
z_axis=z_axis,
z_center=z_center)
return {
"results": {
"official": result_official_dict["result"],
"coco": result_coco["result"],
},
"detail": {
"eval.kitti": {
"official": result_official_dict["detail"],
"coco": result_coco["detail"]
}
},
}
def __getitem__(self, idx):
input_dict = self.get_sensor_data(idx)
example = self._prep_func(input_dict=input_dict)
example["metadata"] = {}
if "image_idx" in input_dict["metadata"]:
example["metadata"] = input_dict["metadata"]
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
def get_sensor_data(self, query):
read_image = False
idx = query
if isinstance(query, dict):
read_image = "cam" in query
assert "lidar" in query
idx = query["lidar"]["idx"]
info = self._kitti_infos[idx]
res = {
"lidar": {
"type": "lidar",
"points": None,
},
"metadata": {
"image_idx": info["image"]["image_idx"],
"image_shape": info["image"]["image_shape"],
},
"calib": None,
"cam": {}
}
pc_info = info["point_cloud"]
velo_path = Path(pc_info['velodyne_path'])
if not velo_path.is_absolute():
velo_path = Path(self._root_path) / pc_info['velodyne_path']
velo_reduced_path = velo_path.parent.parent / (
velo_path.parent.stem + '_reduced') / velo_path.name
if velo_reduced_path.exists():
velo_path = velo_reduced_path
points = np.fromfile(
str(velo_path), dtype=np.float32,
count=-1).reshape([-1, self.NumPointFeatures])
res["lidar"]["points"] = points
image_info = info["image"]
image_path = image_info['image_path']
if read_image:
image_path = self._root_path / image_path
with open(str(image_path), 'rb') as f:
image_str = f.read()
res["cam"] = {
"type": "camera",
"data": image_str,
"datatype": image_path.suffix[1:],
}
calib = info["calib"]
calib_dict = {
'rect': calib['R0_rect'],
'Trv2c': calib['Tr_velo_to_cam'],
'P2': calib['P2'],
}
res["calib"] = calib_dict
if 'annos' in info:
annos = info['annos']
# we need other objects to avoid collision when sample
annos = kitti.remove_dontcare(annos)
locs = annos["location"]
dims = annos["dimensions"]
rots = annos["rotation_y"]
gt_names = annos["name"]
# rots = np.concatenate([np.zeros([locs.shape[0], 2], dtype=np.float32), rots], axis=1)
gt_boxes = np.concatenate([locs, dims, rots[..., np.newaxis]],
axis=1).astype(np.float32)
calib = info["calib"]
gt_boxes = box_np_ops.box_camera_to_lidar(
gt_boxes, calib["R0_rect"], calib["Tr_velo_to_cam"])
# only center format is allowed. so we need to convert
# kitti [0.5, 0.5, 0] center to [0.5, 0.5, 0.5]
box_np_ops.change_box3d_center_(gt_boxes, [0.5, 0.5, 0],
[0.5, 0.5, 0.5])
res["lidar"]["annotations"] = {
'boxes': gt_boxes,
'names': gt_names,
}
res["cam"]["annotations"] = {
'boxes': annos["bbox"],
'names': gt_names,
}
return res
def convert_to_kitti_info_version2(info):
"""convert kitti info v1 to v2 if possible.
"""
if "image" not in info or "calib" not in info or "point_cloud" not in info:
info["image"] = {
'image_shape': info["img_shape"],
'image_idx': info['image_idx'],
'image_path': info['img_path'],
}
info["calib"] = {
"R0_rect": info['calib/R0_rect'],
"Tr_velo_to_cam": info['calib/Tr_velo_to_cam'],
"P2": info['calib/P2'],
}
info["point_cloud"] = {
"velodyne_path": info['velodyne_path'],
}
def kitti_anno_to_label_file(annos, folder):
folder = Path(folder)
for anno in annos:
image_idx = anno["metadata"]["image_idx"]
label_lines = []
for j in range(anno["bbox"].shape[0]):
label_dict = {
'name': anno["name"][j],
'alpha': anno["alpha"][j],
'bbox': anno["bbox"][j],
'location': anno["location"][j],
'dimensions': anno["dimensions"][j],
'rotation_y': anno["rotation_y"][j],
'score': anno["score"][j],
}
label_line = kitti.kitti_result_line(label_dict)
label_lines.append(label_line)
label_file = folder / f"{kitti.get_image_index_str(image_idx)}.txt"
label_str = '\n'.join(label_lines)
with open(label_file, 'w') as f:
f.write(label_str)
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def _calculate_num_points_in_gt(data_path,
infos,
relative_path,
remove_outside=True,
num_features=4):
for info in infos:
pc_info = info["point_cloud"]
image_info = info["image"]
calib = info["calib"]
if relative_path:
v_path = str(Path(data_path) / pc_info["velodyne_path"])
else:
v_path = pc_info["velodyne_path"]
points_v = np.fromfile(
v_path, dtype=np.float32, count=-1).reshape([-1, num_features])
rect = calib['R0_rect']
Trv2c = calib['Tr_velo_to_cam']
P2 = calib['P2']
if remove_outside:
points_v = box_np_ops.remove_outside_points(
points_v, rect, Trv2c, P2, image_info["image_shape"])
# points_v = points_v[points_v[:, 0] > 0]
annos = info['annos']
num_obj = len([n for n in annos['name'] if n != 'DontCare'])
# annos = kitti.filter_kitti_anno(annos, ['DontCare'])
dims = annos['dimensions'][:num_obj]
loc = annos['location'][:num_obj]
rots = annos['rotation_y'][:num_obj]
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
gt_boxes_lidar = box_np_ops.box_camera_to_lidar(
gt_boxes_camera, rect, Trv2c)
indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar)
num_points_in_gt = indices.sum(0)
num_ignored = len(annos['dimensions']) - num_obj
num_points_in_gt = np.concatenate(
[num_points_in_gt, -np.ones([num_ignored])])
annos["num_points_in_gt"] = num_points_in_gt.astype(np.int32)
def create_kitti_info_file(data_path, save_path=None, relative_path=True):
imageset_folder = Path(__file__).resolve().parent / "ImageSets"
train_img_ids = _read_imageset_file(str(imageset_folder / "train.txt"))
val_img_ids = _read_imageset_file(str(imageset_folder / "val.txt"))
test_img_ids = _read_imageset_file(str(imageset_folder / "test.txt"))
print("Generate info. this may take several minutes.")
if save_path is None:
save_path = Path(data_path)
else:
save_path = Path(save_path)
kitti_infos_train = kitti.get_kitti_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
image_ids=train_img_ids,
relative_path=relative_path)
_calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path)
filename = save_path / 'kitti_infos_train.pkl'
print(f"Kitti info train file is saved to {filename}")
with open(filename, 'wb') as f:
pickle.dump(kitti_infos_train, f)
kitti_infos_val = kitti.get_kitti_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
image_ids=val_img_ids,
relative_path=relative_path)
_calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path)
filename = save_path / 'kitti_infos_val.pkl'
print(f"Kitti info val file is saved to {filename}")
with open(filename, 'wb') as f:
pickle.dump(kitti_infos_val, f)
filename = save_path / 'kitti_infos_trainval.pkl'
print(f"Kitti info trainval file is saved to {filename}")
with open(filename, 'wb') as f:
pickle.dump(kitti_infos_train + kitti_infos_val, f)
kitti_infos_test = kitti.get_kitti_image_info(
data_path,
training=False,
label_info=False,
velodyne=True,
calib=True,
image_ids=test_img_ids,
relative_path=relative_path)
filename = save_path / 'kitti_infos_test.pkl'
print(f"Kitti info test file is saved to {filename}")
with open(filename, 'wb') as f:
pickle.dump(kitti_infos_test, f)
def _create_reduced_point_cloud(data_path,
info_path,
save_path=None,
back=False):
with open(info_path, 'rb') as f:
kitti_infos = pickle.load(f)
for info in prog_bar(kitti_infos):
pc_info = info["point_cloud"]
image_info = info["image"]
calib = info["calib"]
v_path = pc_info['velodyne_path']
v_path = Path(data_path) / v_path
points_v = np.fromfile(
str(v_path), dtype=np.float32, count=-1).reshape([-1, 4])
rect = calib['R0_rect']
P2 = calib['P2']
Trv2c = calib['Tr_velo_to_cam']
# first remove z < 0 points
# keep = points_v[:, -1] > 0
# points_v = points_v[keep]
# then remove outside.
if back:
points_v[:, 0] = -points_v[:, 0]
points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2,
image_info["image_shape"])
if save_path is None:
save_filename = v_path.parent.parent / (
v_path.parent.stem + "_reduced") / v_path.name
# save_filename = str(v_path) + '_reduced'
if back:
save_filename += "_back"
else:
save_filename = str(Path(save_path) / v_path.name)
if back:
save_filename += "_back"
with open(save_filename, 'w') as f:
points_v.tofile(f)
def create_reduced_point_cloud(data_path,
train_info_path=None,
val_info_path=None,
test_info_path=None,
save_path=None,
with_back=False):
if train_info_path is None:
train_info_path = Path(data_path) / 'kitti_infos_train.pkl'
if val_info_path is None:
val_info_path = Path(data_path) / 'kitti_infos_val.pkl'
if test_info_path is None:
test_info_path = Path(data_path) / 'kitti_infos_test.pkl'
_create_reduced_point_cloud(data_path, train_info_path, save_path)
_create_reduced_point_cloud(data_path, val_info_path, save_path)
_create_reduced_point_cloud(data_path, test_info_path, save_path)
if with_back:
_create_reduced_point_cloud(
data_path, train_info_path, save_path, back=True)
_create_reduced_point_cloud(
data_path, val_info_path, save_path, back=True)
_create_reduced_point_cloud(
data_path, test_info_path, save_path, back=True)
if __name__ == "__main__":
fire.Fire()
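# Hedged CLI sketch (python-fire exposes the module-level functions above; the data path is a placeholder):
#   python kitti_dataset.py create_kitti_info_file --data_path=/path/to/KITTI
#   python kitti_dataset.py create_reduced_point_cloud --data_path=/path/to/KITTI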
| 39.036481
| 99
| 0.555604
|
b7f804d5f3d83c404be7dbcbd8ab14705061d989
| 4,232
|
py
|
Python
|
salt-2016.3.3/salt/beacons/service.py
|
stephane-martin/salt-debian-packaging
|
4ec73750ba67bfe35a5bc0faa110f2bdec5c6a66
|
[
"Apache-2.0"
] | null | null | null |
salt-2016.3.3/salt/beacons/service.py
|
stephane-martin/salt-debian-packaging
|
4ec73750ba67bfe35a5bc0faa110f2bdec5c6a66
|
[
"Apache-2.0"
] | null | null | null |
salt-2016.3.3/salt/beacons/service.py
|
stephane-martin/salt-debian-packaging
|
4ec73750ba67bfe35a5bc0faa110f2bdec5c6a66
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Send events covering service status
'''
# Import Python Libs
from __future__ import absolute_import
import os
import logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
LAST_STATUS = {}
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for service beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for service beacon must be a dictionary.')
return True, 'Valid beacon configuration'
def beacon(config):
'''
Scan for the configured services and fire events
Example Config
.. code-block:: yaml
beacons:
service:
salt-master:
mysql:
The config above sets up beacons to check for
the salt-master and mysql services.
The config also supports two other parameters for each service:
`onchangeonly`: when `onchangeonly` is True the beacon will fire
events only when the service status changes. Otherwise, it will fire an
event at each beacon interval. The default is False.
`emitatstartup`: when `emitatstartup` is False the beacon will not fire
    an event when the minion is reloaded. Applicable only when `onchangeonly` is True.
The default is True.
`uncleanshutdown`: If `uncleanshutdown` is present it should point to the
location of a pid file for the service. Most services will not clean up
this pid file if they are shutdown uncleanly (e.g. via `kill -9`) or if they
are terminated through a crash such as a segmentation fault. If the file is
present, then the beacon will add `uncleanshutdown: True` to the event. If
not present, the field will be False. The field is only added when the
service is NOT running. Omitting the configuration variable altogether will
turn this feature off.
Please note that some init systems can remove the pid file if the service
registers as crashed. One such example is nginx on CentOS 7, where the
service unit removes the pid file when the service shuts down (IE: the pid
file is observed as removed when kill -9 is sent to the nginx master
process). The 'uncleanshutdown' option might not be of much use there,
unless the unit file is modified.
Here is an example that will fire an event whenever the state of nginx
changes and report an uncleanshutdown. This example is for Arch, which
places nginx's pid file in `/run`.
.. code-block:: yaml
beacons:
service:
nginx:
onchangeonly: True
uncleanshutdown: /run/nginx.pid
'''
ret = []
for service in config:
ret_dict = {}
ret_dict[service] = {'running': __salt__['service.status'](service)}
        # If no options are given for the service, we fall back to the defaults
        # and assign a False value to oncleanshutdown and onchangeonly. Those
        # key:values are then added to the service dictionary.
if config[service] is None:
defaults = {
'oncleanshutdown': False,
'emitatstartup': True,
'onchangeonly': False
}
config[service] = defaults
# We only want to report the nature of the shutdown
# if the current running status is False
# as well as if the config for the beacon asks for it
if 'uncleanshutdown' in config[service] and not ret_dict[service]['running']:
filename = config[service]['uncleanshutdown']
            ret_dict[service]['uncleanshutdown'] = os.path.exists(filename)
if 'onchangeonly' in config[service] and config[service]['onchangeonly'] is True:
if service not in LAST_STATUS:
LAST_STATUS[service] = ret_dict[service]
                if not config[service].get('emitatstartup', True):
continue
else:
ret.append(ret_dict)
if LAST_STATUS[service] != ret_dict[service]:
LAST_STATUS[service] = ret_dict[service]
ret.append(ret_dict)
else:
ret.append(ret_dict)
return ret
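# Hedged example (not part of upstream Salt): a config dict equivalent to the YAML in the
# docstring above, passed through validate() as a quick structural sanity check.
if __name__ == '__main__':
    example_config = {
        'salt-master': {'onchangeonly': True, 'emitatstartup': False},
        'nginx': {'onchangeonly': True, 'uncleanshutdown': '/run/nginx.pid'},
    }
    print(validate(example_config))  # -> (True, 'Valid beacon configuration')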
| 35.864407
| 94
| 0.650047
|
32a489b163741313f336252f6fec4048b77f4c6a
| 2,541
|
py
|
Python
|
src/sentry/tasks/email.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 2
|
2019-03-04T12:45:54.000Z
|
2019-03-04T12:45:55.000Z
|
src/sentry/tasks/email.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | 196
|
2019-06-10T08:34:10.000Z
|
2022-02-22T01:26:13.000Z
|
src/sentry/tasks/email.py
|
uandco/sentry
|
5b8d45cb71c6617dac8e64265848623fbfce9c99
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.tasks.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from sentry.auth import access
from sentry.tasks.base import instrumented_task
from sentry.utils.email import send_messages
logger = logging.getLogger(__name__)
def _get_user_from_email(group, email):
from sentry.models import User
# TODO(dcramer): we should encode the userid in emails so we can avoid this
for user in User.objects.filter(email__iexact=email):
# Make sure that the user actually has access to this project
context = access.from_user(user=user, organization=group.organization)
if not any(context.has_team(t) for t in group.project.teams.all()):
logger.warning('User %r does not have access to group %r', user, group)
continue
return user
@instrumented_task(
name='sentry.tasks.email.process_inbound_email',
queue='email',
default_retry_delay=60 * 5,
max_retries=None
)
def process_inbound_email(mailfrom, group_id, payload):
"""
"""
from sentry.models import Event, Group
from sentry.web.forms import NewNoteForm
try:
group = Group.objects.select_related('project').get(pk=group_id)
except Group.DoesNotExist:
logger.warning('Group does not exist: %d', group_id)
return
user = _get_user_from_email(group, mailfrom)
if user is None:
logger.warning('Inbound email from unknown address: %s', mailfrom)
return
event = group.get_latest_event()
if event:
Event.objects.bind_nodes([event], 'data')
event.group = group
event.project = group.project
form = NewNoteForm({'text': payload})
if form.is_valid():
form.save(group, user, event=event)
@instrumented_task(
name='sentry.tasks.email.send_email',
queue='email',
default_retry_delay=60 * 5,
max_retries=None
)
def send_email(message):
# HACK(django18) Django 1.8 assumes that message objects have a reply_to attribute
# When a message is enqueued by django 1.6 we need to patch that property on
# so that the message can be converted to a stdlib one.
#
# See
# https://github.com/django/django/blob/c686dd8e6bb3817bcf04b8f13c025b4d3c3dc6dc/django/core/mail/message.py#L273-L274
if not hasattr(message, 'reply_to'):
message.reply_to = []
send_messages([message])
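# Hedged note (an assumption, not in the original source): callers typically enqueue this
# Celery task asynchronously, e.g. send_email.delay(message), so a message serialized under
# Django 1.6 may be deserialized here under 1.8 -- hence the reply_to patch above.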
| 29.546512
| 122
| 0.693034
|
af119ba90c8cc55f1a1c712a6efe1e999b45f464
| 7,602
|
py
|
Python
|
selfdrive/car/interfaces.py
|
andy741217/dp
|
1fbd6c00e9dd6a7db2ec863cedb6d8fb1230a29a
|
[
"MIT"
] | null | null | null |
selfdrive/car/interfaces.py
|
andy741217/dp
|
1fbd6c00e9dd6a7db2ec863cedb6d8fb1230a29a
|
[
"MIT"
] | null | null | null |
selfdrive/car/interfaces.py
|
andy741217/dp
|
1fbd6c00e9dd6a7db2ec863cedb6d8fb1230a29a
|
[
"MIT"
] | null | null | null |
import os
import time
from typing import Dict
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS # 135 + 4 = 86 mph
# generic car and radar interfaces
class CarInterfaceBase():
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.low_speed_alert = False
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
self.dragonconf = None
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.
@staticmethod
def compute_gb(accel, speed):
raise NotImplementedError
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=None):
raise NotImplementedError
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint, has_relay):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.isPandaBlack = has_relay
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
# stock ACC by default
ret.enableCruise = True
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
    ret.steerRatioRear = 0. # no rear steering, at least on the listed cars above
ret.gasMaxBP = [0.]
    ret.gasMaxV = [.5] # cap the gas command at half of max
ret.brakeMaxBP = [0.]
ret.brakeMaxV = [1.]
ret.openpilotLongitudinalControl = False
ret.startAccel = 0.0
ret.minSpeedCan = 0.3
ret.stoppingBrakeRate = 0.2 # brake_travel/s while trying to stop
ret.startingBrakeRate = 0.8 # brake_travel/s while releasing on restart
ret.stoppingControl = False
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
return ret
# returns a car.CarState, pass in car.CarControl
def update(self, c, can_strings, dragonconf):
raise NotImplementedError
# return sendcan, pass in a car.CarControl
def apply(self, c):
raise NotImplementedError
def create_common_events(self, cs_out, extra_gears=[], gas_resume_speed=-1, pcm_enable=True): # pylint: disable=dangerous-default-value
events = Events()
if cs_out.doorOpen:
events.add(EventName.doorOpen)
if cs_out.seatbeltUnlatched:
events.add(EventName.seatbeltNotLatched)
if self.dragonconf.dpGearCheck and cs_out.gearShifter != GearShifter.drive and cs_out.gearShifter not in extra_gears:
events.add(EventName.wrongGear)
if cs_out.gearShifter == GearShifter.reverse:
events.add(EventName.reverseGear)
if not self.dragonconf.dpAtl and not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
if cs_out.gasPressed and not self.dragonconf.dpAllowGas and not self.dragonconf.dpAtl:
events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > self.dragonconf.dpMaxCtrlSpeed:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
if not self.dragonconf.dpLatCtrl:
events.add(EventName.manualSteeringRequired)
elif self.dragonconf.dpSteeringOnSignal and (cs_out.leftBlinker or cs_out.rightBlinker):
events.add(EventName.manualSteeringRequiredBlinkersOn)
elif cs_out.steerError:
events.add(EventName.steerUnavailable)
elif cs_out.steerWarning:
events.add(EventName.steerTempUnavailable)
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
# Optionally allow to press gas at zero speed to resume.
# e.g. Chrysler does not spam the resume button yet, so resuming with gas is handy. FIXME!
if self.dragonconf.dpAtl:
pass
elif self.dragonconf.dpAllowGas:
if cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill):
events.add(EventName.pedalPressed)
else:
if (cs_out.gasPressed and (not self.CS.out.gasPressed) and cs_out.vEgo > gas_resume_speed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase():
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase:
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def update_blinker(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
@staticmethod
def parse_gear_shifter(gear: str) -> car.CarState.GearShifter:
d: Dict[str, car.CarState.GearShifter] = {
'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive,
'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake
}
return d.get(gear, GearShifter.unknown)
@staticmethod
def get_cam_can_parser(CP):
return None
@staticmethod
def get_body_can_parser(CP):
return None
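# Hedged sketch (assumes cereal is importable, as in the imports above):
if __name__ == '__main__':
  # parse_gear_shifter maps the single-letter gear reported on CAN to the capnp
  # GearShifter enum; unknown letters fall back to GearShifter.unknown.
  print(CarStateBase.parse_gear_shifter('D'))  # drive
  print(CarStateBase.parse_gear_shifter('X'))  # unknown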
| 35.690141
| 138
| 0.70955
|
45ddb0c52df5cd3dfeec6935de15450dda601c4c
| 1,401
|
py
|
Python
|
project/models/exchange_announcement.py
|
Larryrun80/hsadmin
|
00564a0a47db064a886fc481b44f321a98abdf78
|
[
"MIT"
] | null | null | null |
project/models/exchange_announcement.py
|
Larryrun80/hsadmin
|
00564a0a47db064a886fc481b44f321a98abdf78
|
[
"MIT"
] | null | null | null |
project/models/exchange_announcement.py
|
Larryrun80/hsadmin
|
00564a0a47db064a886fc481b44f321a98abdf78
|
[
"MIT"
] | null | null | null |
from .. import db
from .base_mt_view import BaseMTView
class ExchangeAnnouncement(db.Model):
__tablename__ = 'exchange_announcement'
# Columns
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
abstract = db.Column(db.String(500))
link = db.Column(db.String(255))
is_deleted = db.Column(db.Boolean)
created_at = db.Column(db.TIMESTAMP)
updated_at = db.Column(db.TIMESTAMP)
exchange_id = db.Column(db.Integer, db.ForeignKey('market.id'))
exchange = db.relationship('Market', back_populates='announcements')
class ExchangeAnnouncementView(BaseMTView):
can_create = True
can_edit = True
column_labels = dict(
title='标题',
link='链接',
is_deleted='隐藏',
abstract='摘要',
content='内容',
created_at='创建时间',
updated_at='修改时间'
)
column_descriptions = dict(
link='点击公告后跳转的地址,无需跳转则留空',
)
column_list = (
'title',
'is_deleted',
'created_at',
'updated_at'
)
column_sortable_list = ('created_at',)
column_searchable_list = ('content',)
column_default_sort = ('id', True)
column_editable_list = ('content', 'is_deleted')
form_columns = (
'exchange',
'title',
'abstract',
'content',
'link',
'is_deleted'
)
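# Hedged registration sketch (assumes BaseMTView derives from flask_admin's ModelView and
# that an `admin` instance is created elsewhere in the project):
#   admin.add_view(ExchangeAnnouncementView(ExchangeAnnouncement, db.session))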
| 22.967213
| 72
| 0.608851
|
f3bdf62b36c6d3d397050dd6caad7b2ed62ca9d4
| 8,243
|
py
|
Python
|
data_loader/data_loader.py
|
bharat-b7/LoopReg
|
418797f28440a00a2f6489b041c0b4cabaa7997e
|
[
"Unlicense"
] | 67
|
2020-10-20T02:09:25.000Z
|
2022-03-25T03:19:40.000Z
|
data_loader/data_loader.py
|
bharat-b7/LoopReg
|
418797f28440a00a2f6489b041c0b4cabaa7997e
|
[
"Unlicense"
] | 14
|
2020-10-27T08:33:56.000Z
|
2021-09-10T09:12:09.000Z
|
data_loader/data_loader.py
|
bharat-b7/LoopReg
|
418797f28440a00a2f6489b041c0b4cabaa7997e
|
[
"Unlicense"
] | 9
|
2020-10-28T06:54:02.000Z
|
2021-11-15T13:31:14.000Z
|
"""
Dataloader for the network.
Author: Bharat
Cite: LoopReg: Self-supervised Learning of Implicit Surface Correspondences, Pose and Shape for 3D Human Mesh Registration, NeurIPS' 20.
"""
import os
from os.path import join, split, exists
import pickle as pkl
import numpy as np
from glob import glob
import codecs
# from kaolin.rep import TriangleMesh as tm
import trimesh
from psbody.mesh import Mesh
from lib.smpl_paths import SmplPaths
from torch.utils.data import Dataset, DataLoader
from make_data_split import DATA_PATH
# Number of points to sample from the scan
NUM_POINTS = 30000
class MyDataLoader(Dataset):
def __init__(self, mode, batch_sz, data_path=DATA_PATH,
split_file='assets/data_split_01.pkl', num_workers=12,
augment=False, naked=False):
self.mode = mode
self.path = data_path
with open(split_file, "rb") as f:
self.split = pkl.load(f)
self.data = self.split[mode]
self.batch_size = batch_sz
self.num_workers = num_workers
self.augment = augment
self.naked = naked
sp = SmplPaths(gender='male')
self.ref_smpl = sp.get_smpl()
self.vt, self.ft = sp.get_vt_ft()
# Load smpl part labels
with open('assets/smpl_parts_dense.pkl', 'rb') as f:
dat = pkl.load(f, encoding='latin-1')
self.smpl_parts = np.zeros((6890, 1))
for n, k in enumerate(dat):
self.smpl_parts[dat[k]] = n
def __len__(self):
return len(self.data)
def get_loader(self, shuffle=True):
return DataLoader(self, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=shuffle)
@staticmethod
def worker_init_fn(worker_id):
"""
Worker init function to ensure true randomness.
"""
base_seed = int(codecs.encode(os.urandom(4), 'hex'), 16)
np.random.seed(base_seed + worker_id)
@staticmethod
def map_mesh_points_to_reference(pts, src, ref):
"""
Finds closest points to pts on src.
Maps the closest points on src to ref.
"""
closest_face, closest_points = src.closest_faces_and_points(pts)
vert_ids, bary_coords = src.barycentric_coordinates_for_points(closest_points, closest_face.astype('int32'))
correspondences = (ref[vert_ids] * bary_coords[..., np.newaxis]).sum(axis=1)
return correspondences
@staticmethod
def map_vitruvian_vertex_color(tgt_vertices, registered_smpl_mesh,
path_to_cols='/BS/bharat-2/work/LearntRegistration/test_data/vitruvian_cols.npy'):
"""
Vitruvian vertex color are defined for SMPL mesh. This function maps these colors from registered smpl to scan.
"""
col = np.load(path_to_cols)
vids, _ = registered_smpl_mesh.closest_vertices(tgt_vertices)
vids = np.array(vids)
return col[vids]
@staticmethod
def get_rnd_rotations():
'''We want 2*pi rotation along z-axis and very small perturbations along x,y-axis'''
from scipy.spatial.transform import Rotation as R
rots = np.random.rand(1, 3)
rots[:, 0] *= np.pi * 0.01
rots[:, 2] *= np.pi * 0.01
rots[:, 1] *= np.pi * 2
t = R.from_rotvec(rots)
return t
def __getitem__(self, idx):
path = self.data[idx]
name = split(path)[1]
input_smpl = Mesh(filename=join(path, name + '_smpl.obj'))
if self.naked:
input_scan = Mesh(filename=join(path, name + '_smpl.obj'))
else:
input_scan = Mesh(filename=join(path, name + '.obj'))
temp = trimesh.Trimesh(vertices=input_scan.v, faces=input_scan.f)
points = temp.sample(NUM_POINTS)
if self.augment:
rot = self.get_rnd_rotations()
points = rot.apply(points)
input_smpl.v = rot.apply(input_smpl.v)
ind, _ = input_smpl.closest_vertices(points)
part_labels = self.smpl_parts[np.array(ind)]
correspondences = self.map_mesh_points_to_reference(points, input_smpl, self.ref_smpl.r)
if self.mode == 'train':
return {'scan': points.astype('float32'),
'correspondences': correspondences.astype('float32'),
'part_labels': part_labels.astype('float32'),
'name': path
}
vc = self.map_vitruvian_vertex_color(points, input_smpl)
return {'scan': points.astype('float32'),
'smpl': input_smpl.v.astype('float32'),
'correspondences': correspondences.astype('float32'),
'part_labels': part_labels.astype('float32'),
'scan_vc': vc,
'name': path
}
class MyDataLoaderCacher(MyDataLoader):
"""
Loads scan points, cached SMPL parameters, GT correspondences.
"""
def __init__(self, mode, batch_sz, data_path=DATA_PATH,
split_file='assets/data_split_01.pkl',
cache_suffix=None,
num_workers=12, augment=False, naked=False):
self.mode = mode
self.cache_suffix = cache_suffix
self.path = data_path
with open(split_file, "rb") as f:
self.split = pkl.load(f)
self.data = self.split[mode]
self.batch_size = batch_sz
self.num_workers = num_workers
self.augment = augment
self.naked = naked
sp = SmplPaths(gender='male')
self.ref_smpl = sp.get_smpl()
self.vt, self.ft = sp.get_vt_ft()
# Load smpl part labels
with open('assets/smpl_parts_dense.pkl', 'rb') as f:
dat = pkl.load(f, encoding='latin-1')
self.smpl_parts = np.zeros((6890, 1))
for n, k in enumerate(dat):
self.smpl_parts[dat[k]] = n
def __getitem__(self, idx):
path = self.data[idx]
name = split(path)[1]
input_smpl = Mesh(filename=join(path, name + '_smpl.obj'))
if self.naked:
input_scan = Mesh(filename=join(path, name + '_smpl.obj'))
else:
input_scan = Mesh(filename=join(path, name + '.obj'))
temp = trimesh.Trimesh(vertices=input_scan.v, faces=input_scan.f)
points = temp.sample(NUM_POINTS)
if self.augment:
rot = self.get_rnd_rotations()
points = rot.apply(points)
input_smpl.v = rot.apply(input_smpl.v)
ind, _ = input_smpl.closest_vertices(points)
part_labels = self.smpl_parts[np.array(ind)]
correspondences = self.map_mesh_points_to_reference(points, input_smpl, self.ref_smpl.r)
# Load cached SMPL params
cache_list = []
if self.cache_suffix is not None:
cache_list = sorted(glob(join(path, self.cache_suffix, '*.pkl')))
if len(cache_list) > 0:
smpl_dict = pkl.load(open(cache_list[-1], 'rb'), encoding='latin-1')
pose = smpl_dict['pose']
betas = smpl_dict['betas']
trans = smpl_dict['trans']
# print('Loading from cache ', cache_list[-1])
else:
pose = np.zeros((72,))
betas = np.zeros((10,))
trans = np.zeros((3,))
if self.mode == 'train':
return {'scan': points.astype('float32'),
'correspondences': correspondences.astype('float32'),
'part_labels': part_labels.astype('float32'),
'pose': pose.astype('float32'),
'betas': betas.astype('float32'),
'trans': trans.astype('float32'),
'name': path
}
vc = self.map_vitruvian_vertex_color(points, input_smpl)
return {'scan': points.astype('float32'),
'smpl': input_smpl.v.astype('float32'),
'correspondences': correspondences.astype('float32'),
'part_labels': part_labels.astype('float32'),
'pose': pose.astype('float32'),
'betas': betas.astype('float32'),
'trans': trans.astype('float32'),
'scan_vc': vc,
'name': path
}
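# Hedged usage sketch (assumes DATA_PATH, the split pickle and the SMPL assets are available locally):
if __name__ == '__main__':
    dataset = MyDataLoader('train', batch_sz=4, num_workers=0)
    loader = dataset.get_loader(shuffle=True)
    batch = next(iter(loader))
    print(batch['scan'].shape)  # expected: torch.Size([4, 30000, 3])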
| 37.468182
| 136
| 0.591896
|
521151e582e3dd9e11679dd01ff87dac866c5b82
| 1,006
|
py
|
Python
|
cogs/event.py
|
phantom0174/SQCS-Working-Bot
|
ae4b02e6b3fe403ae28a63f9cad8a35b04e89d45
|
[
"MIT"
] | 2
|
2021-04-11T15:55:56.000Z
|
2021-04-17T01:41:47.000Z
|
cogs/event.py
|
phantom0174/SQCS-Working-Bot
|
ae4b02e6b3fe403ae28a63f9cad8a35b04e89d45
|
[
"MIT"
] | null | null | null |
cogs/event.py
|
phantom0174/SQCS-Working-Bot
|
ae4b02e6b3fe403ae28a63f9cad8a35b04e89d45
|
[
"MIT"
] | 1
|
2021-04-13T17:49:46.000Z
|
2021-04-13T17:49:46.000Z
|
from core.classes import Cog_Extension
from discord.ext import commands
import discord
import core.functions as func
class Event(Cog_Extension):
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
await ctx.send(content=f'`{error}`', delete_after=5.0)
@commands.Cog.listener()
async def on_command(self, ctx):
cmd_name = ctx.command.name
cmd_parents = ctx.command.full_parent_name
channel_name = ctx.channel.name
user_name = ctx.author.name
user_id = ctx.author.id
message = ctx.message.content
        if len(cmd_parents) == 0:
            cmd_parents = 'N/A'
log_msg = f'[{cmd_parents}][{cmd_name}], [{channel_name}], [{user_name}][{user_id}]\n[{message}]\n'
report_channel = discord.utils.get(self.bot.guilds[0].text_channels, name='working-report')
await report_channel.send(f'[cmd exec]{log_msg}[{func.now_time_info("whole")}]')
def setup(bot):
bot.add_cog(Event(bot))
| 31.4375
| 107
| 0.66501
|
deb398c373f90bb017870a304b095121ee740c25
| 4,845
|
py
|
Python
|
hydrogels/utils/io/_lammps.py
|
debeshmandal/hydrogels
|
3ca065c21ae834ab350f9fae78cee611f945d853
|
[
"MIT"
] | 3
|
2020-05-13T01:07:30.000Z
|
2021-02-12T13:37:23.000Z
|
hydrogels/utils/io/_lammps.py
|
softnanolab/hydrogels
|
cabdec969f44a3855f09c18878b988d1fa5b75f0
|
[
"MIT"
] | 24
|
2020-06-04T13:48:57.000Z
|
2021-12-31T18:46:52.000Z
|
hydrogels/utils/io/_lammps.py
|
softnanolab/hydrogels
|
cabdec969f44a3855f09c18878b988d1fa5b75f0
|
[
"MIT"
] | 1
|
2020-07-23T17:15:23.000Z
|
2020-07-23T17:15:23.000Z
|
import re
import pandas as pd
import numpy as np
from ._core import CoreReader
from typing import Union
from softnanotools.logger import Logger
logger = Logger(__name__)
class LAMMPSDataReader(CoreReader):
def __init__(
self,
fname: str,
names: Union[dict, list, tuple] = None,
species: dict = None,
classes: Union[dict, list, tuple] = None,
**kwargs
):
super().__init__()
self.fname = fname
        if names is None:
self.names = None
else:
self.names = names
self.species = species
        if classes is None:
self.classes = [None]
else:
self.classes = classes
self._read()
def _read(self):
"""Reads a LAMMPS Data File containing configuration and
topology information"""
box = {}
with open(self.fname, 'r') as f:
for i, line in enumerate(f.readlines()):
if re.findall("atoms", line):
n_atoms = int(line.split()[0])
elif re.findall("bonds", line):
n_bonds = int(line.split()[0])
elif re.findall("xlo xhi", line):
box['x'] = [float(j) for j in line.split()[:2]]
elif re.findall("ylo yhi", line):
box['y'] = [float(j) for j in line.split()[:2]]
elif re.findall("zlo zhi", line):
box['z'] = [float(j) for j in line.split()[:2]]
elif re.findall("Atoms", line):
skip_atoms = i + 1
elif re.findall("Bonds", line):
skip_bonds = i + 1
break
self.metadata['box'] = np.array([
([float(i) for i in box['x']][1] - [float(i) for i in box['x']][0]),
([float(i) for i in box['y']][1] - [float(i) for i in box['y']][0]),
([float(i) for i in box['z']][1] - [float(i) for i in box['z']][0]),
])
logger.debug(f'Box: {self.metadata["box"]}')
atoms = pd.read_csv(
self.fname,
delim_whitespace=True,
header=None,
nrows=n_atoms,
skiprows=skip_atoms,
).rename(columns={
0: 'id',
1: 'mol',
2: 'type',
3: 'x',
4: 'y',
5: 'z',
}).sort_values('id').reset_index(drop=True)
logger.debug(f'ATOMS:\n{atoms}')
try:
assert len(atoms) == n_atoms
assert atoms['id'].iloc[0] == 1
assert atoms['id'].iloc[-1] == n_atoms
except:
logger.error('Assertion Error when importing Atoms')
bonds = pd.read_csv(
self.fname,
delim_whitespace=True,
header=None,
nrows=n_bonds,
skiprows=skip_bonds,
).rename(columns={
0: 'id',
1: 'type',
2: 'atom_1',
3: 'atom_2',
}).sort_values('id').reset_index(drop=True)
logger.debug(f'BONDS:\n{bonds}')
try:
assert len(bonds) == n_bonds
assert bonds['id'].iloc[0] == 1
assert bonds['id'].iloc[-1] == n_bonds
except:
logger.error('Assertion Error when importing Bonds')
mols = set(list(atoms['mol']))
for idx, i in enumerate(mols):
if isinstance(self.names, dict):
name = self.names[i]
cls = self.classes[i]
elif isinstance(self.names, (list, tuple)):
name = self.names[idx]
cls = self.classes[idx]
else:
name = i
cls = None
mol = atoms[atoms['mol']==i]
logger.debug(f"For molecule[{idx+1}] {name}:\n\nAtoms:\n{mol}")
sequence = mol['type'].apply(
lambda x: self.species[x] if self.species != None else x
)
positions = mol[['x', 'y', 'z']]
edges = []
if cls != None:
for j, row in bonds.iterrows():
if row['atom_1'] in mol['id']:
edges.append((row['atom_1']-1, row['atom_2']-1))
elif row['atom_2'] in mol['id']:
edges.append((row['atom_1']-1, row['atom_2']-1))
logger.debug(f"Edges:\n{pd.DataFrame(edges)}")
if len(edges) != 0:
logger.info(f'Adding <{name}> to topology')
self.add_topology(name, list(sequence), positions.to_numpy(), edges, cls=cls)
else:
logger.info(f'Adding <{name}> to particles')
self.add_particles(name, positions.to_numpy())
# delete edges list
del edges
return
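# Hedged usage sketch (file name, species map and molecule names are placeholders):
#   reader = LAMMPSDataReader('gel.data', names=['polymer'], species={1: 'A', 2: 'B'}, classes=[None])
#   print(reader.metadata['box'])   # box edge lengths parsed from the data file header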
| 31.461039
| 93
| 0.467905
|
80b1bf1e6c8d9152041b21df7dbd89f0af540356
| 6,319
|
py
|
Python
|
QRServer/lobby/lobbyclient.py
|
Fruktus/QuadradiusPreservationProject
|
6ae90cfaa015ce0e585ea723887747481f831505
|
[
"MIT"
] | 11
|
2021-01-07T15:40:29.000Z
|
2022-03-25T00:53:50.000Z
|
QRServer/lobby/lobbyclient.py
|
Fruktus/QuadradiusPreservationProject
|
6ae90cfaa015ce0e585ea723887747481f831505
|
[
"MIT"
] | 3
|
2021-02-03T10:12:05.000Z
|
2022-01-30T10:26:04.000Z
|
QRServer/lobby/lobbyclient.py
|
Fruktus/QuadradiusPreservationProject
|
6ae90cfaa015ce0e585ea723887747481f831505
|
[
"MIT"
] | 3
|
2021-01-30T21:46:11.000Z
|
2022-03-25T00:53:53.000Z
|
import logging
from datetime import datetime
from QRServer import config
from QRServer.common import utils
from QRServer.common.classes import RankingEntry, LobbyPlayer
from QRServer.common.clienthandler import ClientHandler
from QRServer.common.messages import BroadcastCommentResponse, OldSwfResponse, LobbyDuplicateResponse, \
ServerAliveResponse, LobbyBadMemberResponse, LastPlayedResponse, ServerRankingResponse, HelloLobbyRequest, \
JoinLobbyRequest, ServerRecentRequest, ServerRankingRequest, ServerAliveRequest, LobbyStateResponse, \
ResponseMessage, LobbyChatMessage, SetCommentRequest, ChallengeMessage, ChallengeAuthMessage, DisconnectRequest, \
PolicyFileRequest, CrossDomainPolicyAllowAllResponse
from QRServer.db.connector import connector
log = logging.getLogger('lobby_client_handler')
class LobbyClientHandler(ClientHandler):
player: LobbyPlayer
def __init__(self, client_socket, lobby_server):
super().__init__(client_socket)
self.lobby_server = lobby_server
self.player = LobbyPlayer()
self.register_message_handler(PolicyFileRequest, self._handle_policy)
self.register_message_handler(HelloLobbyRequest, self._handle_hello_lobby)
self.register_message_handler(JoinLobbyRequest, self._handle_join_lobby)
self.register_message_handler(ServerRecentRequest, self._handle_server_recent)
self.register_message_handler(ServerRankingRequest, self._handle_server_ranking)
self.register_message_handler(ServerAliveRequest, self._handle_server_alive)
self.register_message_handler(SetCommentRequest, self._handle_set_comment)
self.register_message_handler(LobbyChatMessage, self._handle_broadcast)
self.register_message_handler(ChallengeMessage, self._handle_challenge)
self.register_message_handler(ChallengeAuthMessage, self._handle_challenge_auth)
self.register_message_handler(DisconnectRequest, self._handle_disconnect)
def get_username(self) -> str:
return self.player.username
def get_joined_at(self) -> datetime:
return self.player.joined_at
def get_player(self) -> LobbyPlayer:
return self.player
def _handle_policy(self, message: PolicyFileRequest):
log.debug('policy file requested')
self.send_msg(CrossDomainPolicyAllowAllResponse())
def _handle_hello_lobby(self, message: HelloLobbyRequest):
swf_version = message.get_swf_version()
if swf_version != 5:
self.send_msg(OldSwfResponse())
log.debug('Client with invalid version tried to connect, version: {}'.format(swf_version))
self.close()
def _handle_join_lobby(self, message: JoinLobbyRequest):
username = message.get_username()
password = message.get_password()
is_guest = utils.is_guest(username, password)
self.player.user_id = connector().authenticate_member(username, password)
if not is_guest and not config.auth_disable.get():
if self.player.user_id is None:
log.debug('Player {} tried to connect, but failed to authenticate'.format(username))
self._error_bad_member()
self.close()
return
if self.lobby_server.username_exists(username):
log.debug('Client duplicate in lobby: ' + username)
self.send_msg(LobbyDuplicateResponse())
                self.close()  # FIXME it seems that the connection shouldn't be completely closed
return
# user authenticated successfully, register with lobbyserver
self.player.username = username
self.player.joined_at = datetime.now()
self.player.communique = connector().get_comment(self.player.user_id) or ' '
self.player.idx = self.lobby_server.add_client(self)
self.send_msg(LobbyStateResponse(self.lobby_server.get_players()))
if is_guest:
log.info('Guest joined lobby: ' + username)
else:
log.info('Member joined lobby: ' + username)
def _handle_challenge(self, message: ChallengeMessage):
challenger_idx = message.get_challenger_idx()
challenged_idx = message.get_challenged_idx()
log.debug('Challenge issued')
self.lobby_server.challenge_user(challenger_idx, challenged_idx)
def _handle_challenge_auth(self, message: ChallengeAuthMessage):
challenger_idx = message.get_challenger_idx()
challenged_idx = message.get_challenged_idx()
challenger_auth = message.get_auth()
self.lobby_server.setup_challenge(challenger_idx, challenged_idx, challenger_auth)
def _handle_server_recent(self, message: ServerRecentRequest):
self.send_msg(self.lobby_server.get_last_logged())
self.send_msg(LastPlayedResponse([]))
def _handle_server_ranking(self, message: ServerRankingRequest):
self.send_msg(ServerRankingResponse(True, [
RankingEntry(player='test', wins=12, games=30),
RankingEntry(player='test2', wins=2, games=2),
]))
def _handle_server_alive(self, message: ServerAliveRequest):
self.send_msg(ServerAliveResponse())
def _handle_set_comment(self, message: SetCommentRequest):
who = message.get_idx()
comment = message.get_comment()
if who != self.player.idx:
log.debug('Error while setting comment: wrong idx, expected {} was {}'.format(self.player.idx, who))
return
if self.player.user_id:
connector().set_comment(self.player.user_id, comment)
self.player.comment = comment
self.lobby_server.broadcast_msg(BroadcastCommentResponse(who, comment))
def _handle_broadcast(self, message):
if not isinstance(message, ResponseMessage):
raise Exception('Trying to send a non-response message')
self.lobby_server.broadcast_msg(message)
def _handle_disconnect(self, message: DisconnectRequest):
log.debug('Connection closed by client')
if self.player.idx is not None:
log.info('Player left lobby: {}'.format(self.player.username))
self.lobby_server.remove_client(self.player.idx)
self.close()
def _error_bad_member(self):
self.send_msg(LobbyBadMemberResponse())
| 44.815603
| 118
| 0.718943
|
376d9fa34d60caa92fd2c283c51725604a0bca2e
| 7,020
|
py
|
Python
|
aov_stats.py
|
pkollias/GatingInWorkingMemory
|
fc7fa544751cc878b89215d90acf0514345174e1
|
[
"MIT"
] | null | null | null |
aov_stats.py
|
pkollias/GatingInWorkingMemory
|
fc7fa544751cc878b89215d90acf0514345174e1
|
[
"MIT"
] | null | null | null |
aov_stats.py
|
pkollias/GatingInWorkingMemory
|
fc7fa544751cc878b89215d90acf0514345174e1
|
[
"MIT"
] | null | null | null |
from rec_format import *
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from itertools import groupby
import pandas as pd
import numpy as np
import warnings
def aov(df, y, x_a, x_b):
formula = "{0} ~ C({1})".format(y, x_a) if x_b is None else '{0} ~ C({1}) + C({2}) + C({1}):C({2})'.format(y, x_a, x_b)
model = ols(formula, df).fit()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
aov_table = anova_lm(model, typ=2)
    return omega_squared(aov_table)
def eta_squared(aov):
aov['eta_sq'] = 'NaN'
aov['eta_sq'] = aov[:-1]['sum_sq']/sum(aov['sum_sq'])
return aov
def omega_squared(aov):
mse = aov['sum_sq'][-1]/aov['df'][-1]
aov['omega_sq'] = 'NaN'
aov['omega_sq'] = (aov[:-1]['sum_sq']-(aov[:-1]['df']*mse))/(sum(aov['sum_sq'])+mse)
return aov
def aov_2_shuffles(df, y, x_a, x_b, x_ab, num_shuffles=1, group_column_list=[]):
def shuffle_df(df, y, x_a, x_b, shuffle_i):
return pd.concat([df[x_a].sample(frac=1, random_state=shuffle_i).reset_index(drop=True),
df[x_b].sample(frac=1, random_state=shuffle_i).reset_index(drop=True),
df[y].reset_index(drop=True)], axis=1)
observed_aov = aov(df, y, x_a, x_b)
# anovas
anovas = pd.DataFrame(columns=[x_a, x_b, x_ab])
# observed
anovas.loc['observed'] = list(observed_aov['omega_sq'][0:3])
# shuffled
for shuffle_i in range(num_shuffles - 1):
if group_column_list == []:
df_shuffle = shuffle_df(df, y, x_a, x_b, shuffle_i)
else:
df_grouper = df.groupby(group_column_list)
dfg_list = [df_grouper.get_group(k) for k in df_grouper.groups.keys()]
df_shuffle_list = [shuffle_df(dfg, y, x_a, x_b, shuffle_i)
for dfg in dfg_list]
df_shuffle = pd.concat(df_shuffle_list, axis=0, ignore_index=True)
anovas.loc['shuffle_{0:04d}'.format(shuffle_i)] = list(aov(df_shuffle, y, x_a, x_b)['omega_sq'][0:3])
return (anovas, observed_aov)
def aov_1_shuffles(df, y, x, num_shuffles=1, group_column_list=[]):
def shuffle_df(df, y, x, shuffle_i):
return pd.concat([df[x].sample(frac=1, random_state=shuffle_i).reset_index(drop=True),
df[y].reset_index(drop=True)], axis=1)
observed_aov = aov(df, y, x, None)
# anovas
anovas = pd.DataFrame(columns=[x])
# observed
anovas.loc['observed'] = [observed_aov['omega_sq'][0]]
# shuffled
for shuffle_i in range(num_shuffles - 1):
if group_column_list == []:
df_shuffle = shuffle_df(df, y, x, shuffle_i)
else:
df_grouper = df.groupby(group_column_list)
dfg_list = [df_grouper.get_group(k) for k in df_grouper.groups.keys()]
df_shuffle_list = [shuffle_df(dfg, y, x, shuffle_i)
for dfg in dfg_list]
df_shuffle = pd.concat(df_shuffle_list, axis=0, ignore_index=True)
anovas.loc['shuffle_{0:04d}'.format(shuffle_i)] = [aov(df_shuffle, y, x, None)['omega_sq'][0]]
return (anovas, observed_aov)
def aov_2_shuffle_results(df, anovas, observed_aov, y, x_a, x_b, x_ab):
unit_results = {'means': {x_a: df[[x_a, y]].groupby(x_a).mean().dropna(),
x_b: df[[x_b, y]].groupby(x_b).mean().dropna(),
x_ab.format(x_a, x_b): df[[x_a, x_b, y]].groupby([x_a, x_b]).mean().dropna()},
'anova': observed_aov,
'shuffles': anovas}
return unit_results
def aov_1_shuffle_results(df, anovas, observed_aov, y, x):
unit_results = {'means': {x: df[[x, y]].groupby(x).mean().dropna()},
'anova': observed_aov,
'shuffles': anovas}
return unit_results
def aov_shuffle_and_results(df, y, x_list, num_shuffles, group_column_list = []):
if len(x_list) == 2:
x_a = x_list[0]
x_b = x_list[1]
x_ab = interaction_term(x_a, x_b)
anovas, observed_aov = aov_2_shuffles(df, y, x_a, x_b, x_ab, num_shuffles, group_column_list)
unit_results = aov_2_shuffle_results(df, anovas, observed_aov, y, x_a, x_b, x_ab)
elif len(x_list) == 1:
x = x_list[0]
anovas, observed_aov = aov_1_shuffles(df, y, x, num_shuffles, group_column_list)
unit_results = aov_1_shuffle_results(df, anovas, observed_aov, y, x)
return unit_results
def sorted_percentile(sorted_array, perc):
pos = (len(sorted_array) - 1) * perc / 100
    x1 = int(np.floor(pos))
    if x1 == pos:
        # the requested percentile falls exactly on an index (e.g. perc == 100),
        # so return that element directly instead of reading one past the end
        return sorted_array[x1]
    x2 = int(np.ceil(pos))
y1 = sorted_array[x1]
y2 = sorted_array[x2]
slope = (y2 - y1) / (x2 - x1)
intercept = y1 - slope * x1
return slope * pos + intercept
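# Illustrative sketch (hedged, not part of the original module): for an already
# sorted array this linear interpolation agrees with numpy's default behaviour,
# e.g. the 75th percentile of [1, 2, 3, 4] lies 0.25 of the way from index 2 to
# index 3, i.e. 3 + 0.25 * (4 - 3) = 3.25.
def _example_sorted_percentile():
    values = [1, 2, 3, 4]
    assert abs(sorted_percentile(values, 75) - np.percentile(values, 75)) < 1e-9
    return sorted_percentile(values, 75)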
# round omega squared values and correct for zero shuffles (all values equal to zero)
# return unit validity, omega distribution, threshold, omega difference above threshold, and zscored observed
def evaluate_anova_shuffles(shuffles_series, percentile, shuffles):
rounding_decimal = 10
anova_round = lambda x: round(x, rounding_decimal)
shuffles_index = [shuffle_to_name(ii) for ii in range(shuffles)]
if len(shuffles_series) == 1:
valid = False
omega_sq_distr = pd.Series(data=0, index=shuffles_index)
omega_sq_observed = omega_sq_distr.loc[shuffle_to_name(0)]
threshold = np.inf
omega_sq_diff = pd.Series(data=-np.inf, index=shuffles_index)
zscore = 0
else:
valid = True
omega_sq_distr = shuffles_series.apply(anova_round)
omega_sq_observed = omega_sq_distr.loc[shuffle_to_name(0)]
shuffle_values = omega_sq_distr.loc[~omega_sq_distr.index.isin([shuffle_to_name(0)])]
omega_sq_mean = np.mean(shuffle_values)
omega_sq_std = np.std(shuffle_values)
threshold = anova_round(np.percentile(shuffle_values, percentile, interpolation='linear'))
omega_sq_diff = omega_sq_distr.apply(lambda x: anova_round(x - threshold))
zscore = anova_round((omega_sq_observed - omega_sq_mean) / omega_sq_std) if omega_sq_std > 0 else 0
return {'valid': valid,
'omega_sq_distr': omega_sq_distr,
'omega_sq_observed': omega_sq_observed,
'threshold': threshold,
'omega_sq_diff': omega_sq_diff,
'zscore': zscore}
def clusters_from_shuffle_list(omega_sq_diff_list):
# returns cluster list of cluster_result cr where each cluster consists of a list of consecutive significant timebin
# tuples of the time value and the cluster difference
clusters_list = [list(gr) for val, gr in groupby(omega_sq_diff_list, key=lambda x: x[1] > 0) if val]
# convert that into a list of tuples [(timebins, diff_values), ...]
return [tuple(map(list, zip(*cl))) for cl in clusters_list]
def get_clusters(clusters):
return [{'bins': cv[0], 'val': sum(cv[1])} for cv in clusters]
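# Illustrative sketch (hedged, not part of the original module): consecutive
# timebins whose omega-squared difference is positive are grouped into clusters,
# and each cluster statistic is the sum of its differences.
def _example_clusters():
    diffs = [(0, -0.1), (1, 0.2), (2, 0.3), (3, -0.4), (4, 0.5)]
    clusters = clusters_from_shuffle_list(diffs)
    # clusters == [([1, 2], [0.2, 0.3]), ([4], [0.5])]
    return get_clusters(clusters)
    # -> [{'bins': [1, 2], 'val': approx. 0.5}, {'bins': [4], 'val': 0.5}]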
| 39.217877
| 123
| 0.63661
|
7081fd1e35d01ce93cc4b4ca44bbe27fe62cf8ff
| 7,085
|
py
|
Python
|
ortools/constraint_solver/samples/cvrp.py
|
kharazian/or-tools-em
|
8df912821e013203523ba433ff2babbbc91c6a4b
|
[
"Apache-2.0"
] | null | null | null |
ortools/constraint_solver/samples/cvrp.py
|
kharazian/or-tools-em
|
8df912821e013203523ba433ff2babbbc91c6a4b
|
[
"Apache-2.0"
] | null | null | null |
ortools/constraint_solver/samples/cvrp.py
|
kharazian/or-tools-em
|
8df912821e013203523ba433ff2babbbc91c6a4b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# This Python file uses the following encoding: utf-8
# Copyright 2015 Tin Arm Engineering AB
# Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Capacitated Vehicle Routing Problem (CVRP).
This is a sample using the routing library python wrapper to solve a CVRP
problem.
A description of the problem can be found here:
http://en.wikipedia.org/wiki/Vehicle_routing_problem.
Distances are in meters.
"""
from functools import partial
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
###########################
# Problem Data Definition #
###########################
def create_data_model():
"""Stores the data for the problem"""
data = {}
# Locations in block unit
_locations = \
[(4, 4), # depot
(2, 0), (8, 0), # locations to visit
(0, 1), (1, 1),
(5, 2), (7, 2),
(3, 3), (6, 3),
(5, 5), (8, 5),
(1, 6), (2, 6),
(3, 7), (6, 7),
(0, 8), (7, 8)]
    # Compute locations in meters using the block dimension defined as follows
# Manhattan average block: 750ft x 264ft -> 228m x 80m
# here we use: 114m x 80m city block
# src: https://nyti.ms/2GDoRIe 'NY Times: Know Your distance'
data['locations'] = [(l[0] * 114, l[1] * 80) for l in _locations]
data['num_locations'] = len(data['locations'])
data['demands'] = \
[0, # depot
1, 1, # 1, 2
2, 4, # 3, 4
2, 4, # 5, 6
8, 8, # 7, 8
1, 2, # 9,10
1, 2, # 11,12
4, 4, # 13, 14
8, 8] # 15, 16
data['num_vehicles'] = 4
data['vehicle_capacity'] = 15
data['depot'] = 0
return data
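# Illustrative check (hedged, not part of the original sample): the toy instance
# is sized so that total demand exactly matches total fleet capacity,
# sum(demands) = 60 and 4 vehicles * 15 units = 60.
def _example_capacity_check():
    data = create_data_model()
    assert sum(data['demands']) == data['num_vehicles'] * data['vehicle_capacity'] == 60
    return sum(data['demands'])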
#######################
# Problem Constraints #
#######################
def manhattan_distance(position_1, position_2):
"""Computes the Manhattan distance between two points"""
return (
abs(position_1[0] - position_2[0]) + abs(position_1[1] - position_2[1]))
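# Illustrative check (hedged, not part of the original sample): the depot at
# block (4, 4) maps to (456, 320) meters and location (2, 0) maps to (228, 0)
# meters, so their Manhattan distance is |456 - 228| + |320 - 0| = 548 meters.
def _example_manhattan_distance():
    assert manhattan_distance((456, 320), (228, 0)) == 548
    return manhattan_distance((456, 320), (228, 0))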
def create_distance_evaluator(data):
"""Creates callback to return distance between points."""
_distances = {}
# precompute distance between location to have distance callback in O(1)
for from_node in range(data['num_locations']):
_distances[from_node] = {}
for to_node in range(data['num_locations']):
if from_node == to_node:
_distances[from_node][to_node] = 0
else:
_distances[from_node][to_node] = (manhattan_distance(
data['locations'][from_node], data['locations'][to_node]))
def distance_evaluator(manager, from_node, to_node):
"""Returns the manhattan distance between the two nodes"""
return _distances[manager.IndexToNode(from_node)][manager.IndexToNode(
to_node)]
return distance_evaluator
def create_demand_evaluator(data):
"""Creates callback to get demands at each location."""
_demands = data['demands']
def demand_evaluator(manager, node):
"""Returns the demand of the current node"""
return _demands[manager.IndexToNode(node)]
return demand_evaluator
def add_capacity_constraints(routing, data, demand_evaluator_index):
"""Adds capacity constraint"""
capacity = 'Capacity'
routing.AddDimension(
demand_evaluator_index,
0, # null capacity slack
data['vehicle_capacity'],
True, # start cumul to zero
capacity)
###########
# Printer #
###########
def print_solution(data, routing, manager, assignment): # pylint:disable=too-many-locals
"""Prints assignment on console"""
print('Objective: {}'.format(assignment.ObjectiveValue()))
total_distance = 0
total_load = 0
capacity_dimension = routing.GetDimensionOrDie('Capacity')
for vehicle_id in range(data['num_vehicles']):
index = routing.Start(vehicle_id)
plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
distance = 0
while not routing.IsEnd(index):
load_var = capacity_dimension.CumulVar(index)
plan_output += ' {} Load({}) -> '.format(
manager.IndexToNode(index), assignment.Value(load_var))
previous_index = index
index = assignment.Value(routing.NextVar(index))
distance += routing.GetArcCostForVehicle(previous_index, index,
vehicle_id)
load_var = capacity_dimension.CumulVar(index)
plan_output += ' {0} Load({1})\n'.format(
manager.IndexToNode(index), assignment.Value(load_var))
plan_output += 'Distance of the route: {}m\n'.format(distance)
plan_output += 'Load of the route: {}\n'.format(
assignment.Value(load_var))
print(plan_output)
total_distance += distance
total_load += assignment.Value(load_var)
print('Total Distance of all routes: {}m'.format(total_distance))
print('Total Load of all routes: {}'.format(total_load))
########
# Main #
########
def main():
"""Entry point of the program"""
# Instantiate the data problem.
data = create_data_model()
# Create the routing index manager
manager = pywrapcp.RoutingIndexManager(data['num_locations'],
data['num_vehicles'], data['depot'])
# Create Routing Model
routing = pywrapcp.RoutingModel(manager)
# Define weight of each edge
distance_evaluator = routing.RegisterTransitCallback(
partial(create_distance_evaluator(data), manager))
routing.SetArcCostEvaluatorOfAllVehicles(distance_evaluator)
# Add Capacity constraint
demand_evaluator_index = routing.RegisterUnaryTransitCallback(
partial(create_demand_evaluator(data), manager))
add_capacity_constraints(routing, data, demand_evaluator_index)
# Setting first solution heuristic (cheapest addition).
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC) # pylint: disable=no-member
search_parameters.local_search_metaheuristic = (
routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
search_parameters.time_limit.FromSeconds(1)
# Solve the problem.
assignment = routing.SolveWithParameters(search_parameters)
print_solution(data, routing, manager, assignment)
if __name__ == '__main__':
main()
| 35.782828
| 95
| 0.638391
|
58bc127957fff8e958616136b7ddb2643a5fd470
| 4,863
|
py
|
Python
|
gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py
|
zhjc/gpdb
|
a6fec04a5bdab2ee00255747800144de9c76d65c
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2019-03-16T15:09:48.000Z
|
2019-03-16T15:09:48.000Z
|
gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py
|
zhjc/gpdb
|
a6fec04a5bdab2ee00255747800144de9c76d65c
|
[
"PostgreSQL",
"Apache-2.0"
] | 2
|
2019-03-02T16:16:00.000Z
|
2019-03-20T05:52:31.000Z
|
gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py
|
flutteringelf/gpdb
|
bbd27b4be3c454c6aca3dd75e3e115eff627c035
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
import os
from behave import given, when, then
from test.behave_utils.utils import (
stop_database,
run_command,
stop_primary,
trigger_fts_probe,
run_gprecoverseg,
execute_sql,
)
from addmirrors_mgmt_utils import (add_three_mirrors)
def assert_successful_command(context):
if context.ret_code != 0:
raise Exception('%s : %s' % (context.error_message, context.stdout_message))
def run_recovery_for_segments(context):
run_command(context, "gprecoverseg -aFv")
assert_successful_command(context)
def create_cluster(context, with_mirrors=True):
context.initial_cluster_size = 3
context.current_cluster_size = context.initial_cluster_size
cmd = """
cd ../gpAux/gpdemo; \
export MASTER_DEMO_PORT={master_port} && \
export DEMO_PORT_BASE={port_base} && \
export NUM_PRIMARY_MIRROR_PAIRS={num_primary_mirror_pairs} && \
    export WITH_MIRRORS={with_mirrors} && \
./demo_cluster.sh -d && ./demo_cluster.sh -c && \
./demo_cluster.sh
""".format(master_port=os.getenv('MASTER_PORT', 15432),
port_base=os.getenv('PORT_BASE', 25432),
num_primary_mirror_pairs=os.getenv(
'NUM_PRIMARY_MIRROR_PAIRS', context.initial_cluster_size),
with_mirrors=('true' if with_mirrors else 'false'))
run_command(context, cmd)
assert_successful_command(context)
def ensure_temp_directory_is_empty(context, temp_directory):
run_command(context, "rm -rf /tmp/{temp_directory}".format(
temp_directory=temp_directory))
def expand(context):
ensure_temp_directory_is_empty(context, "behave_test_expansion_primary")
ensure_temp_directory_is_empty(context, "behave_test_expansion_mirror")
expansion_command = """gpexpand --input <(echo '
localhost:localhost:25438:/tmp/behave_test_expansion_primary:8:3:p
localhost:localhost:25439:/tmp/behave_test_expansion_mirror:9:3:m
')
"""
# Initialize
run_command(context, expansion_command)
assert_successful_command(context)
# Redistribute tables
run_command(context, expansion_command)
assert_successful_command(context)
def ensure_primary_mirror_switched_roles():
results = execute_sql(
"postgres",
"select * from gp_segment_configuration where preferred_role <> role"
)
if results.rowcount != 2:
raise Exception("expected 2 segments to not be in preferred roles")
@given(u'I have a machine with no cluster')
def step_impl(context):
stop_database(context)
@when(u'a mirror has crashed')
def step_impl(context):
run_command(context, "ps aux | grep dbfast_mirror1 | awk '{print $2}' | xargs kill -9")
@when(u'I create a cluster')
def step_impl(context):
create_cluster(context)
@then(u'the primaries and mirrors should be replicating using replication slots')
def step_impl(context):
result_cursor = execute_sql(
"postgres",
"select pg_get_replication_slots() from gp_dist_random('gp_id') order by gp_segment_id"
)
if result_cursor.rowcount != context.current_cluster_size:
        raise Exception("expected all %d primaries to have replication slots, only %d have slots" % (context.current_cluster_size, result_cursor.rowcount))
for content_id, result in enumerate(result_cursor.fetchall()):
if not result[0].startswith('(internal_wal_replication_slot,,physical,,t,'):
raise Exception(
"expected replication slot to be active for content id %d, got %s" %
(content_id, result[0])
)
@then(u'the mirrors should not have replication slots')
def step_impl(context):
result_cursor = execute_sql(
"postgres",
"select datadir from gp_segment_configuration where role='m';"
)
for content_id, result in enumerate(result_cursor.fetchall()):
path_to_replslot = os.path.join(result[0], 'pg_replslot')
if len(os.listdir(path_to_replslot)) > 0:
raise Exception("expected replication slot directory to be empty")
@given(u'a preferred primary has failed')
def step_impl(context):
stop_primary(context, 0)
@when('primary and mirror switch to non-preferred roles')
def step_impl(context):
trigger_fts_probe()
run_gprecoverseg()
ensure_primary_mirror_switched_roles()
@given("I cluster with no mirrors")
def step_impl(context):
create_cluster(context, with_mirrors=False)
@when("I add mirrors to the cluster")
def step_impl(context):
add_three_mirrors(context)
@given("I create a cluster")
def step_impl(context):
create_cluster(context, with_mirrors=True)
@when("I fully recover a mirror")
def step_impl(context):
run_recovery_for_segments(context)
@when("I add a segment to the cluster")
def step_imp(context):
context.current_cluster_size = 4
expand(context)
| 29.295181
| 149
| 0.711701
|
13ce3800ae23d84eb44be2f40bb6337700f26cb1
| 8,665
|
py
|
Python
|
siding/profile.py
|
stendec/siding
|
a602504a10c58eb656467820f4d45081dbbfb50b
|
[
"Apache-2.0"
] | 4
|
2015-02-17T17:45:27.000Z
|
2019-09-28T11:51:27.000Z
|
siding/profile.py
|
stendec/siding
|
a602504a10c58eb656467820f4d45081dbbfb50b
|
[
"Apache-2.0"
] | null | null | null |
siding/profile.py
|
stendec/siding
|
a602504a10c58eb656467820f4d45081dbbfb50b
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
#
# Copyright 2012 Siding Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
A profile system that provides both a :class:`PySide.QtCore.QSettings` instance
for storing and retrieving settings values, as well as functions for
determining file locations between the profile directory and the application
root.
"""
###############################################################################
# Imports
###############################################################################
import os
import argparse
import sys
from PySide.QtCore import QSettings
from siding import path
###############################################################################
# Logging
###############################################################################
import logging
log = logging.getLogger("siding.profile")
###############################################################################
# Constants and Storage
###############################################################################
name = 'default'
settings = None
portable = False
profile_path = None
root_path = None
###############################################################################
# Internal Functions
###############################################################################
def assert_profile():
""" Raise an exception if a profile hasn't been loaded. """
if settings is None:
raise RuntimeError("A profile hasn't been loaded.")
def ensure_paths():
""" Ensure profile_path is set, and that it's registered with path. """
global profile_path
global root_path
# If we don't have root path, don't worry about it.
if not root_path:
root_path = path.root()
# If we don't have profile_path, make it.
if not profile_path:
if portable:
# Well, bah. Okay... *now* care about the root path.
if not root_path:
root_path = path.root()
profile_path = root_path
else:
profile_path = path.appdata()
# Add the "Profiles/<profile>" bit to the profile path and ensure it
# exists.
profile_path = os.path.join(profile_path, 'Profiles', name)
if not os.path.exists(profile_path):
os.makedirs(profile_path)
path.add_source(profile_path)
###############################################################################
# Settings Getters / Setters
###############################################################################
def contains(key):
"""
Returns true if key exists in the loaded profile, or false if it does not.
"""
assert_profile()
return settings.contains(key)
def keys():
""" Return a list of all the keys in the loaded profile. """
assert_profile()
return settings.allKeys()
def set(key, value):
"""
Sets the value of key to value in the loaded profile. If the key already
exists, the existing value is overwritten.
"""
assert_profile()
settings.setValue(key, value)
def get(key, default=None):
"""
Returns the value of key in the loaded profile. If the key doesn't exist,
the provided default will be returned.
"""
assert_profile()
return settings.value(key, default)
def remove(key):
""" Delete the key from the loaded profile. """
assert_profile()
settings.remove(key)
###############################################################################
# Initialization
###############################################################################
def initialize(args=None, **kwargs):
"""
Initialize the profile system. You may use the following arguments to
configure the profile system:
============= ============ ============
Argument Default Description
============= ============ ============
portable ``False`` If True, the profile system will create a profile path within the root folder, allowing the application to work as a portable app.
profile ``default`` The name of the profile to load.
sources ``[]`` A list of additional sources for the path system to use. These will be fed into :func:`siding.path.add_source`, along with any sources from the command line.
profile_path If this is set, load the profile from this path rather than building a path.
root_path The application root directory. This is always the last source to be used by the path system.
============= ============ ============
.. warning::
``root_path`` will *probably* not work as expected after your
application is frozen into an executable, so be sure to test that it's
working properly before distributing your application if you're using
``root_path``.
In addition, you can provide a list of command line arguments to have
siding load them automatically. Example::
siding.profile.initialize(sys.argv[1:])
The following command line arguments are supported:
=================== ============
Argument Description
=================== ============
``--portable`` If True, the profile system will create a profile path within the root folder, allowing the application to work as a portable app.
``--profile`` The name of the profile to load.
``--profile-path`` The path to load the profile from.
``--root-path`` The application root directory.
``--source`` An additional source for the path system. This can be used multiple times.
=================== ============
"""
global name
global portable
global profile_path
global root_path
global settings
# Set the defaults now.
portable = kwargs.get('portable', False)
name = kwargs.get('profile', 'default')
# And load the paths if we've got them.
root_path = kwargs.get('root_path', root_path)
profile_path = kwargs.get('profile_path', profile_path)
# Get the source list.
sources = kwargs.get('sources', [])
# Now, parse the options we've got.
if args:
if args is True:
args = sys.argv[1:]
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--portable', action='store_true', default=None)
parser.add_argument('--profile')
parser.add_argument('--profile-path')
parser.add_argument('--root-path')
parser.add_argument('--source', action='append')
options = parser.parse_known_args(args)[0]
# Let's set stuff up then.
if options.portable is not None:
portable = options.portable
if options.profile:
name = options.profile
if options.profile_path:
profile_path = options.profile_path
if not os.path.exists(profile_path):
os.makedirs(profile_path)
if options.root_path:
root_path = options.root_path
if not os.path.exists(root_path):
parser.error("The specified root path doesn't exist.")
if options.source:
for source in options.source:
if not source in sources:
if not os.path.exists(source):
parser.error("The source %r doesn't exist." % source)
sources.append(source)
# Now, do the path stuff.
for source in sources:
path.add_source(source)
# Do we already have our paths?
if profile_path or root_path:
path.add_source(profile_path)
# Make sure.
ensure_paths()
# Now, open the settings file with QSettings and we're done.
file = os.path.join(profile_path, 'settings.ini')
settings = QSettings(file, QSettings.IniFormat)
log.info(u'Using profile: %s (%s)' % (name, profile_path))
log.debug(u'settings.ini contains %d keys across %d groups.' % (
len(settings.allKeys()), len(settings.childGroups())))
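###############################################################################
# Illustrative Usage Sketch
###############################################################################
def _example_usage(argv=None):
    """
    Hedged sketch only (not part of the original module): one way an
    application might drive the profile system. The profile name and the
    settings key below are hypothetical.
    """
    initialize(argv or [], profile='demo', portable=False)
    if not contains('ui/theme'):
        set('ui/theme', 'dark')
    return get('ui/theme', 'light')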
| 34.939516
| 190
| 0.553029
|
3994ba9f85c377b8aec6284ea78d79044ca2444b
| 96,476
|
py
|
Python
|
src/twisted/web/test/test_http.py
|
SandySalvatore/twisted
|
e08cf823633472288e727e6cace8454187935bae
|
[
"Unlicense",
"MIT"
] | 1
|
2017-01-14T15:20:35.000Z
|
2017-01-14T15:20:35.000Z
|
src/twisted/web/test/test_http.py
|
SandySalvatore/twisted
|
e08cf823633472288e727e6cace8454187935bae
|
[
"Unlicense",
"MIT"
] | 1
|
2016-10-10T23:49:15.000Z
|
2016-10-10T23:49:15.000Z
|
src/twisted/web/test/test_http.py
|
SandySalvatore/twisted
|
e08cf823633472288e727e6cace8454187935bae
|
[
"Unlicense",
"MIT"
] | 1
|
2019-01-29T06:37:19.000Z
|
2019-01-29T06:37:19.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test HTTP support.
"""
from __future__ import absolute_import, division
import random, cgi, base64, calendar
try:
from urlparse import urlparse, urlunsplit, clear_cache
except ImportError:
from urllib.parse import urlparse, urlunsplit, clear_cache
from zope.interface import provider
from twisted.python.compat import (_PY3, iterbytes, networkString, unicode,
intToBytes, NativeStringIO)
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.web import http, http_headers, iweb
from twisted.web.http import PotentialDataLoss, _DataLoss
from twisted.web.http import _IdentityTransferDecoder
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionLost
from twisted.protocols import loopback
from twisted.test.proto_helpers import StringTransport
from twisted.test.test_internet import DummyProducer
from twisted.web.test.requesthelper import DummyChannel
class DateTimeTests(unittest.TestCase):
"""Test date parsing functions."""
def testRoundtrip(self):
for i in range(10000):
time = random.randint(0, 2000000000)
timestr = http.datetimeToString(time)
time2 = http.stringToDatetime(timestr)
self.assertEqual(time, time2)
def testStringToDatetime(self):
dateStrings = [
b"Sun, 06 Nov 1994 08:49:37 GMT",
b"06 Nov 1994 08:49:37 GMT",
b"Sunday, 06-Nov-94 08:49:37 GMT",
b"06-Nov-94 08:49:37 GMT",
b"Sunday, 06-Nov-1994 08:49:37 GMT",
b"06-Nov-1994 08:49:37 GMT",
b"Sun Nov 6 08:49:37 1994",
b"Nov 6 08:49:37 1994",
]
dateInt = calendar.timegm((1994, 11, 6, 8, 49, 37, 6, 6, 0))
for dateString in dateStrings:
self.assertEqual(http.stringToDatetime(dateString), dateInt)
self.assertEqual(
http.stringToDatetime(b"Thursday, 29-Sep-16 17:15:29 GMT"),
calendar.timegm((2016, 9, 29, 17, 15, 29, 3, 273, 0)))
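# Illustrative sketch (hedged, not part of the original test module):
# http.stringToDatetime accepts several historical date formats (RFC 1123,
# RFC 850 and asctime), all of which denote the same POSIX timestamp here.
def _example_date_roundtrip():
    when = http.stringToDatetime(b"Sun, 06 Nov 1994 08:49:37 GMT")
    assert http.stringToDatetime(b"Sunday, 06-Nov-94 08:49:37 GMT") == when
    return http.datetimeToString(when)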
class DummyHTTPHandler(http.Request):
def process(self):
self.content.seek(0, 0)
data = self.content.read()
length = self.getHeader(b'content-length')
if length is None:
length = networkString(str(length))
request = b"'''\n" + length + b"\n" + data + b"'''\n"
self.setResponseCode(200)
self.setHeader(b"Request", self.uri)
self.setHeader(b"Command", self.method)
self.setHeader(b"Version", self.clientproto)
self.setHeader(b"Content-Length", intToBytes(len(request)))
self.write(request)
self.finish()
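# Illustrative helper (hedged, not part of the original test module):
# DummyHTTPHandler echoes each request back as b"'''\n<length>\n<body>'''\n",
# which is why the expected responses below contain byte strings such as
# b"'''\nNone\n'''\n" (no Content-Length header, empty body) and
# b"'''\n10\n0123456789'''\n" (ten body bytes).
def _expected_echo(length_header, body):
    """Build the body DummyHTTPHandler would write for the given request."""
    length = length_header if length_header is not None else b'None'
    return b"'''\n" + length + b"\n" + body + b"'''\n"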
@provider(iweb.INonQueuedRequestFactory)
class DummyNewHTTPHandler(DummyHTTPHandler):
"""
This is exactly like the DummyHTTPHandler but it takes only one argument
in its constructor, with no default arguments. This exists to test an
alternative code path in L{HTTPChannel}.
"""
def __init__(self, channel):
DummyHTTPHandler.__init__(self, channel)
class DelayedHTTPHandler(DummyHTTPHandler):
"""
Like L{DummyHTTPHandler}, but doesn't respond immediately.
"""
def process(self):
pass
def delayedProcess(self):
DummyHTTPHandler.process(self)
class LoopbackHTTPClient(http.HTTPClient):
def connectionMade(self):
self.sendCommand(b"GET", b"/foo/bar")
self.sendHeader(b"Content-Length", 10)
self.endHeaders()
self.transport.write(b"0123456789")
class ResponseTestMixin(object):
"""
A mixin that provides a simple means of comparing an actual response string
to an expected response string by performing the minimal parsing.
"""
def assertResponseEquals(self, responses, expected):
"""
Assert that the C{responses} matches the C{expected} responses.
@type responses: C{bytes}
@param responses: The bytes sent in response to one or more requests.
@type expected: C{list} of C{tuple} of C{bytes}
@param expected: The expected values for the responses. Each tuple
element of the list represents one response. Each byte string
element of the tuple is a full header line without delimiter, except
for the last element which gives the full response body.
"""
for response in expected:
expectedHeaders, expectedContent = response[:-1], response[-1]
# Intentionally avoid mutating the inputs here.
expectedStatus = expectedHeaders[0]
expectedHeaders = expectedHeaders[1:]
headers, rest = responses.split(b'\r\n\r\n', 1)
headers = headers.splitlines()
status = headers.pop(0)
self.assertEqual(expectedStatus, status)
self.assertEqual(set(headers), set(expectedHeaders))
content = rest[:len(expectedContent)]
responses = rest[len(expectedContent):]
self.assertEqual(content, expectedContent)
class HTTP1_0Tests(unittest.TestCase, ResponseTestMixin):
requests = (
b"GET / HTTP/1.0\r\n"
b"\r\n"
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.0 200 OK",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.0",
b"Content-Length: 13",
b"'''\nNone\n'''\n")]
def test_buffer(self):
"""
Send requests over a channel and check responses match what is expected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DummyHTTPHandler
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all one"))
value = b.value()
self.assertResponseEquals(value, self.expected_response)
def test_requestBodyTimeout(self):
"""
L{HTTPChannel} resets its timeout whenever data from a request body is
delivered to it.
"""
clock = Clock()
transport = StringTransport()
protocol = http.HTTPChannel()
protocol.timeOut = 100
protocol.callLater = clock.callLater
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
clock.advance(99)
self.assertFalse(transport.disconnecting)
protocol.dataReceived(b'x')
clock.advance(99)
self.assertFalse(transport.disconnecting)
protocol.dataReceived(b'x')
self.assertEqual(len(protocol.requests), 1)
def test_noPipeliningApi(self):
"""
Test that a L{http.Request} subclass with no queued kwarg works as
expected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DummyNewHTTPHandler
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all done"))
value = b.value()
self.assertResponseEquals(value, self.expected_response)
def test_noPipelining(self):
"""
Test that pipelined requests get buffered, not processed in parallel.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DelayedHTTPHandler
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
value = b.value()
# So far only one request should have been dispatched.
self.assertEqual(value, b'')
self.assertEqual(1, len(a.requests))
# Now, process each request one at a time.
while a.requests:
self.assertEqual(1, len(a.requests))
a.requests[0].delayedProcess()
value = b.value()
self.assertResponseEquals(value, self.expected_response)
class HTTP1_1Tests(HTTP1_0Tests):
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"\r\n"
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789HEAD / HTTP/1.1\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"'''\nNone\n'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: HEAD",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"")]
class HTTP1_1_close_Tests(HTTP1_0Tests):
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"Connection: close\r\n"
b"\r\n"
b"GET / HTTP/1.0\r\n"
b"\r\n")
expected_response = [
(b"HTTP/1.1 200 OK",
b"Connection: close",
b"Request: /",
b"Command: GET",
b"Version: HTTP/1.1",
b"Content-Length: 13",
b"'''\nNone\n'''\n")]
class HTTP0_9Tests(HTTP1_0Tests):
requests = (
b"GET /\r\n")
expected_response = b"HTTP/1.1 400 Bad Request\r\n\r\n"
def assertResponseEquals(self, response, expectedResponse):
self.assertEqual(response, expectedResponse)
def test_noPipelining(self):
raise unittest.SkipTest("HTTP/0.9 not supported")
class PipeliningBodyTests(unittest.TestCase, ResponseTestMixin):
"""
Tests that multiple pipelined requests with bodies are correctly buffered.
"""
requests = (
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789"
)
expectedResponses = [
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n"),
(b"HTTP/1.1 200 OK",
b"Request: /",
b"Command: POST",
b"Version: HTTP/1.1",
b"Content-Length: 21",
b"'''\n10\n0123456789'''\n")]
def test_noPipelining(self):
"""
Test that pipelined requests get buffered, not processed in parallel.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = DelayedHTTPHandler
a.makeConnection(b)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
value = b.value()
# So far only one request should have been dispatched.
self.assertEqual(value, b'')
self.assertEqual(1, len(a.requests))
# Now, process each request one at a time.
while a.requests:
self.assertEqual(1, len(a.requests))
a.requests[0].delayedProcess()
value = b.value()
self.assertResponseEquals(value, self.expectedResponses)
class ShutdownTests(unittest.TestCase):
"""
Tests that connections can be shut down by L{http.Request} objects.
"""
class ShutdownHTTPHandler(http.Request):
"""
A HTTP handler that just immediately calls loseConnection.
"""
def process(self):
self.loseConnection()
request = (
b"POST / HTTP/1.1\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"0123456789"
)
def test_losingConnection(self):
"""
Calling L{http.Request.loseConnection} causes the transport to be
disconnected.
"""
b = StringTransport()
a = http.HTTPChannel()
a.requestFactory = self.ShutdownHTTPHandler
a.makeConnection(b)
a.dataReceived(self.request)
# The transport should have been shut down.
self.assertTrue(b.disconnecting)
# No response should have been written.
value = b.value()
self.assertEqual(value, b'')
class SecurityTests(unittest.TestCase):
"""
Tests that L{http.Request.isSecure} correctly takes the transport into
account.
"""
def test_isSecure(self):
"""
Calling L{http.Request.isSecure} when the channel is backed with a
secure transport will return L{True}.
"""
b = DummyChannel.SSL()
a = http.HTTPChannel()
a.makeConnection(b)
req = http.Request(a)
self.assertTrue(req.isSecure())
def test_notSecure(self):
"""
Calling L{http.Request.isSecure} when the channel is not backed with a
secure transport will return L{False}.
"""
b = DummyChannel.TCP()
a = http.HTTPChannel()
a.makeConnection(b)
req = http.Request(a)
self.assertFalse(req.isSecure())
def test_notSecureAfterFinish(self):
"""
After a request is finished, calling L{http.Request.isSecure} will
always return L{False}.
"""
b = DummyChannel.SSL()
a = http.HTTPChannel()
a.makeConnection(b)
req = http.Request(a)
a.requests.append(req)
req.setResponseCode(200)
req.finish()
self.assertFalse(req.isSecure())
class GenericHTTPChannelTests(unittest.TestCase):
"""
Tests for L{http._genericHTTPChannelProtocol}, a L{HTTPChannel}-alike which
can handle different HTTP protocol channels.
"""
requests = (
b"GET / HTTP/1.1\r\n"
b"Accept: text/html\r\n"
b"Connection: close\r\n"
b"\r\n"
b"GET / HTTP/1.0\r\n"
b"\r\n")
def _negotiatedProtocolForTransportInstance(self, t):
"""
Run a request using the specific instance of a transport. Returns the
negotiated protocol string.
"""
a = http._genericHTTPChannelProtocolFactory(b'')
a.requestFactory = DummyHTTPHandler
a.makeConnection(t)
# one byte at a time, to stress it.
for byte in iterbytes(self.requests):
a.dataReceived(byte)
a.connectionLost(IOError("all done"))
return a._negotiatedProtocol
def test_protocolUnspecified(self):
"""
If the transport has no support for protocol negotiation (no
negotiatedProtocol attribute), HTTP/1.1 is assumed.
"""
b = StringTransport()
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'http/1.1')
def test_protocolNone(self):
"""
If the transport has no support for protocol negotiation (returns None
for negotiatedProtocol), HTTP/1.1 is assumed.
"""
b = StringTransport()
b.negotiatedProtocol = None
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'http/1.1')
def test_http11(self):
"""
If the transport reports that HTTP/1.1 is negotiated, that's what's
negotiated.
"""
b = StringTransport()
b.negotiatedProtocol = b'http/1.1'
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'http/1.1')
def test_http2_present(self):
"""
If the transport reports that HTTP/2 is negotiated and HTTP/2 is
present, that's what's negotiated.
"""
b = StringTransport()
b.negotiatedProtocol = b'h2'
negotiatedProtocol = self._negotiatedProtocolForTransportInstance(b)
self.assertEqual(negotiatedProtocol, b'h2')
if not http.H2_ENABLED:
test_http2_present.skip = "HTTP/2 support not present"
def test_http2_absent(self):
"""
If the transport reports that HTTP/2 is negotiated and HTTP/2 is not
present, an error is encountered.
"""
b = StringTransport()
b.negotiatedProtocol = b'h2'
self.assertRaises(
ValueError,
self._negotiatedProtocolForTransportInstance,
b,
)
if http.H2_ENABLED:
test_http2_absent.skip = "HTTP/2 support present"
def test_unknownProtocol(self):
"""
If the transport reports that a protocol other than HTTP/1.1 or HTTP/2
is negotiated, an error occurs.
"""
b = StringTransport()
b.negotiatedProtocol = b'smtp'
self.assertRaises(
AssertionError,
self._negotiatedProtocolForTransportInstance,
b,
)
def test_factory(self):
"""
The C{factory} attribute is taken from the inner channel.
"""
a = http._genericHTTPChannelProtocolFactory(b'')
a._channel.factory = b"Foo"
self.assertEqual(a.factory, b"Foo")
class HTTPLoopbackTests(unittest.TestCase):
expectedHeaders = {b'request': b'/foo/bar',
b'command': b'GET',
b'version': b'HTTP/1.0',
b'content-length': b'21'}
numHeaders = 0
gotStatus = 0
gotResponse = 0
gotEndHeaders = 0
def _handleStatus(self, version, status, message):
self.gotStatus = 1
self.assertEqual(version, b"HTTP/1.0")
self.assertEqual(status, b"200")
def _handleResponse(self, data):
self.gotResponse = 1
self.assertEqual(data, b"'''\n10\n0123456789'''\n")
def _handleHeader(self, key, value):
self.numHeaders = self.numHeaders + 1
self.assertEqual(self.expectedHeaders[key.lower()], value)
def _handleEndHeaders(self):
self.gotEndHeaders = 1
self.assertEqual(self.numHeaders, 4)
def testLoopback(self):
server = http.HTTPChannel()
server.requestFactory = DummyHTTPHandler
client = LoopbackHTTPClient()
client.handleResponse = self._handleResponse
client.handleHeader = self._handleHeader
client.handleEndHeaders = self._handleEndHeaders
client.handleStatus = self._handleStatus
d = loopback.loopbackAsync(server, client)
d.addCallback(self._cbTestLoopback)
return d
def _cbTestLoopback(self, ignored):
if not (self.gotStatus and self.gotResponse and self.gotEndHeaders):
raise RuntimeError(
"didn't got all callbacks %s"
% [self.gotStatus, self.gotResponse, self.gotEndHeaders])
del self.gotEndHeaders
del self.gotResponse
del self.gotStatus
del self.numHeaders
def _prequest(**headers):
"""
Make a request with the given request headers for the persistence tests.
"""
request = http.Request(DummyChannel(), False)
for headerName, v in headers.items():
request.requestHeaders.setRawHeaders(networkString(headerName), v)
return request
class PersistenceTests(unittest.TestCase):
"""
Tests for persistent HTTP connections.
"""
def setUp(self):
self.channel = http.HTTPChannel()
self.request = _prequest()
def test_http09(self):
"""
After being used for an I{HTTP/0.9} request, the L{HTTPChannel} is not
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/0.9")
self.assertFalse(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http10(self):
"""
After being used for an I{HTTP/1.0} request, the L{HTTPChannel} is not
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/1.0")
self.assertFalse(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http11(self):
"""
After being used for an I{HTTP/1.1} request, the L{HTTPChannel} is
persistent.
"""
persist = self.channel.checkPersistence(self.request, b"HTTP/1.1")
self.assertTrue(persist)
self.assertEqual(
[], list(self.request.responseHeaders.getAllRawHeaders()))
def test_http11Close(self):
"""
After being used for an I{HTTP/1.1} request with a I{Connection: Close}
header, the L{HTTPChannel} is not persistent.
"""
request = _prequest(connection=[b"close"])
persist = self.channel.checkPersistence(request, b"HTTP/1.1")
self.assertFalse(persist)
self.assertEqual(
[(b"Connection", [b"close"])],
list(request.responseHeaders.getAllRawHeaders()))
class IdentityTransferEncodingTests(TestCase):
"""
Tests for L{_IdentityTransferDecoder}.
"""
def setUp(self):
"""
Create an L{_IdentityTransferDecoder} with callbacks hooked up so that
calls to them can be inspected.
"""
self.data = []
self.finish = []
self.contentLength = 10
self.decoder = _IdentityTransferDecoder(
self.contentLength, self.data.append, self.finish.append)
def test_exactAmountReceived(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called with a byte string
with length equal to the content length passed to
L{_IdentityTransferDecoder}'s initializer, the data callback is invoked
with that string and the finish callback is invoked with a zero-length
string.
"""
self.decoder.dataReceived(b'x' * self.contentLength)
self.assertEqual(self.data, [b'x' * self.contentLength])
self.assertEqual(self.finish, [b''])
def test_shortStrings(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called multiple times
with byte strings which, when concatenated, are as long as the content
length provided, the data callback is invoked with each string and the
finish callback is invoked only after the second call.
"""
self.decoder.dataReceived(b'x')
self.assertEqual(self.data, [b'x'])
self.assertEqual(self.finish, [])
self.decoder.dataReceived(b'y' * (self.contentLength - 1))
self.assertEqual(self.data, [b'x', b'y' * (self.contentLength - 1)])
self.assertEqual(self.finish, [b''])
def test_longString(self):
"""
If L{_IdentityTransferDecoder.dataReceived} is called with a byte string
with length greater than the provided content length, only the prefix
of that string up to the content length is passed to the data callback
and the remainder is passed to the finish callback.
"""
self.decoder.dataReceived(b'x' * self.contentLength + b'y')
self.assertEqual(self.data, [b'x' * self.contentLength])
self.assertEqual(self.finish, [b'y'])
def test_rejectDataAfterFinished(self):
"""
If data is passed to L{_IdentityTransferDecoder.dataReceived} after the
finish callback has been invoked, C{RuntimeError} is raised.
"""
failures = []
def finish(bytes):
try:
decoder.dataReceived(b'foo')
except:
failures.append(Failure())
decoder = _IdentityTransferDecoder(5, self.data.append, finish)
decoder.dataReceived(b'x' * 4)
self.assertEqual(failures, [])
decoder.dataReceived(b'y')
failures[0].trap(RuntimeError)
self.assertEqual(
str(failures[0].value),
"_IdentityTransferDecoder cannot decode data after finishing")
def test_unknownContentLength(self):
"""
If L{_IdentityTransferDecoder} is constructed with L{None} for the
content length, it passes all data delivered to it through to the data
callback.
"""
data = []
finish = []
decoder = _IdentityTransferDecoder(None, data.append, finish.append)
decoder.dataReceived(b'x')
self.assertEqual(data, [b'x'])
decoder.dataReceived(b'y')
self.assertEqual(data, [b'x', b'y'])
self.assertEqual(finish, [])
def _verifyCallbacksUnreferenced(self, decoder):
"""
Check the decoder's data and finish callbacks and make sure they are
None in order to help avoid references cycles.
"""
self.assertIdentical(decoder.dataCallback, None)
self.assertIdentical(decoder.finishCallback, None)
def test_earlyConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} raises L{_DataLoss} if it is
called and the content length is known but not enough bytes have been
delivered.
"""
self.decoder.dataReceived(b'x' * (self.contentLength - 1))
self.assertRaises(_DataLoss, self.decoder.noMoreData)
self._verifyCallbacksUnreferenced(self.decoder)
def test_unknownContentLengthConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} calls the finish callback and
raises L{PotentialDataLoss} if it is called and the content length is
unknown.
"""
body = []
finished = []
decoder = _IdentityTransferDecoder(None, body.append, finished.append)
self.assertRaises(PotentialDataLoss, decoder.noMoreData)
self.assertEqual(body, [])
self.assertEqual(finished, [b''])
self._verifyCallbacksUnreferenced(decoder)
def test_finishedConnectionLose(self):
"""
L{_IdentityTransferDecoder.noMoreData} does not raise any exception if
it is called when the content length is known and that many bytes have
been delivered.
"""
self.decoder.dataReceived(b'x' * self.contentLength)
self.decoder.noMoreData()
self._verifyCallbacksUnreferenced(self.decoder)
class ChunkedTransferEncodingTests(unittest.TestCase):
"""
Tests for L{_ChunkedTransferDecoder}, which turns a byte stream encoded
using HTTP I{chunked} C{Transfer-Encoding} back into the original byte
stream.
"""
def test_decoding(self):
"""
L{_ChunkedTransferDecoder.dataReceived} decodes chunked-encoded data
and passes the result to the specified callback.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'3\r\nabc\r\n5\r\n12345\r\n')
p.dataReceived(b'a\r\n0123456789\r\n')
self.assertEqual(L, [b'abc', b'12345', b'0123456789'])
def test_short(self):
"""
L{_ChunkedTransferDecoder.dataReceived} decodes chunks broken up and
delivered in multiple calls.
"""
L = []
finished = []
p = http._ChunkedTransferDecoder(L.append, finished.append)
for s in iterbytes(b'3\r\nabc\r\n5\r\n12345\r\n0\r\n\r\n'):
p.dataReceived(s)
self.assertEqual(L, [b'a', b'b', b'c', b'1', b'2', b'3', b'4', b'5'])
self.assertEqual(finished, [b''])
def test_newlines(self):
"""
L{_ChunkedTransferDecoder.dataReceived} doesn't treat CR LF pairs
embedded in chunk bodies specially.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'2\r\n\r\n\r\n')
self.assertEqual(L, [b'\r\n'])
def test_extensions(self):
"""
L{_ChunkedTransferDecoder.dataReceived} disregards chunk-extension
fields.
"""
L = []
p = http._ChunkedTransferDecoder(L.append, None)
p.dataReceived(b'3; x-foo=bar\r\nabc\r\n')
self.assertEqual(L, [b'abc'])
def test_finish(self):
"""
L{_ChunkedTransferDecoder.dataReceived} interprets a zero-length
chunk as the end of the chunked data stream and calls the completion
callback.
"""
finished = []
p = http._ChunkedTransferDecoder(None, finished.append)
p.dataReceived(b'0\r\n\r\n')
self.assertEqual(finished, [b''])
def test_extra(self):
"""
L{_ChunkedTransferDecoder.dataReceived} passes any bytes which come
after the terminating zero-length chunk to the completion callback.
"""
finished = []
p = http._ChunkedTransferDecoder(None, finished.append)
p.dataReceived(b'0\r\n\r\nhello')
self.assertEqual(finished, [b'hello'])
def test_afterFinished(self):
"""
L{_ChunkedTransferDecoder.dataReceived} raises C{RuntimeError} if it
is called after it has seen the last chunk.
"""
p = http._ChunkedTransferDecoder(None, lambda bytes: None)
p.dataReceived(b'0\r\n\r\n')
self.assertRaises(RuntimeError, p.dataReceived, b'hello')
def test_earlyConnectionLose(self):
"""
L{_ChunkedTransferDecoder.noMoreData} raises L{_DataLoss} if it is
called and the end of the last trailer has not yet been received.
"""
parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
parser.dataReceived(b'0\r\n\r')
exc = self.assertRaises(_DataLoss, parser.noMoreData)
self.assertEqual(
str(exc),
"Chunked decoder in 'TRAILER' state, still expecting more data "
"to get to 'FINISHED' state.")
def test_finishedConnectionLose(self):
"""
L{_ChunkedTransferDecoder.noMoreData} does not raise any exception if
it is called after the terminal zero length chunk is received.
"""
parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
parser.dataReceived(b'0\r\n\r\n')
parser.noMoreData()
def test_reentrantFinishedNoMoreData(self):
"""
L{_ChunkedTransferDecoder.noMoreData} can be called from the finished
callback without raising an exception.
"""
errors = []
successes = []
def finished(extra):
try:
parser.noMoreData()
except:
errors.append(Failure())
else:
successes.append(True)
parser = http._ChunkedTransferDecoder(None, finished)
parser.dataReceived(b'0\r\n\r\n')
self.assertEqual(errors, [])
self.assertEqual(successes, [True])
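# Illustrative sketch (hedged, not part of the original test module): the
# chunked Transfer-Encoding wire format exercised above encodes each chunk as
# b"<hex length>\r\n<bytes>\r\n" and terminates the stream with a zero-length
# chunk followed by an empty line.
def _example_chunked_wire_format():
    decoded = []
    finished = []
    decoder = http._ChunkedTransferDecoder(decoded.append, finished.append)
    decoder.dataReceived(b'5\r\nhello\r\n6\r\nworld!\r\n0\r\n\r\n')
    # decoded == [b'hello', b'world!'] and finished == [b'']
    return decoded, finished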
class ChunkingTests(unittest.TestCase, ResponseTestMixin):
strings = [b"abcv", b"", b"fdfsd423", b"Ffasfas\r\n",
b"523523\n\rfsdf", b"4234"]
def testChunks(self):
for s in self.strings:
chunked = b''.join(http.toChunk(s))
self.assertEqual((s, b''), http.fromChunk(chunked))
self.assertRaises(ValueError, http.fromChunk, b'-5\r\nmalformed!\r\n')
def testConcatenatedChunks(self):
chunked = b''.join([b''.join(http.toChunk(t)) for t in self.strings])
result = []
buffer = b""
for c in iterbytes(chunked):
buffer = buffer + c
try:
data, buffer = http.fromChunk(buffer)
result.append(data)
except ValueError:
pass
self.assertEqual(result, self.strings)
def test_chunkedResponses(self):
"""
Test that the L{HTTPChannel} correctly chunks responses when needed.
"""
channel = http.HTTPChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.1"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
req.write(b'World!')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.1 200 OK",
b"Test: lemur",
b"Transfer-Encoding: chunked",
b"5\r\nHello\r\n6\r\nWorld!\r\n")])
class ParsingTests(unittest.TestCase):
"""
Tests for protocol parsing in L{HTTPChannel}.
"""
def setUp(self):
self.didRequest = False
def runRequest(self, httpRequest, requestFactory=None, success=True,
channel=None):
"""
Execute a web request based on plain text content.
@param httpRequest: Content for the request which is processed.
@type httpRequest: C{bytes}
@param requestFactory: 2-argument callable returning a Request.
@type requestFactory: C{callable}
@param success: Value to compare against I{self.didRequest}.
@type success: C{bool}
@param channel: Channel instance over which the request is processed.
@type channel: L{HTTPChannel}
@return: Returns the channel used for processing the request.
@rtype: L{HTTPChannel}
"""
if not channel:
channel = http.HTTPChannel()
if requestFactory:
channel.requestFactory = requestFactory
httpRequest = httpRequest.replace(b"\n", b"\r\n")
transport = StringTransport()
channel.makeConnection(transport)
# one byte at a time, to stress it.
for byte in iterbytes(httpRequest):
if channel.transport.disconnecting:
break
channel.dataReceived(byte)
channel.connectionLost(IOError("all done"))
if success:
self.assertTrue(self.didRequest)
else:
self.assertFalse(self.didRequest)
return channel
def test_invalidNonAsciiMethod(self):
"""
When client sends invalid HTTP method containing
non-ascii characters HTTP 400 'Bad Request' status will be returned.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
badRequestLine = b"GE\xc2\xa9 / HTTP/1.1\r\n\r\n"
channel = self.runRequest(badRequestLine, MyRequest, 0)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
self.assertEqual(processed, [])
def test_basicAuth(self):
"""
L{HTTPChannel} provides username and password information supplied in
an I{Authorization} header to the L{Request} which makes it available
via its C{getUser} and C{getPassword} methods.
"""
requests = []
class Request(http.Request):
def process(self):
self.credentials = (self.getUser(), self.getPassword())
requests.append(self)
for u, p in [(b"foo", b"bar"), (b"hello", b"there:z")]:
s = base64.encodestring(b":".join((u, p))).strip()
f = b"GET / HTTP/1.0\nAuthorization: Basic " + s + b"\n\n"
self.runRequest(f, Request, 0)
req = requests.pop()
self.assertEqual((u, p), req.credentials)
def test_headers(self):
"""
Headers received by L{HTTPChannel} in a request are made available to
the L{Request}.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
requestLines = [
b"GET / HTTP/1.0",
b"Foo: bar",
b"baz: Quux",
b"baz: quux",
b"",
b""]
self.runRequest(b'\n'.join(requestLines), MyRequest, 0)
[request] = processed
self.assertEqual(
request.requestHeaders.getRawHeaders(b'foo'), [b'bar'])
self.assertEqual(
request.requestHeaders.getRawHeaders(b'bAz'), [b'Quux', b'quux'])
def test_tooManyHeaders(self):
"""
L{HTTPChannel} enforces a limit of C{HTTPChannel.maxHeaders} on the
number of headers received per request.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
requestLines = [b"GET / HTTP/1.0"]
for i in range(http.HTTPChannel.maxHeaders + 2):
requestLines.append(networkString("%s: foo" % (i,)))
requestLines.extend([b"", b""])
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
self.assertEqual(processed, [])
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_invalidContentLengthHeader(self):
"""
If a Content-Length header with a non-integer value is received, a 400
(Bad Request) response is sent to the client and the connection is
closed.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
requestLines = [b"GET / HTTP/1.0", b"Content-Length: x", b"", b""]
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
self.assertEqual(processed, [])
def test_invalidHeaderNoColon(self):
"""
If a header without colon is received a 400 (Bad Request) response
is sent to the client and the connection is closed.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
requestLines = [b"GET / HTTP/1.0", b"HeaderName ", b"", b""]
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
self.assertEqual(processed, [])
def test_headerLimitPerRequest(self):
"""
L{HTTPChannel} enforces the limit of C{HTTPChannel.maxHeaders} per
request so that headers received in an earlier request do not count
towards the limit when processing a later request.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
self.patch(http.HTTPChannel, 'maxHeaders', 1)
requestLines = [
b"GET / HTTP/1.1",
b"Foo: bar",
b"",
b"",
b"GET / HTTP/1.1",
b"Bar: baz",
b"",
b""]
channel = self.runRequest(b"\n".join(requestLines), MyRequest, 0)
[first, second] = processed
self.assertEqual(first.getHeader(b'foo'), b'bar')
self.assertEqual(second.getHeader(b'bar'), b'baz')
self.assertEqual(
channel.transport.value(),
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n')
def test_headersTooBigInitialCommand(self):
"""
Enforces a limit of C{HTTPChannel.totalHeadersSize}
on the size of headers received per request starting from initial
command line.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
channel = http.HTTPChannel()
channel.totalHeadersSize = 10
httpRequest = b'GET /path/longer/than/10 HTTP/1.1\n'
channel = self.runRequest(
httpRequest=httpRequest,
requestFactory=MyRequest,
channel=channel,
success=False
)
self.assertEqual(processed, [])
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_headersTooBigOtherHeaders(self):
"""
Enforces a limit of C{HTTPChannel.totalHeadersSize}
on the size of headers received per request counting first line
and total headers.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.finish()
channel = http.HTTPChannel()
channel.totalHeadersSize = 40
httpRequest = (
b'GET /less/than/40 HTTP/1.1\n'
b'Some-Header: less-than-40\n'
)
channel = self.runRequest(
httpRequest=httpRequest,
requestFactory=MyRequest,
channel=channel, success=False
)
self.assertEqual(processed, [])
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_headersTooBigPerRequest(self):
"""
        L{HTTPChannel} enforces the total header size limit per individual
        request, and the counter is reset at the end of each request, so
        headers from an earlier request do not count against a later one.
"""
class SimpleRequest(http.Request):
def process(self):
self.finish()
channel = http.HTTPChannel()
channel.totalHeadersSize = 60
channel.requestFactory = SimpleRequest
httpRequest = (
b'GET / HTTP/1.1\n'
b'Some-Header: total-less-than-60\n'
b'\n'
b'GET / HTTP/1.1\n'
b'Some-Header: less-than-60\n'
b'\n'
)
channel = self.runRequest(
httpRequest=httpRequest, channel=channel, success=False)
self.assertEqual(
channel.transport.value(),
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
b'0\r\n'
b'\r\n'
)
def testCookies(self):
"""
Test cookies parsing and reading.
"""
httpRequest = b'''\
GET / HTTP/1.0
Cookie: rabbit="eat carrot"; ninja=secret; spam="hey 1=1!"
'''
cookies = {}
testcase = self
class MyRequest(http.Request):
def process(self):
for name in [b'rabbit', b'ninja', b'spam']:
cookies[name] = self.getCookie(name)
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(
cookies, {
b'rabbit': b'"eat carrot"',
b'ninja': b'secret',
b'spam': b'"hey 1=1!"'})
def testGET(self):
httpRequest = b'''\
GET /?key=value&multiple=two+words&multiple=more%20words&empty= HTTP/1.0
'''
method = []
args = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
args.extend([
self.args[b"key"],
self.args[b"empty"],
self.args[b"multiple"]])
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b"GET"])
self.assertEqual(
args, [[b"value"], [b""], [b"two words", b"more words"]])
def test_extraQuestionMark(self):
"""
        While only a single '?' is allowed in a URL, several other servers
        accept more than one and pass everything after the first through as
        part of the query arguments. Test that we emulate this behavior.
"""
httpRequest = b'GET /foo?bar=?&baz=quux HTTP/1.0\n\n'
method = []
path = []
args = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
path.append(self.path)
args.extend([self.args[b'bar'], self.args[b'baz']])
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b'GET'])
self.assertEqual(path, [b'/foo'])
self.assertEqual(args, [[b'?'], [b'quux']])
def test_formPOSTRequest(self):
"""
The request body of a I{POST} request with a I{Content-Type} header
of I{application/x-www-form-urlencoded} is parsed according to that
content type and made available in the C{args} attribute of the
request object. The original bytes of the request may still be read
from the C{content} attribute.
"""
query = 'key=value&multiple=two+words&multiple=more%20words&empty='
httpRequest = networkString('''\
POST / HTTP/1.0
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
%s''' % (len(query), query))
method = []
args = []
content = []
testcase = self
class MyRequest(http.Request):
def process(self):
method.append(self.method)
args.extend([
self.args[b'key'], self.args[b'empty'],
self.args[b'multiple']])
content.append(self.content.read())
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
self.assertEqual(method, [b"POST"])
self.assertEqual(
args, [[b"value"], [b""], [b"two words", b"more words"]])
# Reading from the content file-like must produce the entire request
# body.
self.assertEqual(content, [networkString(query)])
def test_missingContentDisposition(self):
"""
If the C{Content-Disposition} header is missing, the request is denied
as a bad request.
"""
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 103
--AaB03x
Content-Type: text/plain
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
channel = self.runRequest(req, http.Request, success=False)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
if _PY3:
test_missingContentDisposition.skip = (
"cgi.parse_multipart is much more error-tolerant on Python 3.")
def test_multipartProcessingFailure(self):
"""
When the multipart processing fails the client gets a 400 Bad Request.
"""
# The parsing failure is simulated by having a Content-Length that
# doesn't fit in a ssize_t.
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 103
--AaB03x
Content-Type: text/plain
Content-Length: 999999999999999999999999999999999999999999999999999999999999999
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
channel = self.runRequest(req, http.Request, success=False)
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
def test_multipartFormData(self):
"""
If the request has a Content-Type of C{multipart/form-data}, and the
form data is parseable, the form arguments will be added to the
request's args.
"""
processed = []
class MyRequest(http.Request):
def process(self):
processed.append(self)
self.write(b"done")
self.finish()
req = b'''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 149
--AaB03x
Content-Type: text/plain
Content-Disposition: form-data; name="text"
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
channel = self.runRequest(req, MyRequest, success=False)
self.assertEqual(channel.transport.value(),
b"HTTP/1.0 200 OK\r\n\r\ndone")
self.assertEqual(len(processed), 1)
self.assertEqual(processed[0].args, {b"text": [b"abasdfg"]})
def test_chunkedEncoding(self):
"""
If a request uses the I{chunked} transfer encoding, the request body is
decoded accordingly before it is made available on the request.
"""
httpRequest = b'''\
GET / HTTP/1.0
Content-Type: text/plain
Transfer-Encoding: chunked
6
Hello,
14
spam,eggs spam spam
0
'''
path = []
method = []
content = []
decoder = []
testcase = self
class MyRequest(http.Request):
def process(self):
content.append(self.content.fileno())
content.append(self.content.read())
method.append(self.method)
path.append(self.path)
decoder.append(self.channel._transferDecoder)
testcase.didRequest = True
self.finish()
self.runRequest(httpRequest, MyRequest)
# The tempfile API used to create content returns an
# instance of a different type depending on what platform
# we're running on. The point here is to verify that the
# request body is in a file that's on the filesystem.
# Having a fileno method that returns an int is a somewhat
# close approximation of this. -exarkun
self.assertIsInstance(content[0], int)
self.assertEqual(content[1], b'Hello, spam,eggs spam spam')
self.assertEqual(method, [b'GET'])
self.assertEqual(path, [b'/'])
self.assertEqual(decoder, [None])
def test_malformedChunkedEncoding(self):
"""
If a request uses the I{chunked} transfer encoding, but provides an
invalid chunk length value, the request fails with a 400 error.
"""
# See test_chunkedEncoding for the correct form of this request.
httpRequest = b'''\
GET / HTTP/1.1
Content-Type: text/plain
Transfer-Encoding: chunked
MALFORMED_LINE_THIS_SHOULD_BE_'6'
Hello,
14
spam,eggs spam spam
0
'''
didRequest = []
class MyRequest(http.Request):
def process(self):
# This request should fail, so this should never be called.
didRequest.append(True)
channel = self.runRequest(httpRequest, MyRequest, success=False)
self.assertFalse(didRequest, "Request.process called")
self.assertEqual(
channel.transport.value(),
b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.assertTrue(channel.transport.disconnecting)
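# Commentary added for readability: the two chunked-encoding tests above
# exercise the framing from RFC 7230 section 4.1 -- a hexadecimal chunk
# length, a line break, the chunk data, and a final zero-length chunk.  The
# helper below is only an illustrative sketch of that framing written for
# this note; it is not part of Twisted's API and nothing in this module
# calls it.
def _naiveDechunkExample(body):
    """
    Decode a chunked HTTP body supplied as C{bytes} with CRLF separators.
    Illustrative only: no error handling beyond what C{int} provides.
    """
    decoded = b""
    rest = body
    while rest:
        line, _, rest = rest.partition(b"\r\n")
        length = int(line.split(b";")[0], 16)
        if length == 0:
            break
        decoded, rest = decoded + rest[:length], rest[length + 2:]
    return decoded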
class QueryArgumentsTests(unittest.TestCase):
def testParseqs(self):
self.assertEqual(
cgi.parse_qs(b"a=b&d=c;+=f"),
http.parse_qs(b"a=b&d=c;+=f"))
self.assertRaises(
ValueError, http.parse_qs, b"blah", strict_parsing=True)
self.assertEqual(
cgi.parse_qs(b"a=&b=c", keep_blank_values=1),
http.parse_qs(b"a=&b=c", keep_blank_values=1))
self.assertEqual(
cgi.parse_qs(b"a=&b=c"),
http.parse_qs(b"a=&b=c"))
def test_urlparse(self):
"""
For a given URL, L{http.urlparse} should behave the same as L{urlparse},
except it should always return C{bytes}, never text.
"""
def urls():
for scheme in (b'http', b'https'):
for host in (b'example.com',):
for port in (None, 100):
for path in (b'', b'path'):
if port is not None:
host = host + b':' + networkString(str(port))
yield urlunsplit((scheme, host, path, b'', b''))
def assertSameParsing(url, decode):
"""
Verify that C{url} is parsed into the same objects by both
L{http.urlparse} and L{urlparse}.
"""
urlToStandardImplementation = url
if decode:
urlToStandardImplementation = url.decode('ascii')
# stdlib urlparse will give back whatever type we give it. To be
# able to compare the values meaningfully, if it gives back unicode,
# convert all the values to bytes.
standardResult = urlparse(urlToStandardImplementation)
if isinstance(standardResult.scheme, unicode):
# The choice of encoding is basically irrelevant. The values
# are all in ASCII. UTF-8 is, of course, the correct choice.
expected = (standardResult.scheme.encode('utf-8'),
standardResult.netloc.encode('utf-8'),
standardResult.path.encode('utf-8'),
standardResult.params.encode('utf-8'),
standardResult.query.encode('utf-8'),
standardResult.fragment.encode('utf-8'))
else:
expected = (standardResult.scheme,
standardResult.netloc,
standardResult.path,
standardResult.params,
standardResult.query,
standardResult.fragment)
scheme, netloc, path, params, query, fragment = http.urlparse(url)
self.assertEqual(
(scheme, netloc, path, params, query, fragment), expected)
self.assertIsInstance(scheme, bytes)
self.assertIsInstance(netloc, bytes)
self.assertIsInstance(path, bytes)
self.assertIsInstance(params, bytes)
self.assertIsInstance(query, bytes)
self.assertIsInstance(fragment, bytes)
# With caching, unicode then str
clear_cache()
for url in urls():
assertSameParsing(url, True)
assertSameParsing(url, False)
# With caching, str then unicode
clear_cache()
for url in urls():
assertSameParsing(url, False)
assertSameParsing(url, True)
# Without caching
for url in urls():
clear_cache()
assertSameParsing(url, True)
clear_cache()
assertSameParsing(url, False)
def test_urlparseRejectsUnicode(self):
"""
L{http.urlparse} should reject unicode input early.
"""
self.assertRaises(TypeError, http.urlparse, u'http://example.org/path')
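# Commentary added for readability: a short usage sketch of the behaviour
# QueryArgumentsTests verifies -- L{twisted.web.http.urlparse} always hands
# back bytes, unlike the stdlib which mirrors the input type.  This helper
# is illustrative only and is not used by the tests.
def _urlparseBytesExample():
    from twisted.web import http
    scheme, netloc, path, params, query, fragment = http.urlparse(
        b"http://example.com/path?q=1")
    assert isinstance(path, bytes) and path == b"/path"
    return scheme, netloc, path, params, query, fragment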
class ClientDriver(http.HTTPClient):
def handleStatus(self, version, status, message):
self.version = version
self.status = status
self.message = message
class ClientStatusParsingTests(unittest.TestCase):
def testBaseline(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201 foo')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'foo')
def testNoMessage(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'')
def testNoMessage_trailingSpace(self):
c = ClientDriver()
c.lineReceived(b'HTTP/1.0 201 ')
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.message, b'')
class RequestTests(unittest.TestCase, ResponseTestMixin):
"""
Tests for L{http.Request}
"""
def _compatHeadersTest(self, oldName, newName):
"""
Verify that each of two different attributes which are associated with
the same state properly reflect changes made through the other.
This is used to test that the C{headers}/C{responseHeaders} and
C{received_headers}/C{requestHeaders} pairs interact properly.
"""
req = http.Request(DummyChannel(), False)
getattr(req, newName).setRawHeaders(b"test", [b"lemur"])
self.assertEqual(getattr(req, oldName)[b"test"], b"lemur")
setattr(req, oldName, {b"foo": b"bar"})
self.assertEqual(
list(getattr(req, newName).getAllRawHeaders()),
[(b"Foo", [b"bar"])])
setattr(req, newName, http_headers.Headers())
self.assertEqual(getattr(req, oldName), {})
def test_getHeader(self):
"""
L{http.Request.getHeader} returns the value of the named request
header.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur"])
self.assertEqual(req.getHeader(b"test"), b"lemur")
def test_getHeaderReceivedMultiples(self):
"""
When there are multiple values for a single request header,
L{http.Request.getHeader} returns the last value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur", b"panda"])
self.assertEqual(req.getHeader(b"test"), b"panda")
def test_getHeaderNotFound(self):
"""
L{http.Request.getHeader} returns L{None} when asked for the value of a
request header which is not present.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.getHeader(b"test"), None)
def test_getAllHeaders(self):
"""
        L{http.Request.getAllHeaders} returns a C{dict} mapping all request
header names to their corresponding values.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur"])
self.assertEqual(req.getAllHeaders(), {b"test": b"lemur"})
def test_getAllHeadersNoHeaders(self):
"""
L{http.Request.getAllHeaders} returns an empty C{dict} if there are no
request headers.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.getAllHeaders(), {})
def test_getAllHeadersMultipleHeaders(self):
"""
When there are multiple values for a single request header,
L{http.Request.getAllHeaders} returns only the last value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(b"test", [b"lemur", b"panda"])
self.assertEqual(req.getAllHeaders(), {b"test": b"panda"})
def test_setResponseCode(self):
"""
L{http.Request.setResponseCode} takes a status code and causes it to be
used as the response status.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.setResponseCode(201)
req.write(b'')
self.assertEqual(
channel.transport.written.getvalue().splitlines()[0],
b"(no clientproto yet) 201 Created")
def test_setResponseCodeAndMessage(self):
"""
L{http.Request.setResponseCode} takes a status code and a message and
causes them to be used as the response status.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.setResponseCode(202, b"happily accepted")
req.write(b'')
self.assertEqual(
channel.transport.written.getvalue().splitlines()[0],
b'(no clientproto yet) 202 happily accepted')
def test_setResponseCodeAndMessageNotBytes(self):
"""
L{http.Request.setResponseCode} accepts C{bytes} for the message
parameter and raises L{TypeError} if passed anything else.
"""
channel = DummyChannel()
req = http.Request(channel, False)
self.assertRaises(TypeError, req.setResponseCode,
202, u"not happily accepted")
def test_setResponseCodeAcceptsIntegers(self):
"""
L{http.Request.setResponseCode} accepts C{int} for the code parameter
and raises L{TypeError} if passed anything else.
"""
req = http.Request(DummyChannel(), False)
req.setResponseCode(1)
self.assertRaises(TypeError, req.setResponseCode, "1")
def test_setResponseCodeAcceptsLongIntegers(self):
"""
L{http.Request.setResponseCode} accepts C{long} for the code
parameter.
"""
req = http.Request(DummyChannel(), False)
req.setResponseCode(long(1))
if _PY3:
test_setResponseCodeAcceptsLongIntegers.skip = (
"Python 3 has no separate long integer type.")
def test_setLastModifiedNeverSet(self):
"""
When no previous value was set and no 'if-modified-since' value was
requested, L{http.Request.setLastModified} takes a timestamp in seconds
since the epoch and sets the request's lastModified attribute.
"""
req = http.Request(DummyChannel(), False)
req.setLastModified(42)
self.assertEqual(req.lastModified, 42)
def test_setLastModifiedUpdate(self):
"""
If the supplied timestamp is later than the lastModified attribute's
        value, L{http.Request.setLastModified} updates the lastModified
attribute.
"""
req = http.Request(DummyChannel(), False)
req.setLastModified(0)
req.setLastModified(1)
self.assertEqual(req.lastModified, 1)
def test_setLastModifiedIgnore(self):
"""
If the supplied timestamp occurs earlier than the current lastModified
attribute, L{http.Request.setLastModified} ignores it.
"""
req = http.Request(DummyChannel(), False)
req.setLastModified(1)
req.setLastModified(0)
self.assertEqual(req.lastModified, 1)
def test_setLastModifiedCached(self):
"""
If the resource is older than the if-modified-since date in the request
header, L{http.Request.setLastModified} returns L{http.CACHED}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'02 Jan 1970 00:00:00 GMT']
)
result = req.setLastModified(42)
self.assertEqual(result, http.CACHED)
def test_setLastModifiedNotCached(self):
"""
If the resource is newer than the if-modified-since date in the request
header, L{http.Request.setLastModified} returns None
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'01 Jan 1970 00:00:00 GMT']
)
result = req.setLastModified(1000000)
self.assertEqual(result, None)
def test_setLastModifiedTwiceNotCached(self):
"""
When L{http.Request.setLastModified} is called multiple times, the
highest supplied value is honored. If that value is higher than the
if-modified-since date in the request header, the method returns None.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'01 Jan 1970 00:00:01 GMT']
)
req.setLastModified(1000000)
result = req.setLastModified(0)
self.assertEqual(result, None)
def test_setLastModifiedTwiceCached(self):
"""
When L{http.Request.setLastModified} is called multiple times, the
highest supplied value is honored. If that value is lower than the
if-modified-since date in the request header, the method returns
L{http.CACHED}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
networkString('if-modified-since'),
[b'01 Jan 1999 00:00:01 GMT']
)
req.setLastModified(1)
result = req.setLastModified(0)
self.assertEqual(result, http.CACHED)
def test_setHost(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should not be added because it is the default.
"""
req = http.Request(DummyChannel(), False)
req.setHost(b"example.com", 80)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com"])
def test_setHostSSL(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should not be added because it is the default.
"""
d = DummyChannel()
d.transport = DummyChannel.SSL()
req = http.Request(d, False)
req.setHost(b"example.com", 443)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com"])
def test_setHostNonDefaultPort(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should be added because it is not the default.
"""
req = http.Request(DummyChannel(), False)
req.setHost(b"example.com", 81)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com:81"])
def test_setHostSSLNonDefaultPort(self):
"""
L{http.Request.setHost} sets the value of the host request header.
The port should be added because it is not the default.
"""
d = DummyChannel()
d.transport = DummyChannel.SSL()
req = http.Request(d, False)
req.setHost(b"example.com", 81)
self.assertEqual(
req.requestHeaders.getRawHeaders(b"host"), [b"example.com:81"])
def test_setHeader(self):
"""
L{http.Request.setHeader} sets the value of the given response header.
"""
req = http.Request(DummyChannel(), False)
req.setHeader(b"test", b"lemur")
self.assertEqual(req.responseHeaders.getRawHeaders(b"test"), [b"lemur"])
def _checkCookie(self, expectedCookieValue, *args, **kwargs):
"""
        Call L{http.Request.addCookie} with C{*args} and C{**kwargs}, and check
that the cookie value is equal to C{expectedCookieValue}.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.addCookie(*args, **kwargs)
self.assertEqual(req.cookies[0], expectedCookieValue)
# Write nothing to make it produce the headers
req.write(b"")
writtenLines = channel.transport.written.getvalue().split(b"\r\n")
# There should be one Set-Cookie header
setCookieLines = [x for x in writtenLines
if x.startswith(b"Set-Cookie")]
self.assertEqual(len(setCookieLines), 1)
self.assertEqual(setCookieLines[0],
b"Set-Cookie: " + expectedCookieValue)
def test_addCookieWithMinimumArgumentsUnicode(self):
"""
        L{http.Request.addCookie} adds a new cookie to be sent with the
response, and can be called with just a key and a value. L{unicode}
arguments are encoded using UTF-8.
"""
expectedCookieValue = b"foo=bar"
self._checkCookie(expectedCookieValue, u"foo", u"bar")
def test_addCookieWithAllArgumentsUnicode(self):
"""
        L{http.Request.addCookie} adds a new cookie to be sent with the
response. L{unicode} arguments are encoded using UTF-8.
"""
expectedCookieValue = (
b"foo=bar; Expires=Fri, 31 Dec 9999 23:59:59 GMT; "
b"Domain=.example.com; Path=/; Max-Age=31536000; "
b"Comment=test; Secure; HttpOnly")
self._checkCookie(expectedCookieValue,
u"foo", u"bar", expires=u"Fri, 31 Dec 9999 23:59:59 GMT",
domain=u".example.com", path=u"/", max_age=u"31536000",
comment=u"test", secure=True, httpOnly=True)
def test_addCookieWithMinimumArgumentsBytes(self):
"""
        L{http.Request.addCookie} adds a new cookie to be sent with the
response, and can be called with just a key and a value. L{bytes}
arguments are not decoded.
"""
expectedCookieValue = b"foo=bar"
self._checkCookie(expectedCookieValue, b"foo", b"bar")
def test_addCookieWithAllArgumentsBytes(self):
"""
        L{http.Request.addCookie} adds a new cookie to be sent with the
response. L{bytes} arguments are not decoded.
"""
expectedCookieValue = (
b"foo=bar; Expires=Fri, 31 Dec 9999 23:59:59 GMT; "
b"Domain=.example.com; Path=/; Max-Age=31536000; "
b"Comment=test; Secure; HttpOnly")
self._checkCookie(expectedCookieValue,
b"foo", b"bar", expires=b"Fri, 31 Dec 9999 23:59:59 GMT",
domain=b".example.com", path=b"/", max_age=b"31536000",
comment=b"test", secure=True, httpOnly=True)
def test_addCookieNonStringArgument(self):
"""
        L{http.Request.addCookie} will emit a L{DeprecationWarning} if
non-string (not L{bytes} or L{unicode}) arguments are given, and will
call C{str()} on it to preserve past behaviour.
"""
expectedCookieValue = b"foo=10"
self._checkCookie(expectedCookieValue, b"foo", 10)
warnings = self.flushWarnings([self._checkCookie])
self.assertEqual(1, len(warnings))
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Passing non-bytes or non-unicode cookie arguments is "
"deprecated since Twisted 16.1.")
def test_firstWrite(self):
"""
For an HTTP 1.0 request, L{http.Request.write} sends an HTTP 1.0
Response-Line and whatever response headers are set.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: lemur",
b"Hello")])
def test_nonByteHeaderValue(self):
"""
L{http.Request.write} casts non-bytes header value to bytes
transparently.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.responseHeaders.setRawHeaders(b"test", [10])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: 10",
b"Hello")])
warnings = self.flushWarnings(
offendingFunctions=[self.test_nonByteHeaderValue])
self.assertEqual(1, len(warnings))
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Passing non-bytes header values is deprecated since "
"Twisted 12.3. Pass only bytes instead.")
def test_firstWriteHTTP11Chunked(self):
"""
For an HTTP 1.1 request, L{http.Request.write} sends an HTTP 1.1
Response-Line, whatever response headers are set, and uses chunked
encoding for the response body.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.1"
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
req.write(b'World!')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.1 200 OK",
b"Test: lemur",
b"Transfer-Encoding: chunked",
b"5\r\nHello\r\n6\r\nWorld!\r\n")])
def test_firstWriteLastModified(self):
"""
For an HTTP 1.0 request for a resource with a known last modified time,
L{http.Request.write} sends an HTTP Response-Line, whatever response
headers are set, and a last-modified header with that time.
"""
channel = DummyChannel()
req = http.Request(channel, False)
trans = StringTransport()
channel.transport = trans
req.setResponseCode(200)
req.clientproto = b"HTTP/1.0"
req.lastModified = 0
req.responseHeaders.setRawHeaders(b"test", [b"lemur"])
req.write(b'Hello')
self.assertResponseEquals(
trans.value(),
[(b"HTTP/1.0 200 OK",
b"Test: lemur",
b"Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT",
b"Hello")])
def test_receivedCookiesDefault(self):
"""
L{http.Request.received_cookies} defaults to an empty L{dict}.
"""
req = http.Request(DummyChannel(), False)
self.assertEqual(req.received_cookies, {})
def test_parseCookies(self):
"""
L{http.Request.parseCookies} extracts cookies from C{requestHeaders}
and adds them to C{received_cookies}.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'test="lemur"; test2="panda"'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_parseCookiesMultipleHeaders(self):
"""
L{http.Request.parseCookies} can extract cookies from multiple Cookie
headers.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'test="lemur"', b'test2="panda"'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_parseCookiesNoCookie(self):
"""
L{http.Request.parseCookies} can be called on a request without a
cookie header.
"""
req = http.Request(DummyChannel(), False)
req.parseCookies()
self.assertEqual(req.received_cookies, {})
def test_parseCookiesEmptyCookie(self):
"""
L{http.Request.parseCookies} can be called on a request with an
empty cookie header.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [])
req.parseCookies()
self.assertEqual(req.received_cookies, {})
def test_parseCookiesIgnoreValueless(self):
"""
L{http.Request.parseCookies} ignores cookies which don't have a
value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo; bar; baz;'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {})
def test_parseCookiesEmptyValue(self):
"""
L{http.Request.parseCookies} parses cookies with an empty value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo='])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b''})
def test_parseCookiesRetainRightSpace(self):
"""
L{http.Request.parseCookies} leaves trailing whitespace in the
cookie value.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'foo=bar '])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b'bar '})
def test_parseCookiesStripLeftSpace(self):
"""
L{http.Request.parseCookies} strips leading whitespace in the
cookie key.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b' foo=bar'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b'foo': b'bar'})
def test_parseCookiesContinueAfterMalformedCookie(self):
"""
L{http.Request.parseCookies} parses valid cookies set before or
after malformed cookies.
"""
req = http.Request(DummyChannel(), False)
req.requestHeaders.setRawHeaders(
b"cookie", [b'12345; test="lemur"; 12345; test2="panda"; 12345'])
req.parseCookies()
self.assertEqual(
req.received_cookies, {b"test": b'"lemur"', b"test2": b'"panda"'})
def test_connectionLost(self):
"""
L{http.Request.connectionLost} closes L{Request.content} and drops the
reference to the L{HTTPChannel} to assist with garbage collection.
"""
req = http.Request(DummyChannel(), False)
# Cause Request.content to be created at all.
req.gotLength(10)
# Grab a reference to content in case the Request drops it later on.
content = req.content
# Put some bytes into it
req.handleContentChunk(b"hello")
# Then something goes wrong and content should get closed.
req.connectionLost(Failure(ConnectionLost("Finished")))
self.assertTrue(content.closed)
self.assertIdentical(req.channel, None)
def test_registerProducerTwiceFails(self):
"""
Calling L{Request.registerProducer} when a producer is already
registered raises ValueError.
"""
req = http.Request(DummyChannel(), False)
req.registerProducer(DummyProducer(), True)
self.assertRaises(
ValueError, req.registerProducer, DummyProducer(), True)
def test_registerProducerWhenNotQueuedRegistersPushProducer(self):
"""
Calling L{Request.registerProducer} with an IPushProducer when the
request is not queued registers the producer as a push producer on the
request's transport.
"""
req = http.Request(DummyChannel(), False)
producer = DummyProducer()
req.registerProducer(producer, True)
self.assertEqual([(producer, True)], req.transport.producers)
def test_registerProducerWhenNotQueuedRegistersPullProducer(self):
"""
Calling L{Request.registerProducer} with an IPullProducer when the
request is not queued registers the producer as a pull producer on the
request's transport.
"""
req = http.Request(DummyChannel(), False)
producer = DummyProducer()
req.registerProducer(producer, False)
self.assertEqual([(producer, False)], req.transport.producers)
def test_connectionLostNotification(self):
"""
L{Request.connectionLost} triggers all finish notification Deferreds
and cleans up per-request state.
"""
d = DummyChannel()
request = http.Request(d, True)
finished = request.notifyFinish()
request.connectionLost(Failure(ConnectionLost("Connection done")))
self.assertIdentical(request.channel, None)
return self.assertFailure(finished, ConnectionLost)
def test_finishNotification(self):
"""
L{Request.finish} triggers all finish notification Deferreds.
"""
request = http.Request(DummyChannel(), False)
finished = request.notifyFinish()
# Force the request to have a non-None content attribute. This is
# probably a bug in Request.
request.gotLength(1)
request.finish()
return finished
def test_writeAfterFinish(self):
"""
Calling L{Request.write} after L{Request.finish} has been called results
in a L{RuntimeError} being raised.
"""
request = http.Request(DummyChannel(), False)
finished = request.notifyFinish()
# Force the request to have a non-None content attribute. This is
# probably a bug in Request.
request.gotLength(1)
request.write(b'foobar')
request.finish()
self.assertRaises(RuntimeError, request.write, b'foobar')
return finished
def test_finishAfterConnectionLost(self):
"""
Calling L{Request.finish} after L{Request.connectionLost} has been
called results in a L{RuntimeError} being raised.
"""
channel = DummyChannel()
req = http.Request(channel, False)
req.connectionLost(Failure(ConnectionLost("The end.")))
self.assertRaises(RuntimeError, req.finish)
def test_reprUninitialized(self):
"""
L{Request.__repr__} returns the class name, object address, and
        dummy placeholder values when used on a L{Request} which has not yet
been initialized.
"""
request = http.Request(DummyChannel(), False)
self.assertEqual(
repr(request),
'<Request at 0x%x method=(no method yet) uri=(no uri yet) '
'clientproto=(no clientproto yet)>' % (id(request),))
def test_reprInitialized(self):
"""
L{Request.__repr__} returns, as a L{str}, the class name, object
address, and the method, uri, and client protocol of the HTTP request
it represents. The string is in the form::
<Request at ADDRESS method=METHOD uri=URI clientproto=PROTOCOL>
"""
request = http.Request(DummyChannel(), False)
request.clientproto = b'HTTP/1.0'
request.method = b'GET'
request.uri = b'/foo/bar'
self.assertEqual(
repr(request),
'<Request at 0x%x method=GET uri=/foo/bar '
'clientproto=HTTP/1.0>' % (id(request),))
def test_reprSubclass(self):
"""
Subclasses of L{Request} inherit a C{__repr__} implementation which
includes the subclass's name in place of the string C{"Request"}.
"""
class Otherwise(http.Request):
pass
request = Otherwise(DummyChannel(), False)
self.assertEqual(
repr(request),
'<Otherwise at 0x%x method=(no method yet) uri=(no uri yet) '
'clientproto=(no clientproto yet)>' % (id(request),))
def test_unregisterNonQueuedNonStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a non-queued non-streaming
producer from the request and the request's transport.
"""
req = http.Request(DummyChannel(), False)
req.transport = StringTransport()
req.registerProducer(DummyProducer(), False)
req.unregisterProducer()
self.assertEqual((None, None), (req.producer, req.transport.producer))
def test_unregisterNonQueuedStreamingProducer(self):
"""
L{Request.unregisterProducer} unregisters a non-queued streaming
producer from the request and the request's transport.
"""
req = http.Request(DummyChannel(), False)
req.transport = StringTransport()
req.registerProducer(DummyProducer(), True)
req.unregisterProducer()
self.assertEqual((None, None), (req.producer, req.transport.producer))
def test_finishProducesLog(self):
"""
L{http.Request.finish} will call the channel's factory to produce a log
message.
"""
factory = http.HTTPFactory()
factory.timeOut = None
factory._logDateTime = "sometime"
factory._logDateTimeCall = True
factory.startFactory()
factory.logFile = NativeStringIO()
proto = factory.buildProtocol(None)
val = [
b"GET /path HTTP/1.1\r\n",
b"\r\n\r\n"
]
trans = StringTransport()
proto.makeConnection(trans)
for x in val:
proto.dataReceived(x)
proto._channel.requests[0].finish()
# A log message should be written out
self.assertIn('sometime "GET /path HTTP/1.1"',
factory.logFile.getvalue())
def test_requestBodyTimeoutFromFactory(self):
"""
        L{HTTPChannel} times out whenever data from a request body is not
        delivered to it in time, even when it gets built from an
        L{HTTPFactory}.
"""
clock = Clock()
factory = http.HTTPFactory(timeout=100, reactor=clock)
factory.startFactory()
protocol = factory.buildProtocol(None)
transport = StringTransport()
# Confirm that the timeout is what we think it is.
self.assertEqual(protocol.timeOut, 100)
        # This is a terrible violation of the abstraction layer of
# _genericHTTPChannelProtocol, but we need to do it because
# policies.TimeoutMixin doesn't accept a reactor on the object.
# See https://twistedmatrix.com/trac/ticket/8488
protocol._channel.callLater = clock.callLater
protocol.makeConnection(transport)
protocol.dataReceived(b'POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
clock.advance(99)
self.assertFalse(transport.disconnecting)
clock.advance(2)
self.assertTrue(transport.disconnecting)
def test_finishCleansConnection(self):
"""
L{http.Request.finish} will notify the channel that it is finished, and
will put the transport back in the producing state so that the reactor
can close the connection.
"""
factory = http.HTTPFactory()
factory.timeOut = None
factory._logDateTime = "sometime"
factory._logDateTimeCall = True
factory.startFactory()
factory.logFile = NativeStringIO()
proto = factory.buildProtocol(None)
val = [
b"GET /path HTTP/1.1\r\n",
b"Connection: close\r\n",
b"\r\n\r\n"
]
trans = StringTransport()
proto.makeConnection(trans)
self.assertEqual(trans.producerState, 'producing')
for x in val:
proto.dataReceived(x)
self.assertEqual(trans.producerState, 'paused')
proto._channel.requests[0].finish()
self.assertEqual(trans.producerState, 'producing')
class MultilineHeadersTests(unittest.TestCase):
"""
Tests to exercise handling of multiline headers by L{HTTPClient}. RFCs 1945
(HTTP 1.0) and 2616 (HTTP 1.1) state that HTTP message header fields can
span multiple lines if each extra line is preceded by at least one space or
horizontal tab.
"""
def setUp(self):
"""
Initialize variables used to verify that the header-processing functions
are getting called.
"""
self.handleHeaderCalled = False
self.handleEndHeadersCalled = False
# Dictionary of sample complete HTTP header key/value pairs, including
# multiline headers.
expectedHeaders = {b'Content-Length': b'10',
b'X-Multiline' : b'line-0\tline-1',
b'X-Multiline2' : b'line-2 line-3'}
def ourHandleHeader(self, key, val):
"""
Dummy implementation of L{HTTPClient.handleHeader}.
"""
self.handleHeaderCalled = True
self.assertEqual(val, self.expectedHeaders[key])
def ourHandleEndHeaders(self):
"""
Dummy implementation of L{HTTPClient.handleEndHeaders}.
"""
self.handleEndHeadersCalled = True
def test_extractHeader(self):
"""
A header isn't processed by L{HTTPClient.extractHeader} until it is
confirmed in L{HTTPClient.lineReceived} that the header has been
received completely.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
c.lineReceived(b'Content-Length: 10')
self.assertIdentical(c.length, None)
self.assertFalse(self.handleHeaderCalled)
self.assertFalse(self.handleEndHeadersCalled)
# Signal end of headers.
c.lineReceived(b'')
self.assertTrue(self.handleHeaderCalled)
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.length, 10)
def test_noHeaders(self):
"""
An HTTP request with no headers will not cause any calls to
L{handleHeader} but will cause L{handleEndHeaders} to be called on
L{HTTPClient} subclasses.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
# Signal end of headers.
c.lineReceived(b'')
self.assertFalse(self.handleHeaderCalled)
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
def test_multilineHeaders(self):
"""
L{HTTPClient} parses multiline headers by buffering header lines until
an empty line or a line that does not start with whitespace hits
lineReceived, confirming that the header has been received completely.
"""
c = ClientDriver()
c.handleHeader = self.ourHandleHeader
c.handleEndHeaders = self.ourHandleEndHeaders
c.lineReceived(b'HTTP/1.0 201')
c.lineReceived(b'X-Multiline: line-0')
self.assertFalse(self.handleHeaderCalled)
# Start continuing line with a tab.
c.lineReceived(b'\tline-1')
c.lineReceived(b'X-Multiline2: line-2')
# The previous header must be complete, so now it can be processed.
self.assertTrue(self.handleHeaderCalled)
# Start continuing line with a space.
c.lineReceived(b' line-3')
c.lineReceived(b'Content-Length: 10')
# Signal end of headers.
c.lineReceived(b'')
self.assertTrue(self.handleEndHeadersCalled)
self.assertEqual(c.version, b'HTTP/1.0')
self.assertEqual(c.status, b'201')
self.assertEqual(c.length, 10)
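# Commentary added for readability: the folded (multiline) header form
# exercised above looks like this on the wire -- continuation lines start
# with a space or a horizontal tab and are appended to the previous header's
# value:
#
#     HTTP/1.0 201 Created
#     X-Multiline: line-0
#     <TAB>line-1
#     X-Multiline2: line-2
#     <SPACE>line-3
#     Content-Length: 10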
class Expect100ContinueServerTests(unittest.TestCase, ResponseTestMixin):
"""
Test that the HTTP server handles 'Expect: 100-continue' header correctly.
The tests in this class all assume a simplistic behavior where user code
cannot choose to deny a request. Once ticket #288 is implemented and user
code can run before the body of a POST is processed this should be
extended to support overriding this behavior.
"""
def test_HTTP10(self):
"""
HTTP/1.0 requests do not get 100-continue returned, even if 'Expect:
100-continue' is included (RFC 2616 10.1.1).
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandler
channel.makeConnection(transport)
channel.dataReceived(b"GET / HTTP/1.0\r\n")
channel.dataReceived(b"Host: www.example.com\r\n")
channel.dataReceived(b"Content-Length: 3\r\n")
channel.dataReceived(b"Expect: 100-continue\r\n")
channel.dataReceived(b"\r\n")
self.assertEqual(transport.value(), b"")
channel.dataReceived(b"abc")
self.assertResponseEquals(
transport.value(),
[(b"HTTP/1.0 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.0",
b"Request: /",
b"'''\n3\nabc'''\n")])
def test_expect100ContinueHeader(self):
"""
        If an HTTP/1.1 client sends an 'Expect: 100-continue' header, the server
responds with a 100 response code before handling the request body, if
any. The normal resource rendering code will then be called, which
will send an additional response code.
"""
transport = StringTransport()
channel = http.HTTPChannel()
channel.requestFactory = DummyHTTPHandler
channel.makeConnection(transport)
channel.dataReceived(b"GET / HTTP/1.1\r\n")
channel.dataReceived(b"Host: www.example.com\r\n")
channel.dataReceived(b"Expect: 100-continue\r\n")
channel.dataReceived(b"Content-Length: 3\r\n")
# The 100 continue response is not sent until all headers are
# received:
self.assertEqual(transport.value(), b"")
channel.dataReceived(b"\r\n")
# The 100 continue response is sent *before* the body is even
# received:
self.assertEqual(transport.value(), b"HTTP/1.1 100 Continue\r\n\r\n")
channel.dataReceived(b"abc")
response = transport.value()
self.assertTrue(
response.startswith(b"HTTP/1.1 100 Continue\r\n\r\n"))
response = response[len(b"HTTP/1.1 100 Continue\r\n\r\n"):]
self.assertResponseEquals(
response,
[(b"HTTP/1.1 200 OK",
b"Command: GET",
b"Content-Length: 13",
b"Version: HTTP/1.1",
b"Request: /",
b"'''\n3\nabc'''\n")])
def sub(keys, d):
"""
Create a new dict containing only a subset of the items of an existing
dict.
@param keys: An iterable of the keys which will be added (with values from
C{d}) to the result.
@param d: The existing L{dict} from which to copy items.
@return: The new L{dict} with keys given by C{keys} and values given by the
corresponding values in C{d}.
@rtype: L{dict}
"""
return dict([(k, d[k]) for k in keys])
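# Commentary added for readability: a tiny usage example for sub() --
#     sub(["category"], {"category": DeprecationWarning, "filename": "x.py"})
# returns {"category": DeprecationWarning}, which is how the assertions
# below restrict a flushed warning to just the keys they care about.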
class DeprecatedRequestAttributesTests(unittest.TestCase):
"""
Tests for deprecated attributes of L{twisted.web.http.Request}.
"""
def test_getClient(self):
"""
L{Request.getClient} is deprecated in favor of resolving the hostname
in application code.
"""
channel = DummyChannel()
request = http.Request(channel, True)
request.gotLength(123)
request.requestReceived(b"GET", b"/", b"HTTP/1.1")
expected = channel.transport.getPeer().host
self.assertEqual(expected, request.getClient())
warnings = self.flushWarnings(
offendingFunctions=[self.test_getClient])
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.getClient was deprecated "
"in Twisted 15.0.0; please use Twisted Names to "
"resolve hostnames instead")},
sub(["category", "message"], warnings[0]))
def test_noLongerQueued(self):
"""
L{Request.noLongerQueued} is deprecated, as we no longer process
requests simultaneously.
"""
channel = DummyChannel()
request = http.Request(channel)
request.noLongerQueued()
warnings = self.flushWarnings(
offendingFunctions=[self.test_noLongerQueued])
self.assertEqual(1, len(warnings))
self.assertEqual({
"category": DeprecationWarning,
"message": (
"twisted.web.http.Request.noLongerQueued was deprecated "
"in Twisted 16.3.0")},
sub(["category", "message"], warnings[0]))
| 33.130495 | 80 | 0.600896 |
6433b446f2bc5bfab8df3010dff148f00665ef22 | 13,609 | py | Python | 2020/Genders/gendersdb_pkg/gendersdb/__main__.py | BryanWhitehurst/HPCCEA | 54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa | ["MIT"] | 10 | 2019-08-12T23:00:20.000Z | 2021-08-06T17:06:48.000Z | 2020/Genders/gendersdb_pkg/gendersdb/__main__.py | BryanWhitehurst/HPCCEA | 54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa | ["MIT"] | 5 | 2020-06-18T23:51:58.000Z | 2021-07-28T17:50:34.000Z | 2020/Genders/gendersdb_pkg/gendersdb/__main__.py | BryanWhitehurst/HPCCEA | 54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa | ["MIT"] | 21 | 2019-06-10T21:03:03.000Z | 2021-08-06T17:57:25.000Z |
from hostlist import hostlist
import genders
import mysql.connector
from mysql.connector import Error
import argparse
from gendersdb import loaddata
from gendersdb import setP
import sys
# Connects to the genders database.
def connectDatabase():
# block tests if gender database exists already
try:
try:
# Change this path later
with open("passW.txt") as f:
password = [line.rstrip("\n") for line in f][0]
        except OSError as e:
            # Opening the password file raises OSError, not a MySQL Error;
            # without a password we cannot connect, so stop here.
            print(e)
            print("Please add your password using --password.")
            sys.exit(1)
config = {
"user": "root",
"password": f"{password}",
"host": "localhost",
"database": "gender",
}
mydb = mysql.connector.connect(**config)
if mydb.is_connected():
cursor = mydb.cursor(buffered=True, dictionary=True)
# if it does not exists runs create.sql script
except Error as e:
print(e) # Output a message instead
config = {"user": "root", "password": f"{password}", "host": "localhost"}
mydb = mysql.connector.connect(**config)
cursor = mydb.cursor(buffered=True, dictionary=True)
# Open and read the file as a single buffer
fd = open("create.sql", "r")
sqlFile = fd.read()
fd.close()
sqlCommands = sqlFile.split(";")
# Execute every command from the input file
for command in sqlCommands:
# This will skip and report errors
try:
cursor.execute(command)
except Error as e:
print("Command skipped: ", e)
return mydb
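# Commentary added here: connectDatabase() bootstraps the schema by running
# create.sql one statement at a time, splitting the file on ";".  That naive
# split would misbehave if a statement ever contained a literal ";" inside a
# string, but it is sufficient for this project's simple schema file.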
def allNodes(mydb):
sql = "SELECT DISTINCT node_name FROM NODE"
cur = mydb.cursor(buffered=True, dictionary=True)
cur.execute(sql)
records = cur.fetchall()
return records
def getVals(mydb, gender_name):
gender_name = str(gender_name)
sql = "SELECT val,node_name FROM CONFIGURATION WHERE gender_name = %s"
cur = mydb.cursor(buffered=True, dictionary=True)
val = (gender_name,)
cur.execute(sql, val)
records = cur.fetchall()
return records
def getUVals(mydb, gender_name):
gender_name = str(gender_name)
sql = "SELECT DISTINCT val FROM CONFIGURATION WHERE gender_name = %s"
cur = mydb.cursor(buffered=True, dictionary=True)
val = (gender_name,)
cur.execute(sql, val)
records = cur.fetchall()
return records
def getValinNode(mydb, gender_name, node_name):
sql = "SELECT val FROM CONFIGURATION WHERE gender_name = %s AND node_name = %s"
val = (gender_name, node_name)
cur = mydb.cursor(buffered=True, dictionary=True)
cur.execute(
sql, val,
)
records = cur.fetchall()
return records
def findNodes(mydb, gender_namei):
sql = "SELECT DISTINCT n.node_name FROM NODE n JOIN CONFIGURATION c WHERE (n.node_name = c.node_name AND c.gender_name = %s )"
val = (gender_namei,)
cur = mydb.cursor(buffered=True, dictionary=True)
cur.execute(sql, val)
records = cur.fetchall()
return records
def findGenders(mydb, node_namei):
sql = "SELECT DISTINCT g.gender_name FROM GENDER g JOIN CONFIGURATION c WHERE (g.gender_name = c.gender_name AND c.node_name = %s)"
val = (node_namei,)
cur = mydb.cursor(buffered=True, dictionary=True)
cur.execute(sql, val)
records = cur.fetchall()
return records
def findGendersandValues(mydb, node_namei):
query = "SELECT gender_name, val FROM CONFIGURATION WHERE node_name=%s"
cursor = mydb.cursor()
cursor.execute(query, (node_namei,))
return cursor.fetchall()
def allGenders(mydb):
sql = "SELECT DISTINCT gender_name FROM GENDER"
cur = mydb.cursor(buffered=True, dictionary=True)
cur.execute(sql)
records = cur.fetchall()
return records
def parsedefault(inp):
gen = genders.Genders("/etc/genders")
if len(inp) == 1:
node = gen.getnodename()
attr = inp[0]
elif len(inp) == 2:
node = inp[0]
attr = inp[1]
else:
parser.error("Too many arguments.")
return node, attr
def X(attr, excludeattr, mydb):
cursor = mydb.cursor()
query = "SELECT DISTINCT node_name FROM CONFIGURATION WHERE gender_name=%s AND node_name NOT IN (SELECT node_name FROM CONFIGURATION WHERE gender_name=%s)"
cursor.execute(query, (attr, excludeattr))
results = cursor.fetchall()
return results
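# Commentary added here: X() selects nodes that have `attr` but not
# `excludeattr`.  With hypothetical CONFIGURATION rows
#     node01/compute, node01/gpu, node02/compute
# X("compute", "gpu", mydb) would return [("node02",)] -- plain tuples,
# because this cursor is created without dictionary=True.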
def main():
parser = argparse.ArgumentParser(description="Connect with database")
parser.add_argument("-password", action="store_true")
parser.add_argument("-load", action="store_true")
parser.add_argument(
"-descrip",
nargs="*",
metavar=("gender", "description"),
help="adds a description to the given gender",
)
parser.add_argument(
"-dd", help="drops entire database", action="store_true", dest="dd"
)
parser.add_argument(
"-q",
nargs="*",
help="prints list of nodes having the specified attribute in host range",
action="store",
dest="hostlist",
)
parser.add_argument(
"-Q",
nargs="*",
help="returns 0 if attribute exists in nide else 1, if no node specified checks entire database",
action="store",
)
parser.add_argument(
"-c",
nargs="*",
help="prints list of nodes having specified attribute in comma seperated format",
action="store",
dest="comma",
)
parser.add_argument(
"-n",
nargs="*",
help="prints list of nodes having specified attribute in newline separated list",
action="store",
dest="newline",
)
parser.add_argument(
"-s",
nargs="*",
help="prints list of nodes having specified attribute in space separated list",
action="store",
dest="space",
)
parser.add_argument(
"-v",
metavar=("node", "attr"),
nargs="+",
help="outputs values associated with gender on a particular node",
)
parser.add_argument(
"-vv",
nargs=1,
help="outputs values associated with gender and with node listed",
action="store",
dest="valuesWnodes",
)
parser.add_argument(
"-l",
nargs="*",
help="list of attributes for a particular node, if no node all attributes in database",
)
parser.add_argument(
"-V",
nargs="*",
help="outputs all values associated with gender, if U is specified only unqiue values",
)
parser.add_argument("-U", help="V will only output unique values")
parser.add_argument(
"-A", action="store_true", help="prints all nodes in the desired format"
)
parser.add_argument("-X", nargs="*", help="exclude node from query")
parser.add_argument("-XX", nargs="*", help="exlcude node from query")
results = parser.parse_args()
if results.password:
setP.store()
sys.exit()
    # Find nodes with the specified gender in hostlist format.
    mydb = connectDatabase()  # Creates the database first if it doesn't exist yet
if results.load:
loaddata.loaddata(mydb)
if results.hostlist != None:
finLi = []
records = []
prev = False
hosts = ""
clusterN = ""
if results.X != None:
records = X(results.hostlist[0], results.X[0], mydb)
elif results.XX != None:
record = findNodes(mydb, str(results.hostlist[0]))
for row in record:
if row["node_name"] != results.XX[0]:
records.append(row)
elif results.A:
records = allNodes(mydb)
else:
records = findNodes(mydb, str(results.hostlist[0]))
if (len(records)) > 0:
cluster0 = records[0]
if results.X != None:
cluster0 = cluster0[0]
else:
cluster0 = cluster0["node_name"]
cluster0 = cluster0[:-1]
for row in records:
clusterT = ""
if results.X != None:
clusterT = row[0]
else:
clusterT = row["node_name"]
clusterT = clusterT[:-1]
if cluster0 == clusterT:
if results.X != None:
hosts += row[0] + ","
else:
hosts += row["node_name"] + ","
prev = True
elif cluster0 != clusterT and prev == True:
finLi.append(hosts)
hosts = ""
if results.X != None:
hosts += row[0] + ","
else:
hosts += row["node_name"] + ","
prev = False
elif cluster0 != clusterT and prev == False:
hosts = ""
if results.X != None:
hosts += row[0] + ","
else:
hosts += row["node_name"] + ","
prev = True
cluster0 = clusterT
finLi.append(hosts)
for y in finLi:
y = y[:-1]
y = hostlist.compress_range(y)
print(y, end=" ")
print()
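    # Commentary added here: the block above groups node names by everything
    # except their final character (so "c1-0" and "c1-1" share the prefix
    # "c1-"), joins each group with commas, and hands it to
    # hostlist.compress_range(), which is assumed to print one compressed
    # range expression per cluster prefix.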
if results.comma != None:
finLi = []
if results.X != None:
records = X(results.comma[0], results.X[0], mydb)
elif results.A:
records = allNodes(mydb)
else:
records = findNodes(mydb, str(results.comma[0]))
if results.XX != None:
for row in records:
if row["node_name"] != results.XX[0]:
finLi.append(row["node_name"])
else:
for row in records:
if results.X:
finLi.append(row[0])
else:
finLi.append(row["node_name"])
print(*finLi, sep=", ")
if results.newline != None:
if results.X != None:
records = X(results.newline[0], results.X[0], mydb)
elif results.A:
records = allNodes(mydb)
else:
records = findNodes(mydb, str(results.newline[0]))
if results.XX != None:
for row in records:
if row["node_name"] != results.XX[0]:
print(row["node_name"])
else:
for row in records:
if results.X != None:
print(row[0])
else:
print(row["node_name"])
if results.space != None:
if results.X != None:
records = X(results.space[0], results.X[0], mydb)
elif results.A:
records = allNodes(mydb)
else:
records = findNodes(mydb, str(results.space[0]))
if results.XX != None:
for row in records:
if row["node_name"] != results.XX[0]:
print(row["node_name"], end=" ")
else:
for row in records:
if results.X != None:
print(row[0], end=" ")
else:
print(row["node_name"], end=" ")
print()
if results.V != None:
if len(results.V) == 0:
if results.U != None:
records = getUVals(mydb, results.U)
for row in records:
print(row["val"])
if len(results.V) == 1:
records = getVals(mydb, results.V[0])
for row in records:
print(row["val"])
if results.v != None:
cursor = mydb.cursor()
node, attr = parsedefault(results.v)
query = "SELECT val FROM CONFIGURATION WHERE node_name=%s && gender_name=%s"
cursor.execute(query, (node, attr))
result = cursor.fetchall()
if len(result) == 0:
parser.error("node or attribute not found")
else:
if result[0][0] != None:
print(result[0][0])
if results.Q != None:
if len(results.Q) == 1:
records = findNodes(mydb, str(results.Q[0]))
if len(records) > 0:
sys.exit(0)
else:
sys.exit(1)
if len(results.Q) == 2:
records = findNodes(mydb, results.Q[1])
for rec in records:
if rec["node_name"] == results.Q[0]:
sys.exit(0)
sys.exit(1)
if results.valuesWnodes != None:
records = getVals(mydb, *results.valuesWnodes)
for row in records:
print(row["node_name"], " ", row["val"])
if results.l != None:
if len(results.l) == 1:
records = findGendersandValues(mydb, *results.l)
else:
records = allGenders(mydb)
for row in records:
if len(results.l) == 1:
if row[1] == None:
print(row[0])
else:
print(row[0] + "=" + row[1])
else:
print(row["gender_name"])
if results.descrip != None:
cursor = mydb.cursor()
gender = results.descrip[0]
descrip = results.descrip[1]
query = "UPDATE GENDER SET descrip=%s WHERE gender_name=%s"
cursor.execute(query, (descrip, gender))
mydb.commit()
if results.dd:
sql = "DROP DATABASE gender"
cur = mydb.cursor(buffered=True, dictionary=True)
cur.execute(sql)
if __name__ == "__main__":
main()
| 31.796729 | 159 | 0.537806 |
e2c12fbfb2a08e9a9759e00b241e404bfabfa1b7 | 2,535 | py | Python | cartoon_image_smoothing.py | TobiasSunderdiek/cartoon-gan | 482cc9ef9c14b1869442e5e6b5b8a9c6bcfe5d6f | ["MIT"] | 42 | 2020-02-12T07:55:29.000Z | 2022-03-31T14:02:13.000Z | cartoon_image_smoothing.py | TobiasSunderdiek/cartoon-gan | 482cc9ef9c14b1869442e5e6b5b8a9c6bcfe5d6f | ["MIT"] | 4 | 2020-05-29T03:12:46.000Z | 2022-01-12T10:52:09.000Z | cartoon_image_smoothing.py | TobiasSunderdiek/cartoon-gan | 482cc9ef9c14b1869442e5e6b5b8a9c6bcfe5d6f | ["MIT"] | 15 | 2020-04-14T12:57:03.000Z | 2022-03-12T02:02:07.000Z |
import os
import cv2
from PIL import Image, ImageFilter
import downloader
PATH_TO_STORED_CARTOON_IMAGES = './safebooru/'
PATH_TO_STORE_SMOOTHED_IMAGES = './safebooru_smoothed/'
SMOOTHED_IMAGES_ZIPFILE_NAME = './safebooru_smoothed'
def main():
if not os.path.exists(PATH_TO_STORED_CARTOON_IMAGES):
print("Can not smooth images, path does not exist: ", PATH_TO_STORED_CARTOON_IMAGES)
if not os.path.exists(PATH_TO_STORE_SMOOTHED_IMAGES):
os.makedirs(PATH_TO_STORE_SMOOTHED_IMAGES)
for filename in os.listdir(PATH_TO_STORED_CARTOON_IMAGES):
cartoon_images_filename = PATH_TO_STORED_CARTOON_IMAGES + filename
smoothed_images_filename = PATH_TO_STORE_SMOOTHED_IMAGES + filename
if not os.path.exists(smoothed_images_filename):
edge_smoothing(cartoon_images_filename, smoothed_images_filename)
else:
print("Skipping file, already exists, ", cartoon_images_filename)
downloader.zip_images(SMOOTHED_IMAGES_ZIPFILE_NAME, PATH_TO_STORE_SMOOTHED_IMAGES)
def edge_smoothing(cartoon_images_filename, smoothed_images_filename):
print("Edge-smoothing of ", cartoon_images_filename)
origin = cv2.imread(cartoon_images_filename)
edges = createEdgesOverlay(origin)
result = overlayEdges(edges, origin)
result.save(smoothed_images_filename, "JPEG")
def overlayEdges(edges, origin):
background = transformFromCV2ToPillowImageFormat(origin)
background.paste(edges, (0, 0), edges)
background = background.convert("RGB")
return background
def transformFromCV2ToPillowImageFormat(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
return Image.fromarray(img)
def createEdgesOverlay(origin):
edges = cv2.Canny(origin, 30, 300, 3)
edges = cv2.dilate(edges, (3, 3))
edges = cv2.bitwise_not(edges)
edges = transformFromCV2ToPillowImageFormat(edges)
makeWhiteBackgroundTransparent(edges)
    # Blur here: blurring before making the background transparent results in a white halo.
    edges = edges.filter(ImageFilter.GaussianBlur)
return edges
# got this from here:
# https://stackoverflow.com/questions/765736/using-pil-to-make-all-white-pixels-transparent/4531395
def makeWhiteBackgroundTransparent(img):
datas = img.getdata()
newData = []
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
newData.append((255, 255, 255, 0))
else:
newData.append(item)
img.putdata(newData)
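# Commentary added here: the per-pixel loop above is easy to read but slow
# on large images.  A vectorised variant is sketched below for reference; it
# assumes numpy is installed, returns a new image instead of modifying the
# argument in place, and is not called anywhere in this script.
def makeWhiteBackgroundTransparentFast(img):
    import numpy as np
    data = np.array(img)                          # RGBA array, shape (H, W, 4)
    white = (data[:, :, :3] == 255).all(axis=2)   # mask of pure-white pixels
    data[white, 3] = 0                            # drop their alpha to 0
    return Image.fromarray(data)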
if __name__ == '__main__':
main()
| 37.279412 | 144 | 0.743984 |
dfecd34df1f67b390ff3f843a7f5c7621780d428 | 15,066 | py | Python | src_CPU/boruvka.py | P4-Distributed-Algorithm/P4-Distributed-Algorithm | 4e2a46590fc6d32414ec4dbb7f86cc14fa4d9b18 | ["Apache-2.0"] | null | null | null | src_CPU/boruvka.py | P4-Distributed-Algorithm/P4-Distributed-Algorithm | 4e2a46590fc6d32414ec4dbb7f86cc14fa4d9b18 | ["Apache-2.0"] | null | null | null | src_CPU/boruvka.py | P4-Distributed-Algorithm/P4-Distributed-Algorithm | 4e2a46590fc6d32414ec4dbb7f86cc14fa4d9b18 | ["Apache-2.0"] | 1 | 2021-06-26T09:01:09.000Z | 2021-06-26T09:01:09.000Z |
import argparse
import threading
import time
import netns
import pdb
import random
import os
import copy
from enum import Enum
INF = 1000000000
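# sub-stage identifiers for each round of the distributed Boruvka algorithm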
CAST_BELONGS = 0
REMOVE_EDGES = 1
CAST_MINIMUM = 2
CONNECT = 3
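# request types carried in Payload_Format.req_type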
MIN_BROADCAST = 0
MIN_REPLY = 1
FIN_BROADCAST = 2
PULL_REQ = 3
PULL_REPLY = 4
PUSH_REQ = 5
ADDEDGE_REQ = 6
ADDEDGE_REPLY = 7
TERMINATE = 8
arg_parser = argparse.ArgumentParser(description="")
arg_parser.add_argument("--name", type=str, required=True)
arg_parser.add_argument("--pid", type=int, required=True)
arg_parser.add_argument("--cnt", type=int, required=True)
args = arg_parser.parse_args()
host_name, host_mnet_pid, node_cnt = args.name, args.pid, args.cnt
with netns.NetNS(nspid=host_mnet_pid):
from parser import NORMAL_PKT, send_packets
from scapy.sendrecv import AsyncSniffer
from scapy.sessions import IPSession
from sync_framework import Synchronized_Framework, pkt_callback
from scapy.arch.linux import get_if_list
from utils import *
from parser import *
from datetime import datetime
import scapy
from scapy.all import *
class Payload_Format(Packet):
name = "Payload_Format"
fields_desc = [
BitField("req_type", 0, 8),
BitField("id", 0, 32),
BitField("flag", 0, 8),
BitField("min_val", 0, 32)
]
def initializer(log_file, host_name, neighbor, root_name, node_cnt, **kwargs):
cb_params = {}
cb_params["stage"] = 0
cb_params["log_file"] = log_file
cb_params["host_name"] = host_name
cb_params["neighbor"] = copy.deepcopy(neighbor)
cb_params["neighbor_cnt"] = len(neighbor.keys())
cb_params["root_name"] = root_name
cb_params["node_cnt"] = node_cnt
cb_params["self_id"] = int(host_name[1: ])
cb_params["component_size"] = 1
fin = open("graph_weight.txt", "r")
while True:
try:
name1, ip1, name2, ip2, weight = fin.readline().split()
if name1 != host_name:
name1 = name2
ip2 = ip1
if name1 == host_name:
cb_params["neighbor"][ip2] = int(weight)
except:
break
fin.close()
cb_params["sub_stage"] = CAST_MINIMUM ^ 1
cb_params["sons"] = set()
cb_params["sons_cnt"] = 0
cb_params["father"] = -1
cb_params["new_sons"] = []
cb_params["deleted_sons"] = []
cb_params["deleted_neighbors"] = set()
cb_params["new_father"] = -1
return cb_params
def stage_initializer(_, cb_params):
cb_params["stage"] += 1
cb_params["sub_stage"] = (cb_params["sub_stage"] + 1) % 4
if cb_params["sub_stage"] == CAST_BELONGS:
for son in cb_params["new_sons"]:
cb_params["sons"].add(son)
cb_params["sons_cnt"] += 1
for son in cb_params["deleted_sons"]:
if son not in cb_params["sons"]:
print("Error: failed to delete a son at node {}".format(cb_params["host_name"]))
cb_params["sons"].remove(son)
cb_params["sons_cnt"] -= 1
cb_params["new_sons"] = []
cb_params["deleted_sons"] = []
if cb_params["new_father"] != -1:
cb_params["father"] = cb_params["new_father"]
cb_params["new_father"] = -1
cb_params["request_id"] = -1
cb_params["query_remain"] = 0
for neighbor in cb_params["neighbor"].keys():
if neighbor not in cb_params["sons"] and neighbor != cb_params["father"]:
cb_params["query_remain"] += 1
print("---{}: query_remain: {}---".format(host_name, cb_params["query_remain"]))
cb_params["log_file"].write("{} -> {}\n".format(cb_params["host_name"], cb_params["father"]))
cb_params["log_file"].flush()
elif cb_params["sub_stage"] == CAST_MINIMUM:
for neighbor in cb_params["deleted_neighbors"]:
cb_params["neighbor"].pop(neighbor)
cb_params["deleted_neighbors"] = set()
min_val = INF
min_ip = ""
for son in cb_params["neighbor"].keys():
if son not in cb_params["sons"] and son != cb_params["father"]:
weight = cb_params["neighbor"][son]
if weight < min_val:
min_val = weight
min_ip = son
print("------------ min_ip for host {} is {}".format(host_name, min_ip))
cb_params["min_val"] = min_val
cb_params["min_ip"] = min_ip
return cb_params
def __wrap_send(dst_ip, pkt_type, stage, buf, channel_id=0):
#print(host_name, dst_ip)
#buf.show()
send_packets(dst_ip, pkt_type, stage, buf, channel_id)
def terminate_broadcast(cb_params):
cb_params["log_file"].write("{} -> {}\n".format(cb_params["host_name"], cb_params["father"]))
for son in cb_params["sons"]:
__wrap_send(son, NORMAL_PKT, cb_params["stage"], buf=Payload_Format(req_type=TERMINATE))
def judge_and_restruct(cb_params):
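    # runs at a component root once the minimum outgoing edge is known:
    # either initiate a merge along that edge or broadcast FIN to close the round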
assert(cb_params["father"] == -1)
finished = False
if cb_params["request_id"] == -1 or cb_params["request_id"] < cb_params["self_id"]: # connect
cb_params["new_father"] = cb_params["min_ip"]
if cb_params["min_ip"] not in cb_params["sons"]:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, cb_params["stage"], buf=Payload_Format(req_type=ADDEDGE_REQ, id=cb_params["self_id"], flag=1))
else:
cb_params["deleted_sons"].append(cb_params["min_ip"])
__wrap_send(cb_params["min_ip"], NORMAL_PKT, cb_params["stage"], buf=Payload_Format(req_type=PUSH_REQ, id=cb_params["self_id"], flag=1))
finished = True
for son in cb_params["sons"]:
if son != cb_params["min_ip"]:
__wrap_send(son, NORMAL_PKT, cb_params["stage"], buf=Payload_Format(req_type=FIN_BROADCAST))
else:
finished = True
for son in cb_params["sons"]:
__wrap_send(son, NORMAL_PKT, cb_params["stage"], buf=Payload_Format(req_type=FIN_BROADCAST))
return cb_params, finished
def callback(src_ip, stage, buf, cb_params, start_flag):
finished = False
terminate = False
self_id = cb_params["self_id"]
if start_flag == True:
if cb_params["sub_stage"] == CAST_BELONGS:
if cb_params["father"] == -1:
cb_params["belong_id"] = self_id
for son in cb_params["sons"]:
__wrap_send(son, NORMAL_PKT, stage, buf=Payload_Format(req_type=FIN_BROADCAST, flag=1, id=self_id))
finished = True
elif cb_params["sub_stage"] == REMOVE_EDGES:
finished = True
for neighbor in cb_params["neighbor"]:
if neighbor not in cb_params["sons"] and neighbor != cb_params["father"]:
finished = False
__wrap_send(neighbor, NORMAL_PKT, stage, buf=Payload_Format(req_type=ADDEDGE_REQ, flag=2, id=cb_params["belong_id"]))
# guarantee that nodes do nothing if they are not the root of a subtree
elif cb_params["sub_stage"] == CAST_MINIMUM:
if cb_params["father"] == -1:
for son in cb_params["sons"]:
__wrap_send(son, NORMAL_PKT, stage, buf=Payload_Format(req_type=MIN_BROADCAST))
cb_params["rem_cnt"] = cb_params["sons_cnt"]
if cb_params["rem_cnt"] == 0:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, stage, buf=Payload_Format(req_type=ADDEDGE_REQ, flag=0, id=self_id))
else:
if cb_params["father"] == -1:
min_ip = cb_params["min_ip"]
if min_ip not in cb_params["sons"]:
cb_params, finished = judge_and_restruct(cb_params)
else:
__wrap_send(min_ip, NORMAL_PKT, stage, buf=Payload_Format(req_type=PULL_REQ))
else:
buf = Payload_Format(raw(buf))
if buf.req_type == ADDEDGE_REQ:
_flag = 0
if buf.flag == 1:
cb_params["new_sons"].append(src_ip)
elif buf.flag == 2:
_flag = 1
if cb_params["belong_id"] == buf.id:
cb_params["deleted_neighbors"].add(src_ip)
_flag = 2
else:
if cb_params["min_ip"] == src_ip:
cb_params["request_id"] = buf.id
__wrap_send(src_ip, NORMAL_PKT, stage, buf=Payload_Format(req_type=ADDEDGE_REPLY, flag=_flag), channel_id=1)
elif buf.req_type == ADDEDGE_REPLY:
if buf.flag >= 1:
cb_params["query_remain"] -= 1
if cb_params["query_remain"] == 0:
finished = True
else:
finished = True
if buf.flag == 2:
cb_params["deleted_neighbors"].add(src_ip)
elif buf.req_type == TERMINATE:
terminate_broadcast(cb_params)
finished = True
terminate = True
elif buf.req_type == FIN_BROADCAST:
if buf.flag == 1:
cb_params["belong_id"] = buf.id
for son in cb_params["sons"]:
__wrap_send(son, NORMAL_PKT, stage, buf=Payload_Format(req_type=FIN_BROADCAST, id=buf.id, flag=buf.flag))
finished = True
elif buf.req_type == MIN_BROADCAST:
if cb_params["sub_stage"] != CAST_MINIMUM:
print("Error: Incorrect Sub_Stage")
for son in cb_params["sons"]:
__wrap_send(son, NORMAL_PKT, stage, buf=Payload_Format(req_type=MIN_BROADCAST))
cb_params["rem_cnt"] = cb_params["sons_cnt"]
if cb_params["rem_cnt"] == 0:
__wrap_send(cb_params["father"], NORMAL_PKT, stage, buf=Payload_Format(req_type=MIN_REPLY, min_val=cb_params["min_val"]))
elif buf.req_type == MIN_REPLY:
cb_params["rem_cnt"] -= 1
if cb_params["min_val"] > buf.min_val:
cb_params["min_val"] = buf.min_val
cb_params["min_ip"] = src_ip
if cb_params["rem_cnt"] == 0:
if cb_params["father"] != -1:
__wrap_send(cb_params["father"], NORMAL_PKT, stage, buf=Payload_Format(req_type=MIN_REPLY, min_val=cb_params["min_val"]))
else:
if cb_params["min_val"] == INF:
terminate = True
finished = True
terminate_broadcast(cb_params)
else:
for son in cb_params["sons"]:
if son != cb_params["min_ip"]:
__wrap_send(son, NORMAL_PKT, stage, buf=Payload_Format(req_type=FIN_BROADCAST))
if cb_params["min_ip"] not in cb_params["sons"]:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, stage, buf=Payload_Format(req_type=ADDEDGE_REQ, id=self_id, flag=0))
else:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, stage, buf=Payload_Format(req_type=PUSH_REQ, id=self_id, flag=0))
finished = True
elif buf.req_type == PULL_REQ:
if cb_params["min_ip"] not in cb_params["sons"]: # leaf edge
__wrap_send(cb_params["father"], NORMAL_PKT, stage, buf=Payload_Format(req_type=PULL_REPLY, id=cb_params["request_id"]))
else:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, stage, buf=Payload_Format(req_type=PULL_REQ))
elif buf.req_type == PULL_REPLY:
cb_params["request_id"] = buf.id
if cb_params["father"] != -1:
__wrap_send(cb_params["father"], NORMAL_PKT, stage, buf=Payload_Format(req_type=PULL_REPLY, id=cb_params["request_id"]))
else:
cb_params, finished = judge_and_restruct(cb_params)
else:
if buf.flag == 1:
if cb_params["min_ip"] in cb_params["sons"]:
cb_params["deleted_sons"].append(cb_params["min_ip"])
cb_params["new_father"] = cb_params["min_ip"]
cb_params["new_sons"].append(cb_params["father"])
for son in cb_params["sons"]:
if son != cb_params["min_ip"]:
__wrap_send(son, NORMAL_PKT, stage, buf=Payload_Format(req_type=FIN_BROADCAST))
if cb_params["min_ip"] not in cb_params["sons"]:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, stage, buf=Payload_Format(req_type=ADDEDGE_REQ, id=buf.id, flag=buf.flag))
else:
__wrap_send(cb_params["min_ip"], NORMAL_PKT, stage, buf=Payload_Format(req_type=PUSH_REQ, id=buf.id, flag=buf.flag))
finished = True
return finished, terminate, cb_params
def print_result(cb_params):
fout = open("tree_result.txt", "a+")
fin = open("graph_weight.txt", "r")
while True:
try:
name1, ip1, name2, ip2, weight = fin.readline().split()
if ip1 == cb_params["father"]:
fout.write("{} -> {}\n".format(name2, name1))
elif ip2 == cb_params["father"]:
fout.write("{} -> {}\n".format(name1, name2))
except:
break
fin.close()
fout.close()
framework = Synchronized_Framework(callback, initializer, stage_initializer, ([0, 1, 2, 3], 4), host_name, host_mnet_pid, "h1", node_cnt, term_stage=node_cnt-1)
sniffer = AsyncSniffer(iface=get_if_list(), prn=lambda x: pkt_callback(x, framework.queue), filter="udp", session=IPSession)
sniffer.start()
# start sniffing first, then enter the main loop (prevents missing packets)
time.sleep(1)
cb_params = framework.main_loop()
print_result(cb_params)
sniffer.stop()
print("Thread on host {} finished!".format(host_name))
| 40.391421
| 164
| 0.543276
|
41e35a72e21b6bb5842f3511c77ba56362a42eb5
| 887
|
py
|
Python
|
ds_algorithms/graphs/graph_bfs.py
|
elenaborisova/LeetCode-Solutions
|
98376aab7fd150a724e316357ae5ea46988d9eac
|
[
"MIT"
] | null | null | null |
ds_algorithms/graphs/graph_bfs.py
|
elenaborisova/LeetCode-Solutions
|
98376aab7fd150a724e316357ae5ea46988d9eac
|
[
"MIT"
] | null | null | null |
ds_algorithms/graphs/graph_bfs.py
|
elenaborisova/LeetCode-Solutions
|
98376aab7fd150a724e316357ae5ea46988d9eac
|
[
"MIT"
] | null | null | null |
def bfs(graph, start_vertex, target_value):
path = [start_vertex]
vertex_and_path = [start_vertex, path]
bfs_queue = [vertex_and_path]
visited = set()
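    # each queue entry carries the path taken so far, so the first time the
    # target is reached the accumulated path is a shortest path (BFS property)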
while bfs_queue:
current_vertex, path = bfs_queue.pop(0)
visited.add(current_vertex)
for neighbor in graph[current_vertex]:
if neighbor not in visited:
if neighbor == target_value:
return path + [neighbor]
else:
bfs_queue.append([neighbor, path + [neighbor]])
the_most_dangerous_graph = {
'lava': set(['sharks', 'piranhas']),
'sharks': set(['lava', 'bees', 'lasers']),
'piranhas': set(['lava', 'crocodiles']),
'bees': set(['sharks']),
'lasers': set(['sharks', 'crocodiles']),
'crocodiles': set(['piranhas', 'lasers'])
}
print(bfs(the_most_dangerous_graph, "crocodiles", "bees"))
| 30.586207
| 67
| 0.588501
|
4892b60813e9c769f0ec68e679bf66b6ee1e74b6
| 28
|
py
|
Python
|
lambwaves/__init__.py
|
luispauloml/Lamb-Wave-Dispersion
|
a4f735fa4a3f232c0d2fc07ad0ab8b861b647159
|
[
"MIT"
] | null | null | null |
lambwaves/__init__.py
|
luispauloml/Lamb-Wave-Dispersion
|
a4f735fa4a3f232c0d2fc07ad0ab8b861b647159
|
[
"MIT"
] | null | null | null |
lambwaves/__init__.py
|
luispauloml/Lamb-Wave-Dispersion
|
a4f735fa4a3f232c0d2fc07ad0ab8b861b647159
|
[
"MIT"
] | null | null | null |
from .lambwaves import Lamb
| 14
| 27
| 0.821429
|
3a71efacba17710c57ebce4b89665c55862138f0
| 5,099
|
py
|
Python
|
RLBotPack/BotimusPrime/source/strategy/offense.py
|
joey676/RLBotPack
|
634245e548ad2d078e5e15035fd9999f418beea1
|
[
"MIT"
] | null | null | null |
RLBotPack/BotimusPrime/source/strategy/offense.py
|
joey676/RLBotPack
|
634245e548ad2d078e5e15035fd9999f418beea1
|
[
"MIT"
] | null | null | null |
RLBotPack/BotimusPrime/source/strategy/offense.py
|
joey676/RLBotPack
|
634245e548ad2d078e5e15035fd9999f418beea1
|
[
"MIT"
] | null | null | null |
from RLUtilities.GameInfo import GameInfo
from RLUtilities.LinearAlgebra import *
from RLUtilities.Simulation import Car, Ball
from utils.vector_math import *
from utils.math import *
from utils.misc import *
from utils.intercept import Intercept, AerialIntercept
from maneuvers.kit import Maneuver
from maneuvers.dribbling.dribble import Dribble
from maneuvers.air.aerial import Aerial
from maneuvers.strikes.dodge_shot import DodgeShot
from maneuvers.strikes.strike import Strike
from maneuvers.strikes.ground_shot import GroundShot
from maneuvers.strikes.mirror_shot import MirrorShot
from maneuvers.strikes.close_shot import CloseShot
from maneuvers.strikes.aerial_shot import AerialShot
from maneuvers.strikes.wall_shot import WallShot
from maneuvers.strikes.wall_dodge_shot import WallDodgeShot
from maneuvers.shadow_defense import ShadowDefense
class Offense:
def __init__(self, info: GameInfo):
self.info = info
def wall_shot(self, car: Car, target: vec3) -> Maneuver:
ground_shot = WallShot(car, self.info, target)
dodge_shot = WallDodgeShot(car, self.info, target)
if dodge_shot.intercept.time < ground_shot.intercept.time - 0.1:
return dodge_shot
return ground_shot
def direct_shot(self, car: Car, target: vec3) -> Maneuver:
dodge_shot = DodgeShot(car, self.info, target)
ground_shot = GroundShot(car, self.info, target)
if (
dodge_shot.intercept.time < ground_shot.intercept.time - 0.1
or distance(dodge_shot.intercept.ground_pos, target) < 4000
or (dot(direction(ground_shot.intercept.ground_pos, car), ground_shot.intercept.ball.vel) < -0.2 \
and norm(ground_shot.intercept.ball.vel) > 500)
):
if (
distance(dodge_shot.intercept.ground_pos, target) < 4000
and abs(dodge_shot.intercept.ground_pos[0]) < 3000
):
return CloseShot(car, self.info, target)
return dodge_shot
return ground_shot
def high_shot(self, car: Car, target: vec3) -> Maneuver:
direct_shot = self.direct_shot(car, target)
wall_shot = self.wall_shot(car, target)
if wall_shot.intercept.is_viable and wall_shot.intercept.time < direct_shot.intercept.time:
return wall_shot
aerial = AerialShot(car, self.info, target)
if (
aerial.intercept.is_viable
and car.boost > aerial.intercept.ball.pos[2] / 50 + 5
and aerial.intercept.time < direct_shot.intercept.time
and not self.info.about_to_score
and abs(aerial.intercept.ball.pos[1] - target[1]) > 2000
):
return aerial
return direct_shot
def any_shot(self, car: Car, target: vec3, intercept: Intercept) -> Maneuver:
ball = intercept.ball
if (
100 < ball.pos[2] < 2000
and abs(ball.vel[2]) < 1500
and ground_distance(car, intercept) < 1000
and abs(ball.pos[1] - self.info.my_goal.center[1]) > 500
):
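            # ball is within dribbling range; only dribble if no opponent is close enough to challenge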
is_opponent_close = False
for opponent in self.info.opponents:
if ground_distance(opponent, car) < ball.pos[2] / 2 + 500:
is_opponent_close = True
break
if not is_opponent_close:
return Dribble(car, self.info, target)
if ball.pos[2] > 300 or abs(ball.vel[2]) > 500:
return self.high_shot(car, target)
if align(car.pos, ball, target) < 0.1 and abs(ball.pos[1]) < 3000 and abs(ball.pos[0]) > 1000:
return MirrorShot(car, self.info, target)
return self.direct_shot(car, target)
def shot_or_position(self, car: Car, target: vec3, intercept: Intercept) -> Maneuver:
strike = self.any_shot(car, target, intercept)
if not isinstance(strike, Strike):
return strike
distance_to_target = distance(strike.intercept.ground_pos, target)
shift = clamp(distance_to_target / 6, 100, 800)
if not strike.intercept.is_viable or distance(strike.intercept.ground_pos, car) < shift:
return ShadowDefense(car, self.info, strike.intercept.ground_pos, shift)
return strike
def double_tap(self, car: Car, target: vec3) -> Maneuver:
if car.boost < 5:
return None
predicate = lambda car, ball: (
abs(ball.pos[0]) < 1000
and ball.pos[2] > 400
and distance(ball, target) < 4000
and align(car.pos, ball, target) > 0.3
)
intercept = AerialIntercept(car, self.info.ball_predictions, predicate)
if intercept.is_viable and car.boost > (intercept.time - car.time) * 5:
target_pos = intercept.ball.pos + direction(target, intercept.ball.pos) * 60
return Aerial(car, target_pos, intercept.time)
| 38.052239
| 111
| 0.626201
|
b331fc1e89f48f287928fa6e0124595634f6ca03
| 4,652
|
py
|
Python
|
lexibank_wold.py
|
natalia-morozova/wold
|
28b9d73cb273b140962b260e4e7327b133891d2e
|
[
"CC-BY-3.0"
] | null | null | null |
lexibank_wold.py
|
natalia-morozova/wold
|
28b9d73cb273b140962b260e4e7327b133891d2e
|
[
"CC-BY-3.0"
] | 1
|
2019-08-30T17:04:52.000Z
|
2019-08-30T17:19:23.000Z
|
lexibank_wold.py
|
natalia-morozova/wold
|
28b9d73cb273b140962b260e4e7327b133891d2e
|
[
"CC-BY-3.0"
] | 1
|
2020-02-03T08:16:33.000Z
|
2020-02-03T08:16:33.000Z
|
import attr
import glob
from clldutils.path import Path
from pylexibank.dataset import Lexeme
from pylexibank.providers import clld
import os.path
import re
from segments import Profile, Tokenizer
import unicodedata
@attr.s
class WOLDLexeme(Lexeme):
Word_ID = attr.ib(default=None)
word_source = attr.ib(default=None)
Borrowed = attr.ib(default=None)
BorrowedScore = attr.ib(default=None)
comment_on_borrowed = attr.ib(default=None)
Analyzability = attr.ib(default=None)
Simplicity_score = attr.ib(default=None)
reference = attr.ib(default=None)
numeric_frequency = attr.ib(default=None)
age_label = attr.ib(default=None)
gloss = attr.ib(default=None)
integration = attr.ib(default=None)
salience = attr.ib(default=None)
effect = attr.ib(default=None)
contact_situation = attr.ib(default=None)
original_script = attr.ib(default=None)
class Dataset(clld.CLLD):
__cldf_url__ = "http://cdstar.shh.mpg.de/bitstreams/EAEA0-92F4-126F-089F-0/wold_dataset.cldf.zip"
dir = Path(__file__).parent
id = "wold"
lexeme_class = WOLDLexeme
doc_tokenizers = None
def tokenizer(self, form, doculect):
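        # wrap the form in ^/$ markers so the orthography profile can anchor word-boundary rules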
tok_form = "^%s$" % form
tokens = self.doc_tokenizers[doculect](
unicodedata.normalize("NFC", tok_form), column="IPA"
)
return tokens.split()
def clean_form(self, form):
# we cannot use clldutils.text.strip_brackets(), as brackets most
# of the time contain phonological material
        form = re.sub(r"\(\d\)", "", form)
# To split with clldutils.text, we'd need a regex pattern, as
# we need to include the spaces around the tilde...
form = form.split(",")[0]
form = form.split(" ~ ")[0]
return form.strip()
def cmd_install(self, **kw):
# Read individual orthographic profiles, extract the corresponding
# doculect ids (here, glottocodes), and build the appropriate
# tokenizers
profile_files = sorted(glob.glob(str(self.dir / "etc" / "*.prof")))
doculect_codes = [
os.path.splitext(os.path.basename(pf))[0] for pf in profile_files
]
self.doc_tokenizers = {
doculect: Tokenizer(
profile=Profile.from_file(pf, form="NFC"),
errors_replace=lambda c: "<{0}>".format(c),
)
for pf, doculect in zip(profile_files, doculect_codes)
}
# Cache the Concepticon IDs
concepticon = {
x.attributes["wold_id"]: x.concepticon_id
for x in self.conceptlist.concepts.values()
}
# cache the field names for CLDF output
fields = self.lexeme_class.fieldnames()
# Write data to CLDF
with self.cldf as ds:
vocab_ids = [v["ID"] for v in self.original_cldf["contributions.csv"]]
# add sources
self.add_sources(ds)
# add languages and build map for choosing the right profile
lang_map = {}
for row in self.original_cldf["LanguageTable"]:
gc, iso = row["Glottocode"], row["ISO639P3code"]
if gc == "tzot1264":
gc, iso = "tzot1259", "tzo"
if row["ID"] in vocab_ids:
ds.add_language(
ID=row["ID"], Name=row["Name"], Glottocode=gc, ISO639P3code=iso
)
# Add to map only those which are receivers
if int(row["ID"]) <= 41:
lang_map[row["ID"]] = gc
# add parameters
for row in self.original_cldf["ParameterTable"]:
ds.add_concept(
ID=row["ID"],
Name=row.pop("Name"),
Concepticon_ID=concepticon.get(row["ID"]),
)
# Being explicit on what we are adding
for row in self.original_cldf["FormTable"]:
if row["Language_ID"] in vocab_ids:
# Copy the raw Form to Value, clean form, and tokenize
row["Value"] = row["Form"]
row["Form"] = self.clean_form(row["Form"])
row["Segments"] = self.tokenizer(
row["Form"], lang_map[row["Language_ID"]]
)
# Note: We count words marked as "probably borrowed" as loans.
row["Loan"] = float(row["BorrowedScore"]) > 0.6
ds.add_form_with_segments(
**{k: v for k, v in row.items() if k in fields}
)
| 34.977444
| 101
| 0.567068
|
37bed7008b8c4107af63025e66024f393f4400ba
| 403
|
py
|
Python
|
polybar/scripts/cpu/temperature.py
|
alexeipolovin/dotfiles
|
b47e7cc93da39d1a237f496bcf2c65bdaed26593
|
[
"MIT"
] | null | null | null |
polybar/scripts/cpu/temperature.py
|
alexeipolovin/dotfiles
|
b47e7cc93da39d1a237f496bcf2c65bdaed26593
|
[
"MIT"
] | null | null | null |
polybar/scripts/cpu/temperature.py
|
alexeipolovin/dotfiles
|
b47e7cc93da39d1a237f496bcf2c65bdaed26593
|
[
"MIT"
] | null | null | null |
import psutil
def main():
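    # psutil's shwtemp entries are (label, current, high, critical); read them once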
    readings = psutil.sensors_temperatures()['atk0110'][0]
    CPU_TEMP = readings[1]
    HIGH_CPU_TEMP = readings[2]
    CRIT_CPU_TEMP = readings[3]
    if CPU_TEMP >= HIGH_CPU_TEMP:
        print('CPU Temperature is high ' + str(CPU_TEMP) + '°C')
    elif CPU_TEMP:
        print('CPU Temperature: ' + str(CPU_TEMP) + '°C')
if __name__ == '__main__':
main()
| 33.583333
| 63
| 0.702233
|
3a8f6ec7c8f815e239fc698e057f93b6d5aa2d7e
| 7,134
|
py
|
Python
|
world/conditions/condition_commands.py
|
Zironic/arxcode
|
95464f7e7545385799de821650a0b9da6d51d1bc
|
[
"MIT"
] | 1
|
2019-12-14T22:10:13.000Z
|
2019-12-14T22:10:13.000Z
|
world/conditions/condition_commands.py
|
Zironic/arxcode
|
95464f7e7545385799de821650a0b9da6d51d1bc
|
[
"MIT"
] | null | null | null |
world/conditions/condition_commands.py
|
Zironic/arxcode
|
95464f7e7545385799de821650a0b9da6d51d1bc
|
[
"MIT"
] | null | null | null |
"""
Commands for the conditions app.
"""
from commands.base import ArxCommand
from server.utils.exceptions import PayError
from world.conditions.models import RollModifier
from world.stats_and_skills import VALID_SKILLS, VALID_STATS
class CmdModifiers(ArxCommand):
"""
Adds modifiers to objects
Usage:
@modifiers <object>
@modifiers/search <tag name>
@modifiers/targetmod <object>=<value>,<tag name>,check
@modifiers/usermod <object>=<value>,<tag name>,check
Sets modifiers for the most common usages - an object providing a bonus
against those with a particular tag (targetmod) for a given type of roll,
or an object providing a bonus to a user if they have the given tag. For
more complex modifiers (such as to specific skills, or combinations of
requirements), use django admin.
Rooms provide modifiers to those in the location, while weapons and armor
must be wielded/worn respectively. Tags they check can be added to things
with the @tag command using the category 'modifiers'.
"""
key = "@modifiers"
locks = "cmd: perm(builders)"
help_category = "building"
def display_mods(self):
"""Displays modifiers on target"""
targ = self.caller.search(self.lhs)
if not targ:
return
self.msg("Modifiers on %s: %s" % (targ, ", ".join(str(ob) for ob in targ.modifiers.all())))
def add_mod(self):
"""Adds a modifier to target"""
from server.utils.arx_utils import dict_from_choices_field
choices = dict_from_choices_field(RollModifier, "CHECK_CHOICES")
try:
value = int(self.rhslist[0])
tag_name = self.rhslist[1].lower()
check = choices[self.rhslist[2].lower()]
except (IndexError, AttributeError):
self.msg("You must provide value, tag name, and the type of check.")
except KeyError:
self.msg("Not a valid check type: %s" % ", ".join(choices.keys()))
else:
targ = self.caller.search(self.lhs)
if not targ:
return
if "targetmod" in self.switches:
mod = targ.add_modifier(value, check_type=check, target_tag=tag_name)
else:
mod = targ.add_modifier(value, check_type=check, user_tag=tag_name)
self.msg("You have added a modifier to %s: %s." % (targ, mod))
def search_mods(self):
"""Searches for modifiers for/against a given tag"""
from django.db.models import Q
msg = "Modifiers for/against %s: " % self.args
qs = RollModifier.objects.filter(Q(user_tag__iexact=self.args) | Q(target_tag__iexact=self.args))
msg += ", ".join(str(ob) for ob in qs)
self.msg(msg)
def func(self):
"""Executes modifiers command"""
if not self.switches:
return self.display_mods()
if "targetmod" in self.switches or "usermod" in self.switches:
return self.add_mod()
if "search" in self.switches:
return self.search_mods()
class CmdKnacks(ArxCommand):
"""
Creates or displays knacks for your character
Usage:
@knacks
@knacks <name>
@knacks/create <stat>,<skill>,<knack name>=<description>
@knacks/train <name>
The knacks command is a way to customize what a character is really good
at. By creating a knack, you identify a particular type of check where
your character excels: for example, you might identify an area of Faith
    lore that your character specializes in with an intellect+theology knack,
or a character who is an accomplished jouster might have a dexterity +
riding knack. The description is generally meant to convey the specifics
of your character's knack and when/why it might be applicable.
Knacks cost {} xp to create, then {} + {}*rank to increase. Each rank in
a knack increases the results of applicable rolls by {} and chance for a
critical success by 1 + half your rank (rounded down).
"""
key = "@knacks"
aliases = ["knack"]
locks = "cmd:all()"
help_category = "Progression"
new_knack_cost = 150
base_increase_cost = 50
cost_per_rank = 10
bonus_per_rank = 1
def get_help(self, caller, cmdset):
return self.__doc__.format(self.new_knack_cost, self.base_increase_cost, self.cost_per_rank,
self.bonus_per_rank)
def func(self):
"""Executes the knack command"""
try:
if not self.args and not self.switches:
return self.display_knacks()
if self.args and not self.switches:
return self.view_knack()
if "create" in self.switches:
return self.create_knack()
if "train" in self.switches:
return self.train_knack()
raise self.error_class("Invalid switch.")
except (self.error_class, PayError) as err:
self.msg(err)
def display_knacks(self):
"""Displays our knacks"""
self.msg(self.caller.mods.display_knacks())
def view_knack(self):
"""Views a single knack"""
knack = self.get_knack()
self.msg(knack.display_knack())
def get_knack(self):
knack = self.caller.mods.get_knack_by_name(self.args)
if not knack:
raise self.error_class("No knack found by that name.")
return knack
def create_knack(self):
"""Attempts to create a new knack"""
desc = self.rhs
if not desc:
raise self.error_class("You must provide a description.")
try:
stat, skill, name = self.lhslist[0], self.lhslist[1], ", ".join(self.lhslist[2:])
except IndexError:
raise self.error_class("You must provide a stat and skill.")
if not name:
raise self.error_class("You must provide a name.")
if self.caller.mods.get_knack_by_name(name):
raise self.error_class("You already have a knack by that name.")
stat, skill = stat.lower(), skill.lower()
if stat not in VALID_STATS:
raise self.error_class("{} is not a valid stat.".format(stat))
if skill not in VALID_SKILLS:
raise self.error_class("{} is not a valid skill.".format(skill))
if any([knack for knack in self.caller.mods.knacks if knack.stat == stat and knack.skill == skill]):
raise self.error_class("You already have a knack for that skill and stat combination.")
self.caller.pay_xp(self.new_knack_cost)
self.caller.mods.create_knack(name, stat, skill, desc)
self.msg("You create a knack called '{}' for {}+{}.".format(name, stat, skill))
def train_knack(self):
knack = self.get_knack()
new_rank = knack.value + 1
cost = self.base_increase_cost + (self.cost_per_rank * knack.value)
self.caller.pay_xp(cost)
knack.value = new_rank
knack.save()
self.msg("You have increased {} to rank {}.".format(knack.name, new_rank))
| 40.078652
| 108
| 0.62868
|
782d345cc2466c1b5577685fc6f42fb1dce99ff2
| 2,312
|
py
|
Python
|
docs/conf.py
|
crew102/scopus
|
d8791c162cef4c2f830d983b435333d9d8eaf472
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
crew102/scopus
|
d8791c162cef4c2f830d983b435333d9d8eaf472
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
crew102/scopus
|
d8791c162cef4c2f830d983b435333d9d8eaf472
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
sys.path.append(os.path.join(os.path.abspath(os.pardir)))
autodoc_mock_imports = ["_tkinter"]
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
import scopus
# General configuration
needs_sphinx = '1.3'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'scopus'
author = 'John Kitchin and Michael E. Rose'
copyright = ','.join(['2017-2019', author])
version = scopus.__version__
release = scopus.__version__
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
autodoc_member_order = 'groupwise'
# Options for HTML output
html_theme = 'alabaster'
html_theme_options = {
'github_user': 'scopus-api',
'github_repo': 'scopus',
'github_banner': 'true',
'github_button': 'true',
'github_type': 'star',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
html_static_path = ['_static']
# Options for HTMLHelp output
html_show_sourcelink = True
htmlhelp_basename = 'scopusdoc'
autoclass_content = 'both'
# Option to group members of classes
autodoc_member_order = 'groupwise'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
latex_documents = [
(master_doc, 'scopus.tex', 'scopus Documentation',
author, 'manual'),
]
# Options for manual page output
man_pages = [
(master_doc, 'scopus', 'scopus Documentation',
[author], 1)
]
# Options for Texinfo output
texinfo_documents = [
(master_doc, 'scopus', 'scopus Documentation',
author, 'scopus', 'One line description of project.',
'Miscellaneous'),
]
| 21.407407
| 75
| 0.647491
|
2a260359bac78dfcfe4f69eee9b09ad56cebc7fc
| 1,528
|
py
|
Python
|
DFGN/model/model.py
|
mottled233/DFGN-pytorch
|
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
|
[
"MIT"
] | null | null | null |
DFGN/model/model.py
|
mottled233/DFGN-pytorch
|
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
|
[
"MIT"
] | null | null | null |
DFGN/model/model.py
|
mottled233/DFGN-pytorch
|
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
|
[
"MIT"
] | null | null | null |
from model.layers import *
from model.GFN import *
from transformers import BertModel
from transformers import BertConfig as BC
class DFGN(nn.Module):
"""
Packing Query Version
"""
def __init__(self, config, pretrained_bert):
super(DFGN, self).__init__()
self.config = config
self.encoder = BertModel.from_pretrained(pretrained_bert, cache_dir="data/chn_bert_base")
if config.without_bert_optimize:
for p in self.encoder.parameters():
p.requires_grad = False
self.model = GraphFusionNet(config=config)
# self.prediction = BaselinePredictionLayer(config=config)
def forward(self, batch, return_yp=True, debug=False):
doc_ids, doc_mask, segment_ids = batch['context_idxs'], batch['context_mask'], batch['segment_idxs']
N = doc_ids.shape[0]
doc_encoding = self.encoder(input_ids=doc_ids,
# token_type_ids=segment_ids,
attention_mask=doc_mask)[0]
if self.config.without_bert_optimize:
doc_encoding = doc_encoding.detach()
batch['context_encoding'] = doc_encoding
start, end, sp, Type, softmask, ent, yp1, yp2 = self.model(batch, return_yp)
return start, end, sp, Type, softmask, yp1, yp2
# start, end, sp, Type, yp1, yp2 = self.prediction(batch)
# return start, end, sp, Type, None, yp1, yp2
| 39.179487
| 108
| 0.650524
|
aaaa2560a7d399a833de1b0f5dd47655fefdebd0
| 95
|
py
|
Python
|
contentbox/apps.py
|
mahoyen/web
|
1d190a86e3277315804bfcc0b8f9abd4f9c1d780
|
[
"MIT"
] | null | null | null |
contentbox/apps.py
|
mahoyen/web
|
1d190a86e3277315804bfcc0b8f9abd4f9c1d780
|
[
"MIT"
] | null | null | null |
contentbox/apps.py
|
mahoyen/web
|
1d190a86e3277315804bfcc0b8f9abd4f9c1d780
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ContentboxConfig(AppConfig):
name = 'contentbox'
| 15.833333
| 34
| 0.768421
|
2b8fdaf679572873b80d699afd79f1cac9c58eb8
| 538
|
py
|
Python
|
registration/migrations/0015_auto_20170212_2029.py
|
Eigenbaukombinat/ebkmember
|
6989e69783b977e96493217723c41bd69c8ee788
|
[
"MIT"
] | 1
|
2018-04-12T14:01:56.000Z
|
2018-04-12T14:01:56.000Z
|
registration/migrations/0015_auto_20170212_2029.py
|
Eigenbaukombinat/ebkmember
|
6989e69783b977e96493217723c41bd69c8ee788
|
[
"MIT"
] | 3
|
2020-02-11T23:55:16.000Z
|
2021-06-10T19:31:06.000Z
|
registration/migrations/0015_auto_20170212_2029.py
|
Eigenbaukombinat/ebkmember
|
6989e69783b977e96493217723c41bd69c8ee788
|
[
"MIT"
] | 3
|
2017-02-13T19:04:56.000Z
|
2018-09-12T19:42:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-12 20:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0014_auto_20170205_1600'),
]
operations = [
migrations.AlterField(
model_name='member',
name='fee',
field=models.FloatField(choices=[('normal', 24), ('reduced', 18), ('sustain_min', 5), ('other', 0), ('sustain_other', 0)]),
),
]
| 25.619048
| 135
| 0.604089
|
272670163204814dc51abf75ae1bbc8307412331
| 1,783
|
py
|
Python
|
api/sharing/driver.py
|
cypherpath/sdios-api-sdk
|
dbef980ee68844688e9911443571eca06ee02a6a
|
[
"MIT"
] | 1
|
2020-12-04T13:32:19.000Z
|
2020-12-04T13:32:19.000Z
|
api/sharing/driver.py
|
cypherpath/sdios-api-sdk
|
dbef980ee68844688e9911443571eca06ee02a6a
|
[
"MIT"
] | null | null | null |
api/sharing/driver.py
|
cypherpath/sdios-api-sdk
|
dbef980ee68844688e9911443571eca06ee02a6a
|
[
"MIT"
] | 4
|
2019-05-07T00:35:06.000Z
|
2020-02-08T17:35:33.000Z
|
"""SharingDriver class object"""
from typing import Optional
from api.base_driver import BaseDriver
from api.driver import APIDriver
from api.driver import APIResponse
from settings.urls import APICategory
class SharingDriver(BaseDriver):
"""Make all network API calls."""
_category = APICategory.SHARING
def __init__(self, api_driver: APIDriver) -> None:
"""Initialize SharingDriver class
:param api_driver: Allows SharingDriver to communicate with SDI OS
:type api_driver: APIDriver class object
"""
super().__init__(api_driver)
self.user_pk = None # type: Optional[int]
def clear(self) -> None:
"""Clear pks."""
self.user_pk = None # type: Optional[int]
def get_all_shared(self) -> APIResponse:
"""Get users/groups shared networks and return response."""
return self._get("network_list")
def get_all_users_shared(self) -> APIResponse:
"""Get all user's shared networks and return response."""
return self._get("user_list")
def get_user_shared(self, user_pk: int = None) -> APIResponse:
"""Get a user's shared networks and return response.
:param user_pk: Pk of user to look up shared networks. Default is self.user_pk.
:type user_pk: int
"""
url_args = {"pk": self.user_pk if user_pk is None else user_pk}
return self._get("user_detail", url_args)
def get_all_groups_shared(self) -> APIResponse:
"""Get all group's shared networks and return response."""
return self._get("group_list")
def get_group_shared(self, group_pk: int) -> APIResponse:
"""Get group's shared networks and return response."""
return self._get("group_detail", {"group_pk": group_pk})
| 34.960784
| 87
| 0.670219
|
8d5f22d272ac6944aee9e0ec0a8bb0bc3ba5cab2
| 44
|
py
|
Python
|
slate/typings/__init__.py
|
OpenRobot-Packages/slate
|
687a73dab1fac2e8b1d65297f1fdef0ec8301a16
|
[
"MIT"
] | 27
|
2020-10-18T04:35:00.000Z
|
2021-08-03T13:21:27.000Z
|
dashboard/typings/__init__.py
|
Axelancerr/Life-dashboard
|
ecde503c1a90fdedd680ae19d22b3f5c9da4c4c2
|
[
"MIT"
] | 19
|
2020-12-04T23:03:51.000Z
|
2021-08-14T20:21:53.000Z
|
dashboard/typings/__init__.py
|
Axelancerr/Life-dashboard
|
ecde503c1a90fdedd680ae19d22b3f5c9da4c4c2
|
[
"MIT"
] | 7
|
2020-10-26T18:51:17.000Z
|
2021-07-07T05:39:01.000Z
|
# Future
from __future__ import annotations
| 14.666667
| 34
| 0.840909
|
10f60ab5c5110f42b3552e10ff974135ed3d7d99
| 6,254
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/purestorage/flashblade/plugins/modules/purefb_network.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: purefb_network
version_added: "1.0.0"
short_description: Manage network interfaces in a Pure Storage FlashBlade
description:
- This module manages network interfaces on Pure Storage FlashBlade.
- When creating a network interface a subnet must already exist with
a network prefix that covers the IP address of the interface being
created.
author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
name:
description:
- Interface Name.
required: true
type: str
state:
description:
      - Creates, deletes or modifies a network interface.
required: false
default: present
choices: [ "present", "absent" ]
type: str
address:
description:
- IP address of interface.
required: false
type: str
services:
description:
- Define which services are configured for the interfaces.
required: false
choices: [ "data", "replication" ]
default: data
type: str
itype:
description:
- Type of interface.
required: false
choices: [ "vip" ]
default: vip
type: str
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
EXAMPLES = """
- name: Create new network interface named foo
purefb_network:
name: foo
address: 10.21.200.23
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Change IP address of network interface named foo
purefb_network:
name: foo
state: present
address: 10.21.200.123
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Delete network interface named foo
purefb_network:
name: foo
state: absent
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
RETURN = """
"""
HAS_PURITY_FB = True
try:
from purity_fb import NetworkInterface
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
purefb_argument_spec,
)
MINIMUM_API_VERSION = "1.3"
def get_iface(module, blade):
"""Return Filesystem or None"""
iface = []
iface.append(module.params["name"])
try:
res = blade.network_interfaces.list_network_interfaces(names=iface)
return res.items[0]
except Exception:
return None
def create_iface(module, blade):
"""Create Network Interface"""
changed = True
if not module.check_mode:
iface = []
services = []
iface.append(module.params["name"])
services.append(module.params["services"])
try:
blade.network_interfaces.create_network_interfaces(
names=iface,
network_interface=NetworkInterface(
address=module.params["address"],
services=services,
type=module.params["itype"],
),
)
except Exception:
module.fail_json(
msg="Interface creation failed. Check subnet exists for {0}".format(
module.params["address"]
)
)
module.exit_json(changed=changed)
def modify_iface(module, blade):
"""Modify Network Interface IP address"""
changed = False
iface = get_iface(module, blade)
iface_new = []
iface_new.append(module.params["name"])
if module.params["address"] != iface.address:
changed = True
if not module.check_mode:
try:
blade.network_interfaces.update_network_interfaces(
names=iface_new,
network_interface=NetworkInterface(
address=module.params["address"]
),
)
changed = True
except Exception:
module.fail_json(
msg="Failed to modify Interface {0}".format(module.params["name"])
)
module.exit_json(changed=changed)
def delete_iface(module, blade):
"""Delete Network Interface"""
changed = True
if not module.check_mode:
iface = []
iface.append(module.params["name"])
try:
blade.network_interfaces.delete_network_interfaces(names=iface)
except Exception:
module.fail_json(
msg="Failed to delete network {0}".format(module.params["name"])
)
module.exit_json(changed=changed)
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default="present", choices=["present", "absent"]),
address=dict(),
services=dict(default="data", choices=["data", "replication"]),
itype=dict(default="vip", choices=["vip"]),
)
)
required_if = [["state", "present", ["address"]]]
module = AnsibleModule(
argument_spec, required_if=required_if, supports_check_mode=True
)
if not HAS_PURITY_FB:
module.fail_json(msg="purity_fb sdk is required for this module")
state = module.params["state"]
blade = get_blade(module)
api_version = blade.api_version.list_versions().versions
if MINIMUM_API_VERSION not in api_version:
module.fail_json(msg="Upgrade Purity//FB to enable this module")
iface = get_iface(module, blade)
if state == "present" and not iface:
create_iface(module, blade)
elif state == "present" and iface:
modify_iface(module, blade)
elif state == "absent" and iface:
delete_iface(module, blade)
elif state == "absent" and not iface:
module.exit_json(changed=False)
if __name__ == "__main__":
main()
| 27.795556
| 92
| 0.630956
|
a823404e0046da4f3ec84d1c3c75a5bb45daa90b
| 5,746
|
py
|
Python
|
qa/rpc-tests/rpcbind_test.py
|
PaulRomerolegal/bitcoin
|
cdac2e7f0c980ae0e3e75b1ba2a0d2538e3c4aa0
|
[
"MIT"
] | 1
|
2021-07-07T13:28:10.000Z
|
2021-07-07T13:28:10.000Z
|
qa/rpc-tests/rpcbind_test.py
|
bcpki/nonce2
|
053bab938e335f39314d9f51d63a47e3bdb43142
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/rpcbind_test.py
|
bcpki/nonce2
|
053bab938e335f39314d9f51d63a47e3bdb43142
|
[
"MIT"
] | 1
|
2017-08-25T18:51:45.000Z
|
2017-08-25T18:51:45.000Z
|
#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
try:
pid = bitcoind_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
finally:
stop_nodes(nodes)
wait_bitcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
    Start a node with -rpcallowip set, and request getinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
nodes = start_nodes(1, tmpdir, [base_args])
try:
# connect to node through non-loopback interface
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
node = AuthServiceProxy(url)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_bitcoinds()
def run_test(tmpdir):
assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| 37.070968
| 110
| 0.646363
|
be9965d3e74cda705c364abdf63b8b9a318f0c05
| 3,250
|
py
|
Python
|
src/fhir_types/FHIR_PlanDefinition_Goal.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | 2
|
2022-02-03T00:51:30.000Z
|
2022-02-03T18:42:43.000Z
|
src/fhir_types/FHIR_PlanDefinition_Goal.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | null | null | null |
src/fhir_types/FHIR_PlanDefinition_Goal.py
|
anthem-ai/fhir-types
|
42348655fb3a9b3f131b911d6bc0782da8c14ce4
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, List, Literal, TypedDict
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_PlanDefinition_Target import FHIR_PlanDefinition_Target
from .FHIR_RelatedArtifact import FHIR_RelatedArtifact
from .FHIR_string import FHIR_string
# This resource allows for the definition of various types of plans as a sharable, consumable, and executable artifact. The resource is general enough to support the description of a broad range of clinical artifacts such as clinical decision support rules, order sets and protocols.
FHIR_PlanDefinition_Goal = TypedDict(
"FHIR_PlanDefinition_Goal",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Indicates a category the goal falls within.
"category": FHIR_CodeableConcept,
# Human-readable and/or coded description of a specific desired objective of care, such as "control blood pressure" or "negotiate an obstacle course" or "dance with child at wedding".
"description": FHIR_CodeableConcept,
# Identifies the expected level of importance associated with reaching/sustaining the defined goal.
"priority": FHIR_CodeableConcept,
# The event after which the goal should begin being pursued.
"start": FHIR_CodeableConcept,
# Identifies problems, conditions, issues, or concerns the goal is intended to address.
"addresses": List[FHIR_CodeableConcept],
# Didactic or other informational resources associated with the goal that provide further supporting information about the goal. Information resources can include inline text commentary and links to web resources.
"documentation": List[FHIR_RelatedArtifact],
# Indicates what should be done and within what timeframe.
"target": List[FHIR_PlanDefinition_Target],
},
total=False,
)
| 92.857143
| 836
| 0.774462
|
874b9d5f067ef325786ecd8f7c3dbed21dbe0cb8
| 587
|
py
|
Python
|
rest_ml/firstApp/migrations/0003_movierating.py
|
Binucb/machineLearning_RestAPI
|
5ffb51febd9ac31e74977aa20cb1ab8c9e44560a
|
[
"MIT"
] | null | null | null |
rest_ml/firstApp/migrations/0003_movierating.py
|
Binucb/machineLearning_RestAPI
|
5ffb51febd9ac31e74977aa20cb1ab8c9e44560a
|
[
"MIT"
] | 4
|
2021-03-19T02:02:07.000Z
|
2021-06-04T22:54:44.000Z
|
rest_ml/firstApp/migrations/0003_movierating.py
|
Binucb/machineLearning_RestAPI
|
5ffb51febd9ac31e74977aa20cb1ab8c9e44560a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-18 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('firstApp', '0002_auto_20200418_1616'),
]
operations = [
migrations.CreateModel(
name='MovieRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('movie_name', models.CharField(max_length=100)),
('rating', models.FloatField(max_length=3)),
],
),
]
| 26.681818
| 114
| 0.584327
|
61042e82c1db6bd90e22be1eeeaec7dcb4f1f87e
| 393
|
py
|
Python
|
PyTrinamicMicro/platforms/motionpy1/examples/modules/TMCM1161/TMCM1161_RS485_rotate.py
|
trinamic/PyTrinamicMicro
|
defcd1aef2ea35e848b3a3faf201ec4d8a649bfd
|
[
"MIT"
] | 4
|
2020-06-25T08:59:59.000Z
|
2021-07-17T09:52:38.000Z
|
PyTrinamicMicro/platforms/motionpy1/examples/modules/TMCM1161/TMCM1161_RS485_rotate.py
|
trinamic/PyTrinamicMicro
|
defcd1aef2ea35e848b3a3faf201ec4d8a649bfd
|
[
"MIT"
] | 2
|
2020-10-08T15:48:07.000Z
|
2021-05-12T13:28:27.000Z
|
PyTrinamicMicro/platforms/motionpy1/examples/modules/TMCM1161/TMCM1161_RS485_rotate.py
|
trinamic/PyTrinamicMicro
|
defcd1aef2ea35e848b3a3faf201ec4d8a649bfd
|
[
"MIT"
] | 3
|
2021-01-26T09:24:28.000Z
|
2021-04-27T08:42:38.000Z
|
'''
Rotate the motor with TMCM1161 using RS485 interface.
Created on 05.10.2020
@author: LK
'''
from PyTrinamic.modules.TMCM1161.TMCM_1161 import TMCM_1161
from PyTrinamicMicro.platforms.motionpy1.connections.rs485_tmcl_interface import rs485_tmcl_interface
import time
con = rs485_tmcl_interface()
module = TMCM_1161(con)
module.rotate(0, 1000)
time.sleep(5)
module.stop(0)
con.close()
| 18.714286
| 101
| 0.798982
|
99f081c9cfaeeb2dff165dad2c4fa77874357537
| 3,193
|
py
|
Python
|
nomad/api/jobs.py
|
suleimanmahmoud/python-nomad
|
5beee5f027114c3acf8c5aedc0f4ae4a37165b2a
|
[
"MIT"
] | null | null | null |
nomad/api/jobs.py
|
suleimanmahmoud/python-nomad
|
5beee5f027114c3acf8c5aedc0f4ae4a37165b2a
|
[
"MIT"
] | null | null | null |
nomad/api/jobs.py
|
suleimanmahmoud/python-nomad
|
5beee5f027114c3acf8c5aedc0f4ae4a37165b2a
|
[
"MIT"
] | null | null | null |
import nomad.api.exceptions
from nomad.api.base import Requester
class Jobs(Requester):
"""
The jobs endpoint is used to query the status of existing
jobs in Nomad and to register new jobs.
By default, the agent's local region is used.
https://www.nomadproject.io/docs/http/jobs.html
"""
ENDPOINT = "jobs"
def __init__(self, **kwargs):
super(Jobs, self).__init__(**kwargs)
def __str__(self):
return "{0}".format(self.__dict__)
def __repr__(self):
return "{0}".format(self.__dict__)
def __getattr__(self, item):
msg = "{0} does not exist".format(item)
raise AttributeError(msg)
def __contains__(self, item):
try:
jobs = self.get_jobs()
for j in jobs:
if j["ID"] == item:
return True
if j["Name"] == item:
return True
else:
return False
except nomad.api.exceptions.URLNotFoundNomadException:
return False
def __len__(self):
jobs = self.get_jobs()
return len(jobs)
def __getitem__(self, item):
try:
jobs = self.get_jobs()
for j in jobs:
if j["ID"] == item:
return j
if j["Name"] == item:
return j
else:
raise KeyError
except nomad.api.exceptions.URLNotFoundNomadException:
raise KeyError
def __iter__(self):
jobs = self.get_jobs()
return iter(jobs)
def get_jobs(self, prefix=None, region=None):
""" Lists all the jobs registered with Nomad.
https://www.nomadproject.io/docs/http/jobs.html
arguments:
          - prefix :(str) optional, specifies a prefix string to filter jobs by.
This is specified as a querystring parameter.
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix, "region": region}
return self.request(method="get", params=params).json()
def register_job(self, job):
""" Register a job with Nomad.
https://www.nomadproject.io/docs/http/jobs.html
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request(json=job, method="post").json()
def parse(self, hcl, canonicalize=False):
""" Parse a HCL Job file. Returns a dict with the JSON formatted job.
This API endpoint is only supported from Nomad version 0.8.3.
https://www.nomadproject.io/api/jobs.html#parse-job
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("parse", json={"JobHCL": hcl, "Canonicalize": canonicalize}, method="post", allow_redirects=True).json()
| 30.409524
| 132
| 0.567491
|
cb813a251d45826a7957d945cd9767b85a884af8
| 10,891
|
py
|
Python
|
gs_quant/test/api/test_data.py
|
femtotrader/gs-quant
|
33a13f6f53ce8a62565b2d2870aaddf52847e275
|
[
"Apache-2.0"
] | 1
|
2021-08-31T09:19:25.000Z
|
2021-08-31T09:19:25.000Z
|
gs_quant/test/api/test_data.py
|
femtotrader/gs-quant
|
33a13f6f53ce8a62565b2d2870aaddf52847e275
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/test/api/test_data.py
|
femtotrader/gs-quant
|
33a13f6f53ce8a62565b2d2870aaddf52847e275
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2018 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal, assert_series_equal
from gs_quant.api.gs.data import GsDataApi
from gs_quant.context_base import ContextMeta
from gs_quant.errors import MqValueError
from gs_quant.markets import MarketDataCoordinate
from gs_quant.session import GsSession, Environment
test_coordinates = (
MarketDataCoordinate(mkt_type='Prime', mkt_quoting_style='price', mkt_asset='335320934'),
MarketDataCoordinate(mkt_type='IR', mkt_asset='USD', mkt_class='Swap', mkt_point=('2Y',)),
)
test_str_coordinates = (
'Prime_335320934_.price',
'IR_USD_Swap_2Y'
)
bond_data = [
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0139,
'time': pd.to_datetime('2019-01-20T01:03:00Z')
},
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0141,
'time': pd.to_datetime('2019-01-20T01:08:00Z')
}
]
swap_data = [
{
'mktType': 'IR',
'mktAsset': 'USD',
'mktClass': 'Swap',
'mktPoint': ('2Y',),
'mktQuotingStyle': 'ATMRate',
'ATMRate': 0.02592,
'time': pd.to_datetime('2019-01-20T01:09:45Z')
}
]
bond_expected_frame = pd.DataFrame(
data={
'time': [pd.to_datetime('2019-01-20T01:03:00Z'), pd.to_datetime('2019-01-20T01:08:00Z')],
'mktType': ['Prime', 'Prime'],
'mktAsset': ['335320934', '335320934'],
'mktQuotingStyle': ['price', 'price'],
'value': [1.0139, 1.0141]
},
index=pd.DatetimeIndex(['2019-01-20T01:03:00', '2019-01-20T01:08:00']),
)
swap_expected_frame = pd.DataFrame(
data={
'time': [pd.to_datetime('2019-01-20T01:09:45Z')],
'mktType': ['IR'],
'mktAsset': ['USD'],
'mktClass': ['Swap'],
'mktPoint': [('2Y',)],
'mktQuotingStyle': ['ATMRate'],
'value': [0.02592]
},
index=pd.DatetimeIndex(['2019-01-20T01:09:45']),
)
def test_coordinates_data(mocker):
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_post', side_effect=[{'responses': [{'data': bond_data}]},
{'responses': [{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]}
])
coord_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates[0], start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10))
assert_frame_equal(coord_data_result, bond_expected_frame)
str_coord_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates[1],
start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10))
assert_frame_equal(str_coord_data_result, swap_expected_frame)
coords_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates, start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10), as_multiple_dataframes=True)
assert len(coords_data_result) == 2
assert_frame_equal(coords_data_result[0], bond_expected_frame)
assert_frame_equal(coords_data_result[1], swap_expected_frame)
str_coords_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates,
start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10), as_multiple_dataframes=True)
assert len(str_coords_data_result) == 2
assert_frame_equal(str_coords_data_result[0], bond_expected_frame)
assert_frame_equal(str_coords_data_result[1], swap_expected_frame)
GsSession.current._post.assert_called_with('/data/coordinates/query', payload=mocker.ANY)
assert GsSession.current._post.call_count == 4
def test_coordinate_data_series(mocker):
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_post', side_effect=[{'responses': [{'data': bond_data}]},
{'responses': [{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]}
])
bond_expected_series = pd.Series(index=bond_expected_frame.index, data=bond_expected_frame.value.values)
swap_expected_series = pd.Series(index=swap_expected_frame.index, data=swap_expected_frame.value.values)
coord_data_result = GsDataApi.coordinates_data_series(coordinates=test_coordinates[0],
start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10))
assert_series_equal(coord_data_result, bond_expected_series)
str_coord_data_result = GsDataApi.coordinates_data_series(coordinates=test_str_coordinates[1],
start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10))
assert_series_equal(str_coord_data_result, swap_expected_series)
coords_data_result = GsDataApi.coordinates_data_series(coordinates=test_coordinates,
start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10))
assert len(coords_data_result) == 2
assert_series_equal(coords_data_result[0], bond_expected_series)
assert_series_equal(coords_data_result[1], swap_expected_series)
str_coords_data_result = GsDataApi.coordinates_data_series(coordinates=test_str_coordinates,
start=dt.datetime(2019, 1, 2, 1, 0),
end=dt.datetime(2019, 1, 2, 1, 10))
assert len(str_coords_data_result) == 2
assert_series_equal(str_coords_data_result[0], bond_expected_series)
assert_series_equal(str_coords_data_result[1], swap_expected_series)
GsSession.current._post.assert_called_with('/data/coordinates/query', payload=mocker.ANY)
assert GsSession.current._post.call_count == 4
def test_coordinate_last(mocker):
data = {'responses': [
{'data': [
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0141,
'time': '2019-01-20T01:08:00Z'
}
]},
{'data': [
{
'mktType': 'IR',
'mktAsset': 'USD',
'mktClass': 'Swap',
'mktPoint': ('2Y',),
'mktQuotingStyle': 'ATMRate',
'ATMRate': 0.02592,
'time': '2019-01-20T01:09:45Z'
}
]}
]}
expected_result = pd.DataFrame(
data={
'mktType': ['Prime', 'IR'],
'mktAsset': ['335320934', 'USD'],
'mktClass': [None, 'Swap'],
'mktPoint': [None, ('2Y',)],
'mktQuotingStyle': ['price', None],
'value': [1.0141, 0.02592]
}
)
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
GsSession.current._post = mocker.Mock(return_value=data)
result = GsDataApi.coordinates_last(coordinates=test_coordinates, as_of=dt.datetime(2019, 1, 2, 1, 10),
as_dataframe=True)
assert result.equals(expected_result)
result_from_str = GsDataApi.coordinates_last(coordinates=test_str_coordinates, as_of=dt.datetime(2019, 1, 2, 1, 10),
as_dataframe=True)
assert result_from_str.equals(expected_result)
GsSession.current._post.assert_called_with('/data/coordinates/query/last', payload=mocker.ANY)
assert GsSession.current._post.call_count == 2
def test_get_coverage_api(mocker):
test_coverage_data = {'results': [{'gsid': 'gsid1'}]}
mocker.patch.object(ContextMeta, 'current', return_value=GsSession(Environment.QA))
mocker.patch.object(ContextMeta.current, '_get', return_value=test_coverage_data)
data = GsDataApi.get_coverage('MA_RANK')
assert [{'gsid': 'gsid1'}] == data
def test_coordinates_converter():
coord = GsDataApi._coordinate_from_str("A_B_C_D")
assert str(coord) == 'A|B|C|D|'
coord = GsDataApi._coordinate_from_str("A_B_C.E")
assert str(coord) == 'A|B|C||E'
coord = GsDataApi._coordinate_from_str("A_B_.E")
assert str(coord) == 'A|B|||E'
coord = GsDataApi._coordinate_from_str("A_B_C_D_E.F")
assert str(coord) == 'A|B|C|D_E|F'
with pytest.raises(MqValueError, match='invalid coordinate A'):
GsDataApi._coordinate_from_str("A")
if __name__ == "__main__":
pytest.main(args=["test_data.py"])
| 42.877953
| 120
| 0.56487
|
144ef28742d676d510ee24b61621294675fb5895
| 989
|
py
|
Python
|
research/delf/setup.py
|
bkj/tf-models
|
a581c0951e867a034122aab1e42cfed8973dbb16
|
[
"Apache-2.0"
] | null | null | null |
research/delf/setup.py
|
bkj/tf-models
|
a581c0951e867a034122aab1e42cfed8973dbb16
|
[
"Apache-2.0"
] | null | null | null |
research/delf/setup.py
|
bkj/tf-models
|
a581c0951e867a034122aab1e42cfed8973dbb16
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup script for delf."""
from setuptools import setup, find_packages
setup(
name='delf',
version='0.1',
include_package_data=True,
packages=find_packages(),
description='DELF (DEep Local Features)',
package_data={'': ['delf_config_example.pbtxt']},
zip_safe=False,
)
| 35.321429
| 80
| 0.677452
|
63b11445bc0005ca0e0928abf930329814dbc8d3
| 12,164
|
py
|
Python
|
tests/request_methods/test_reports.py
|
1Blackdiamondsc/python-amazon-mws
|
c903bc4402808937b4a1240aec2e129dcbc7e7d8
|
[
"Unlicense"
] | 1
|
2020-09-14T10:29:09.000Z
|
2020-09-14T10:29:09.000Z
|
tests/request_methods/test_reports.py
|
HumanFact/python-amazon-mws
|
bba6c5ed2080e5864840098ecb9f6ab7f9ac2def
|
[
"Unlicense"
] | 2
|
2021-01-26T17:25:45.000Z
|
2021-01-28T09:39:17.000Z
|
tests/request_methods/test_reports.py
|
HumanFact/python-amazon-mws
|
bba6c5ed2080e5864840098ecb9f6ab7f9ac2def
|
[
"Unlicense"
] | 1
|
2021-05-25T02:37:56.000Z
|
2021-05-25T02:37:56.000Z
|
"""Tests for the Reports API class."""
import datetime
import unittest
import mws
from mws.utils import clean_bool, clean_date
from .utils import CommonAPIRequestTools
class ReportsTestCase(CommonAPIRequestTools, unittest.TestCase):
"""Test cases for Reports."""
api_class = mws.Reports
# TODO: Add remaining methods for Reports
def test_request_report(self):
"""RequestReport operation."""
report_type = "_GET_FLAT_FILE_OPEN_LISTINGS_DATA_"
start_date = datetime.datetime(2018, 4, 30, 22, 59, 59)
end_date = datetime.datetime(2018, 4, 30, 23, 59, 59)
marketplace_ids = [
"iQzBCmf1y3",
"wH9q0CiEMp",
]
params = self.api.request_report(
report_type=report_type,
start_date=start_date,
end_date=end_date,
marketplace_ids=marketplace_ids,
)
self.assert_common_params(params, action="RequestReport")
self.assertEqual(params["ReportType"], report_type)
self.assertEqual(params["StartDate"], "2018-04-30T22%3A59%3A59")
self.assertEqual(params["EndDate"], "2018-04-30T23%3A59%3A59")
self.assertEqual(params["MarketplaceIdList.Id.1"], marketplace_ids[0])
self.assertEqual(params["MarketplaceIdList.Id.2"], marketplace_ids[1])
def test_report_options_dict(self):
"""Asserts a dict used for report_options argument for request_report method
builds the correct string output.
"""
report_type = "_GET_MERCHANT_LISTINGS_ALL_DATA_"
report_options = {"custom": True, "somethingelse": "abc"}
params = self.api.request_report(
report_type=report_type,
report_options=report_options,
)
self.assert_common_params(params, action="RequestReport")
assert params["ReportType"] == report_type
# Cannot assume the order of the options dict passed on older versions
# of Python, so two possible outputs are used:
# Further, the final result should be encoded once before being sent,
# resulting in the following URL-encoded strings.
options_possible = (
"custom%3Dtrue%3Bsomethingelse%3Dabc",
"somethingelse%3Dabc%3Bcustom%3Dtrue",
)
assert params["ReportOptions"] in options_possible
def test_parameter_error(self):
"""RequestReport wrong parameter"""
# list will throw error
report_type = ["_GET_FLAT_FILE_OPEN_LISTINGS_DATA_"]
start_date = datetime.datetime(2018, 4, 30, 22, 59, 59)
end_date = datetime.datetime(2018, 4, 30, 23, 59, 59)
marketplace_ids = [
"iQzBCmf1y3",
"wH9q0CiEMp",
]
with self.assertRaises(mws.MWSError):
self.api.request_report(
report_type=report_type,
start_date=start_date,
end_date=end_date,
marketplace_ids=marketplace_ids,
)
def test_get_report_request_list(self):
"""GetReportRequestList operation."""
request_ids = [
"rPlSxpfnR7",
"qRrkqv03qh",
]
report_types = [
"_GET_MFN_PAN_EU_OFFER_STATUS_",
"_GET_FLAT_FILE_ORDERS_DATA_",
]
processing_statuses = [
"_SUBMITTED_",
"_DONE_NO_DATA_",
]
max_count = 987
from_date = datetime.datetime.utcnow()
to_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
params = self.api.get_report_request_list(
request_ids=request_ids,
report_types=report_types,
processing_statuses=processing_statuses,
max_count=max_count,
from_date=from_date,
to_date=to_date,
)
self.assert_common_params(params, action="GetReportRequestList")
self.assertEqual(params["MaxCount"], str(max_count))
self.assertEqual(params["RequestedFromDate"], clean_date(from_date))
self.assertEqual(params["RequestedToDate"], clean_date(to_date))
self.assertEqual(params["ReportRequestIdList.Id.1"], request_ids[0])
self.assertEqual(params["ReportRequestIdList.Id.2"], request_ids[1])
self.assertEqual(params["ReportTypeList.Type.1"], report_types[0])
self.assertEqual(params["ReportTypeList.Type.2"], report_types[1])
self.assertEqual(
params["ReportProcessingStatusList.Status.1"], processing_statuses[0]
)
self.assertEqual(
params["ReportProcessingStatusList.Status.2"], processing_statuses[1]
)
def test_get_report_request_list_by_next_token(self):
"""GetReportRequestListByNextToken operation, via method decorator."""
next_token = "RXmLZ2bEgE"
params = self.api.get_report_request_list(next_token=next_token)
self.assert_common_params(params, action="GetReportRequestListByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_report_request_list_by_next_token_alias(self):
"""GetReportRequestListByNextToken operation, via alias method."""
next_token = "0hytxbkaOb"
params = self.api.get_report_request_list_by_next_token(next_token)
self.assert_common_params(params, action="GetReportRequestListByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_report_request_count(self):
"""GetReportRequestCount operation."""
report_types = [
"_GET_XML_ALL_ORDERS_DATA_BY_LAST_UPDATE_",
"_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_",
]
processing_statuses = [
"_CANCELLED_",
"_IN_PROGRESS_",
]
from_date = datetime.datetime.utcnow()
to_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
params = self.api.get_report_request_count(
report_types=report_types,
processing_statuses=processing_statuses,
from_date=from_date,
to_date=to_date,
)
self.assert_common_params(params, action="GetReportRequestCount")
self.assertEqual(params["RequestedFromDate"], clean_date(from_date))
self.assertEqual(params["RequestedToDate"], clean_date(to_date))
self.assertEqual(params["ReportTypeList.Type.1"], report_types[0])
self.assertEqual(params["ReportTypeList.Type.2"], report_types[1])
self.assertEqual(
params["ReportProcessingStatusList.Status.1"], processing_statuses[0]
)
self.assertEqual(
params["ReportProcessingStatusList.Status.2"], processing_statuses[1]
)
def test_get_report_list(self):
"""GetReportList operation."""
request_ids = [
"c4eik8sxXC",
"NIVgnbHXe0",
]
report_types = [
"_GET_V1_SELLER_PERFORMANCE_REPORT_",
"_GET_SELLER_FEEDBACK_DATA_",
]
max_count = 564
acknowledged = True
from_date = datetime.datetime.utcnow()
to_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
params = self.api.get_report_list(
request_ids=request_ids,
max_count=max_count,
report_types=report_types,
acknowledged=acknowledged,
from_date=from_date,
to_date=to_date,
)
self.assert_common_params(params, action="GetReportList")
self.assertEqual(params["Acknowledged"], clean_bool(acknowledged))
self.assertEqual(params["AvailableFromDate"], clean_date(from_date))
self.assertEqual(params["AvailableToDate"], clean_date(to_date))
self.assertEqual(params["MaxCount"], str(max_count))
self.assertEqual(params["ReportRequestIdList.Id.1"], request_ids[0])
self.assertEqual(params["ReportRequestIdList.Id.2"], request_ids[1])
self.assertEqual(params["ReportTypeList.Type.1"], report_types[0])
self.assertEqual(params["ReportTypeList.Type.2"], report_types[1])
def test_get_report_list_by_next_token(self):
"""GetReportListByNextToken operation, via method decorator."""
next_token = "5u6Of2fS8B"
params = self.api.get_report_list(next_token=next_token)
self.assert_common_params(params, action="GetReportListByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_report_list_by_next_token_alias(self):
"""GetReportListByNextToken operation, via alias method."""
next_token = "3TczcliCkb"
params = self.api.get_report_list_by_next_token(next_token)
self.assert_common_params(params, action="GetReportListByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_report_count(self):
"""GetReportCount operation."""
report_types = [
"_GET_AMAZON_FULFILLED_SHIPMENTS_DATA_",
"_GET_AFN_INVENTORY_DATA_BY_COUNTRY_",
]
acknowledged = True
from_date = datetime.datetime.utcnow()
to_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
params = self.api.get_report_count(
report_types=report_types,
acknowledged=acknowledged,
from_date=from_date,
to_date=to_date,
)
self.assert_common_params(params, action="GetReportCount")
self.assertEqual(params["Acknowledged"], clean_bool(acknowledged))
self.assertEqual(params["AvailableFromDate"], clean_date(from_date))
self.assertEqual(params["AvailableToDate"], clean_date(to_date))
self.assertEqual(params["ReportTypeList.Type.1"], report_types[0])
self.assertEqual(params["ReportTypeList.Type.2"], report_types[1])
def test_get_report(self):
"""GetReport operation."""
report_id = "wwqrl4bHvD"
params = self.api.get_report(report_id=report_id)
self.assert_common_params(params, action="GetReport")
self.assertEqual(params["ReportId"], report_id)
def test_get_report_schedule_list(self):
"""GetReportScheduleList operation."""
report_types = [
"_GET_FBA_FULFILLMENT_INBOUND_NONCOMPLIANCE_DATA_",
"_GET_RESTOCK_INVENTORY_RECOMMENDATIONS_REPORT_",
]
params = self.api.get_report_schedule_list(report_types=report_types)
self.assert_common_params(params, action="GetReportScheduleList")
self.assertEqual(params["ReportTypeList.Type.1"], report_types[0])
self.assertEqual(params["ReportTypeList.Type.2"], report_types[1])
def test_get_report_schedule_list_by_next_token(self):
"""GetReportScheduleListByNextToken operation, via method decorator."""
next_token = "Yj3hOfPcIE"
params = self.api.get_report_schedule_list(next_token=next_token)
self.assert_common_params(params, action="GetReportScheduleListByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_report_schedule_list_by_next_token_alias(self):
"""GetReportScheduleListByNextToken operation, via alias method."""
next_token = "SAlt4JwJGv"
params = self.api.get_report_schedule_list_by_next_token(next_token)
self.assert_common_params(params, action="GetReportScheduleListByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_get_report_schedule_count(self):
"""GetReportScheduleCount operation."""
report_types = [
"_GET_STRANDED_INVENTORY_UI_DATA_",
"_GET_FBA_ESTIMATED_FBA_FEES_TXT_DATA_",
]
params = self.api.get_report_schedule_count(report_types=report_types)
self.assert_common_params(params, action="GetReportScheduleCount")
self.assertEqual(params["ReportTypeList.Type.1"], report_types[0])
self.assertEqual(params["ReportTypeList.Type.2"], report_types[1])
# # TODO Complete when method is available in Reports
# def test_update_report_acknowledgements(self):
# """UpdateReportAcknowledgements operation."""
# pass
| 43.442857
| 84
| 0.669434
|
a08bc960244b71973edb217b0dc1d905621d2e4c
| 3,513
|
py
|
Python
|
users/forms.py
|
sLeeNguyen/sales-support
|
3f0a6977c8c26743373a70b4296516b7a71ccf4a
|
[
"Apache-2.0"
] | 1
|
2021-03-22T14:07:30.000Z
|
2021-03-22T14:07:30.000Z
|
users/forms.py
|
sLeeNguyen/sales-support
|
3f0a6977c8c26743373a70b4296516b7a71ccf4a
|
[
"Apache-2.0"
] | null | null | null |
users/forms.py
|
sLeeNguyen/sales-support
|
3f0a6977c8c26743373a70b4296516b7a71ccf4a
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils import timezone
from users.models import User
class RegisterForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
password2 = forms.CharField(label='Confirm password', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('first_name', 'last_name', 'display_name', 'gender', 'birthday', 'phone_number')
def clean_birthday(self):
        # Check that the birthday is not in the future
        birthday = self.cleaned_data.get('birthday')
        # Assumes birthday is a DateField value (datetime.date)
        if birthday and birthday > timezone.now().date():
            raise forms.ValidationError("Birthday must be in the past")
return birthday
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
class UserAdminCreationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required
fields, plus a repeated password.
"""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('first_name', 'last_name', 'display_name', 'gender', 'birthday', 'phone_number')
def clean_birthday(self):
        # Check that the birthday is not in the future
        birthday = self.cleaned_data.get('birthday')
        # Assumes birthday is a DateField value (datetime.date)
        if birthday and birthday > timezone.now().date():
            raise forms.ValidationError("Birthday must be in the past")
return birthday
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserAdminCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserAdminChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('first_name', 'last_name', 'display_name', 'gender', 'birthday', 'phone_number', 'user_permissions',
'is_active')
def clean_birthday(self):
        # Check that the birthday is not in the future
        birthday = self.cleaned_data.get('birthday')
        if birthday is None:
            return
        # Assumes birthday is a DateField value (datetime.date)
        if birthday > timezone.now().date():
raise forms.ValidationError("Birthday must be in the past")
return birthday
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
| 37.37234
| 118
| 0.670367
|
60996521dc1adbae40409a9461c620f4a76c94b6
| 1,310
|
py
|
Python
|
common/vector.py
|
ollinevalainen/satellitetools
|
c36cb686bb6d87d5268890706d71f2144144b4c0
|
[
"MIT"
] | 6
|
2021-02-26T09:17:15.000Z
|
2022-01-10T17:10:04.000Z
|
common/vector.py
|
ollinevalainen/satellitetools
|
c36cb686bb6d87d5268890706d71f2144144b4c0
|
[
"MIT"
] | 2
|
2020-06-09T09:55:45.000Z
|
2022-02-23T12:36:01.000Z
|
common/vector.py
|
ollinevalainen/satellitetools
|
c36cb686bb6d87d5268890706d71f2144144b4c0
|
[
"MIT"
] | 1
|
2021-06-08T01:09:22.000Z
|
2021-06-08T01:09:22.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 11:23:20 2021
@author: Olli Nevalainen (Finnish Meteorological Institute)
"""
import pyproj
from shapely.ops import transform
import numpy as np
def transform_crs(aoi_geometry, src_crs: str, dst_crs: str):
    # Transform aoi_geometry to the destination (e.g. raster) CRS
project = pyproj.Transformer.from_proj(
pyproj.Proj(src_crs), # source coordinate system
pyproj.Proj(dst_crs),
always_xy=True,
)
aoi_geometry_transformed = transform(project.transform, aoi_geometry)
return aoi_geometry_transformed
def expand_bounds(bbox_coordinates: list, amount: float):
bbox_coordinates[0] = bbox_coordinates[0] - amount
bbox_coordinates[1] = bbox_coordinates[1] - amount
bbox_coordinates[2] = bbox_coordinates[2] + amount
bbox_coordinates[3] = bbox_coordinates[3] + amount
return bbox_coordinates
def create_coordinate_arrays(profile):
dx = profile["transform"].a
dy = profile["transform"].e
# upperleft corner coordinates
x_ul = profile["transform"].c
y_ul = profile["transform"].f
width = profile["width"]
height = profile["height"]
xs = np.array([x_ul + i * dx for i in range(width)])
ys = np.array([y_ul + i * dy for i in range(height)])
return xs, ys
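# Hedged usage sketch (not from the source): reproject a point from WGS84 to UTM
# zone 35N and pad its bounding box by 10 map units; the coordinates and EPSG codes
# are illustrative.
if __name__ == "__main__":
    from shapely.geometry import Point

    point_utm = transform_crs(Point(24.94, 60.17), "EPSG:4326", "EPSG:32635")
    padded_bbox = expand_bounds(list(point_utm.bounds), 10.0)
    print(padded_bbox)  # [minx - 10, miny - 10, maxx + 10, maxy + 10]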
| 27.291667
| 73
| 0.69542
|
66816d44facf7f17d6c0d792bd61c5e6f405386f
| 1,535
|
py
|
Python
|
crosshair/auditwall_test.py
|
lmontand/CrossHair
|
ddcea5a19ea2ba48ac47fff4dcf1a1aa4479393c
|
[
"MIT"
] | null | null | null |
crosshair/auditwall_test.py
|
lmontand/CrossHair
|
ddcea5a19ea2ba48ac47fff4dcf1a1aa4479393c
|
[
"MIT"
] | null | null | null |
crosshair/auditwall_test.py
|
lmontand/CrossHair
|
ddcea5a19ea2ba48ac47fff4dcf1a1aa4479393c
|
[
"MIT"
] | null | null | null |
import os
from subprocess import call
import sys
import urllib.request
from crosshair.auditwall import engage_auditwall
from crosshair.auditwall import SideEffectDetected
# audit hooks cannot be uninstalled, and we don't want to wall off the
# testing process. Spawn subprocesses instead.
if sys.version_info >= (3, 8): # audithook is new in 3.8
def test_fs_read_allowed():
assert call(["python", __file__, "read_open", "withwall"]) != 10
def test_scandir_allowed():
assert call(["python", __file__, "scandir", "withwall"]) == 0
def test_import_allowed():
assert call(["python", __file__, "import", "withwall"]) == 0
def test_fs_write_disallowed():
assert call(["python", __file__, "write_open", "withwall"]) == 10
def test_http_disallowed():
assert call(["python", __file__, "http", "withwall"]) == 10
def test_unlink_disallowed():
assert call(["python", __file__, "unlink", "withwall"]) == 10
_ACTIONS = {
"read_open": lambda: open("/dev/null", "rb"),
"scandir": lambda: os.scandir("."),
"import": lambda: __import__("shutil"),
"write_open": lambda: open("/.auditwall.testwrite.txt", "w"),
"http": lambda: urllib.request.urlopen("http://localhost/foo"),
"unlink": lambda: os.unlink("./delme.txt"),
}
if __name__ == "__main__":
action, wall = sys.argv[1:]
if wall == "withwall":
engage_auditwall()
try:
_ACTIONS[action]()
except SideEffectDetected as e:
print(e)
sys.exit(10)
| 29.519231
| 73
| 0.646906
|
b39c5d68bad6d5f91049f9c0c7f3a94159af3a87
| 817
|
py
|
Python
|
map/models.py
|
bobvoorneveld/spindlechannels
|
2afc4bf9f021bdef39a6a3f610d2eaff43618ca8
|
[
"MIT"
] | 1
|
2016-10-05T12:38:47.000Z
|
2016-10-05T12:38:47.000Z
|
map/models.py
|
bobvoorneveld/spindlechannels
|
2afc4bf9f021bdef39a6a3f610d2eaff43618ca8
|
[
"MIT"
] | null | null | null |
map/models.py
|
bobvoorneveld/spindlechannels
|
2afc4bf9f021bdef39a6a3f610d2eaff43618ca8
|
[
"MIT"
] | 2
|
2017-02-16T08:53:40.000Z
|
2019-01-11T13:18:38.000Z
|
import json
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from geojson import Feature
class Marker(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
location = models.PointField()
user = models.ForeignKey(User, null=True, blank=True, default=None)
@property
def geojson_feature(self):
return Feature(
geometry=json.loads(self.location.geojson),
id=self.pk,
properties={
# 'name': '',
'created': str(self.created),
'modified': str(self.modified),
'model': 'Marker',
'pk': self.pk,
'user': self.user.pk if self.user else -1,
}
)
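# Hedged usage sketch (not part of the app): serialize every marker into a GeoJSON
# FeatureCollection for the map front end; FeatureCollection and dumps come from the
# same geojson package imported above.
def markers_as_geojson():
    from geojson import FeatureCollection, dumps
    features = [marker.geojson_feature for marker in Marker.objects.all()]
    return dumps(FeatureCollection(features))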
| 28.172414
| 71
| 0.583843
|
e6a141456301d8a9d5fcc258d7b6aa4afd6dc152
| 3,892
|
py
|
Python
|
gen_anchors.py
|
andreasmarxer/keras-yolo3
|
b56bf38c830c6a57a299335aa2192fe845830264
|
[
"MIT"
] | null | null | null |
gen_anchors.py
|
andreasmarxer/keras-yolo3
|
b56bf38c830c6a57a299335aa2192fe845830264
|
[
"MIT"
] | null | null | null |
gen_anchors.py
|
andreasmarxer/keras-yolo3
|
b56bf38c830c6a57a299335aa2192fe845830264
|
[
"MIT"
] | null | null | null |
import random
import argparse
import numpy as np
from voc import parse_voc_annotation
import json
def IOU(ann, centroids):
w, h = ann
similarities = []
for centroid in centroids:
c_w, c_h = centroid
if c_w >= w and c_h >= h:
similarity = w*h/(c_w*c_h)
elif c_w >= w and c_h <= h:
similarity = w*c_h/(w*h + (c_w-w)*c_h)
elif c_w <= w and c_h >= h:
similarity = c_w*h/(w*h + c_w*(c_h-h))
else: #means both w,h are bigger than c_w and c_h respectively
similarity = (c_w*c_h)/(w*h)
similarities.append(similarity) # will become (k,) shape
return np.array(similarities)
def avg_IOU(anns, centroids):
n,d = anns.shape
sum = 0.
for i in range(anns.shape[0]):
sum+= max(IOU(anns[i], centroids))
return sum/n
def print_anchors(centroids):
out_string = ''
anchors = centroids.copy()
widths = anchors[:, 0]
sorted_indices = np.argsort(widths)
r = "anchors: ["
for i in sorted_indices:
out_string += str(int(anchors[i,0]*416)) + ',' + str(int(anchors[i,1]*416)) + ', '
print(out_string[:-2])
def run_kmeans(ann_dims, anchor_num):
ann_num = ann_dims.shape[0]
iterations = 0
prev_assignments = np.ones(ann_num)*(-1)
iteration = 0
old_distances = np.zeros((ann_num, anchor_num))
indices = [random.randrange(ann_dims.shape[0]) for i in range(anchor_num)]
centroids = ann_dims[indices]
anchor_dim = ann_dims.shape[1]
while True:
distances = []
iteration += 1
for i in range(ann_num):
d = 1 - IOU(ann_dims[i], centroids)
distances.append(d)
distances = np.array(distances) # distances.shape = (ann_num, anchor_num)
print("iteration {}: dists = {}".format(iteration, np.sum(np.abs(old_distances-distances))))
#assign samples to centroids
assignments = np.argmin(distances,axis=1)
if (assignments == prev_assignments).all() :
return centroids
#calculate new centroids
        centroid_sums = np.zeros((anchor_num, anchor_dim), dtype=float)  # np.float alias removed in newer NumPy
for i in range(ann_num):
centroid_sums[assignments[i]]+=ann_dims[i]
for j in range(anchor_num):
centroids[j] = centroid_sums[j]/(np.sum(assignments==j) + 1e-6)
prev_assignments = assignments.copy()
old_distances = distances.copy()
def _main_(args):
config_path = args.conf
num_anchors = args.anchors
with open(config_path) as config_buffer:
config = json.loads(config_buffer.read())
train_imgs, train_labels = parse_voc_annotation(
config['train']['train_annot_folder'],
config['train']['train_image_folder'],
config['train']['cache_name'],
config['model']['labels']
)
# run k_mean to find the anchors
annotation_dims = []
for image in train_imgs:
print(image['filename'])
for obj in image['object']:
relative_w = (float(obj['xmax']) - float(obj['xmin']))/image['width']
relatice_h = (float(obj["ymax"]) - float(obj['ymin']))/image['height']
annotation_dims.append(tuple(map(float, (relative_w,relatice_h))))
annotation_dims = np.array(annotation_dims)
centroids = run_kmeans(annotation_dims, num_anchors)
# write anchors to file
print('\naverage IOU for', num_anchors, 'anchors:', '%0.2f' % avg_IOU(annotation_dims, centroids))
print_anchors(centroids)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument(
'-c',
'--conf',
default='config.json',
help='path to configuration file')
argparser.add_argument(
'-a',
'--anchors',
default=9,
help='number of anchors to use')
args = argparser.parse_args()
_main_(args)
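# Illustrative check of the IOU measure defined above (not part of the original
# script): a 0.5x0.5 box against a same-size centroid and a 1.0x1.0 centroid.
if __name__ == '__main__':
    print(IOU([0.5, 0.5], np.array([[0.5, 0.5], [1.0, 1.0]])))  # -> IoU values 1.0 and 0.25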
| 29.263158
| 102
| 0.611511
|
188e2517a8e8e70d4b7b1b5449af8c145d9fd78f
| 7,328
|
py
|
Python
|
aea/test_tools/generic.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
aea/test_tools/generic.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
aea/test_tools/generic.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains generic tools for AEA end-to-end testing."""
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, cast
from aea.configurations.base import (
CRUDCollection,
ComponentConfiguration,
PackageConfiguration,
PackageType,
PublicId,
SkillConfig,
dependencies_from_json,
)
from aea.configurations.manager import handle_dotted_path
from aea.exceptions import enforce
from aea.helpers.file_io import write_envelope
from aea.helpers.yaml_utils import yaml_dump, yaml_dump_all
from aea.mail.base import Envelope
from aea.test_tools.constants import DEFAULT_AUTHOR
def write_envelope_to_file(envelope: Envelope, file_path: str) -> None:
"""
Write an envelope to a file.
:param envelope: Envelope.
:param file_path: the file path
:return: None
"""
with open(Path(file_path), "ab+") as f:
write_envelope(envelope, f)
def read_envelope_from_file(file_path: str):
"""
Read an envelope from a file.
    :param file_path: the file path.
:return: envelope
"""
lines = []
with open(Path(file_path), "rb+") as f:
lines.extend(f.readlines())
enforce(len(lines) == 2, "Did not find two lines.")
line = lines[0] + lines[1]
to_b, sender_b, protocol_id_b, message, end = line.strip().split(b",", maxsplit=4)
to = to_b.decode("utf-8")
sender = sender_b.decode("utf-8")
protocol_id = PublicId.from_str(protocol_id_b.decode("utf-8"))
enforce(end in [b"", b"\n"], "Envelope improperly formatted.")
return Envelope(to=to, sender=sender, protocol_id=protocol_id, message=message,)
def _nested_set(
configuration_obj: PackageConfiguration, keys: List, value: Any
) -> None:
"""
    Set a nested value on a configuration object. Force-sets the value, overwriting any present value, while maintaining schema validation.
:param configuration_obj: configuration object
:param keys: list of keys.
:param value: a value to set.
:return: None.
"""
def get_nested_ordered_dict_from_dict(input_dict: Dict) -> Dict:
_dic = {}
for _key, _value in input_dict.items():
if isinstance(_value, dict):
_dic[_key] = OrderedDict(get_nested_ordered_dict_from_dict(_value))
else:
_dic[_key] = _value
return _dic
def get_nested_ordered_dict_from_keys_and_value(
keys: List[str], value: Any
) -> Dict:
_dic = (
OrderedDict(get_nested_ordered_dict_from_dict(value))
if isinstance(value, dict)
else value
)
for key in keys[::-1]:
_dic = OrderedDict({key: _dic})
return _dic
root_key = keys[0]
if (
isinstance(configuration_obj, SkillConfig)
and root_key in SkillConfig.FIELDS_WITH_NESTED_FIELDS
):
root_attr = getattr(configuration_obj, root_key)
length = len(keys)
if length < 3 or keys[2] not in SkillConfig.NESTED_FIELDS_ALLOWED_TO_UPDATE:
raise ValueError(f"Invalid keys={keys}.") # pragma: nocover
skill_component_id = keys[1]
skill_component_config = root_attr.read(skill_component_id)
if length == 3 and isinstance(value, dict): # root.skill_component_id.args
# set all args
skill_component_config.args = get_nested_ordered_dict_from_dict(value)
elif len(keys) >= 4: # root.skill_component_id.args.[keys]
# update some args
dic = get_nested_ordered_dict_from_keys_and_value(keys[3:], value)
skill_component_config.args.update(dic)
else:
raise ValueError( # pragma: nocover
f"Invalid keys={keys} and values={value}."
)
root_attr.update(skill_component_id, skill_component_config)
else:
root_attr = getattr(configuration_obj, root_key)
if isinstance(root_attr, CRUDCollection):
if isinstance(value, dict) and len(keys) == 1: # root.
for _key, _value in value.items():
dic = get_nested_ordered_dict_from_keys_and_value([_key], _value)
root_attr.update(_key, dic[_key])
elif len(keys) >= 2: # root.[keys]
dic = get_nested_ordered_dict_from_keys_and_value(keys[1:], value)
root_attr.update(keys[1], dic[keys[1]])
else:
raise ValueError( # pragma: nocover
f"Invalid keys={keys} and values={value}."
)
elif root_key == "dependencies":
enforce(
isinstance(configuration_obj, ComponentConfiguration),
"Cannot only set dependencies to ComponentConfiguration instances.",
)
configuration_obj = cast(ComponentConfiguration, configuration_obj)
new_pypi_dependencies = dependencies_from_json(value)
configuration_obj.pypi_dependencies = new_pypi_dependencies
else:
dic = get_nested_ordered_dict_from_keys_and_value(keys, value)
setattr(configuration_obj, root_key, dic[root_key])
def nested_set_config(
dotted_path: str, value: Any, author: str = DEFAULT_AUTHOR
) -> None:
"""
Set an AEA config with nested values.
Run from agent's directory.
Allowed dotted_path:
'agent.an_attribute_name'
'protocols.my_protocol.an_attribute_name'
'connections.my_connection.an_attribute_name'
'contracts.my_contract.an_attribute_name'
'skills.my_skill.an_attribute_name'
        'vendor.author.[protocols|connections|skills].package_name.attribute_name'
:param dotted_path: dotted path to a setting.
:param value: a value to assign. Must be of yaml serializable type.
:param author: the author name, used to parse the dotted path.
:return: None.
"""
settings_keys, config_file_path, config_loader, _ = handle_dotted_path(
dotted_path, author
)
with config_file_path.open() as fp:
config = config_loader.load(fp)
_nested_set(config, settings_keys, value)
if config.package_type == PackageType.AGENT:
json_data = config.ordered_json
component_configurations = json_data.pop("component_configurations")
with config_file_path.open("w") as fp:
yaml_dump_all([json_data] + component_configurations, fp)
else:
with config_file_path.open("w") as fp:
yaml_dump(config.ordered_json, fp)
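# Hedged usage sketch (not from the source): run from an agent's project directory;
# the dotted paths and values below are illustrative attribute names only.
#
#   nested_set_config("agent.default_ledger", "fetchai")
#   nested_set_config("skills.my_skill.behaviours.my_behaviour.args.tick_interval", 5)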
| 36.457711
| 123
| 0.647789
|
a1fbd5dd67bd48977dc5a19e25cbcdc09a7bb5a8
| 1,913
|
py
|
Python
|
experiments/finetune/sanitize.py
|
lxuechen/swissknife
|
43dbd36f1e998ebe29c0b85fafd0de765dfb5de8
|
[
"MIT"
] | 1
|
2022-02-25T00:00:30.000Z
|
2022-02-25T00:00:30.000Z
|
experiments/finetune/sanitize.py
|
lxuechen/swissknife
|
43dbd36f1e998ebe29c0b85fafd0de765dfb5de8
|
[
"MIT"
] | null | null | null |
experiments/finetune/sanitize.py
|
lxuechen/swissknife
|
43dbd36f1e998ebe29c0b85fafd0de765dfb5de8
|
[
"MIT"
] | null | null | null |
"""Convert the generations to the desired format."""
import json
import fire
from swissknife import utils
def dedup_generations():
"""Deduplicate generations -- need a single generation for a single prompt."""
ref_file = "/Users/xuechenli/data/e2e_gpt3_full/test.jsonl"
in_file = "./e2e-test-fine-tuned-curie.txt"
out_file = "./e2e-test-fine-tuned-curie-dedup.txt"
with open(ref_file, 'r') as f:
ref_dicts = [json.loads(line.strip()) for line in f.readlines()]
with open(in_file, 'r') as f:
in_lines = [line.strip() for line in f.readlines()]
out_lines = []
last_prompt = 'NA'
for in_line, ref_dict in utils.zip_(in_lines, ref_dicts):
prompt = ref_dict["prompt"]
if prompt == last_prompt:
continue
last_prompt = prompt
out_lines.append(in_line)
with open(out_file, 'w') as g:
g.writelines('\n'.join(out_lines))
def dedup_prompts():
"""Deduplicate the test file -- collect the non-duplicate prompts."""
ref_file = "/Users/xuechenli/data/e2e_gpt3_full/test.jsonl"
out_file = "/Users/xuechenli/data/e2e_gpt3_full/test-dedup.jsonl"
with open(ref_file, 'r') as f:
ref_dicts = [json.loads(line.strip()) for line in f.readlines()]
out_lines = []
last_prompt = 'NA'
for ref_dict in ref_dicts:
prompt = ref_dict["prompt"]
if prompt == last_prompt:
continue
last_prompt = prompt
out_lines.append(json.dumps(ref_dict).strip())
with open(out_file, 'w') as g:
g.writelines('\n'.join(out_lines))
def main(task="dedup_prompts"):
if task == "dedup_prompts":
# python sanitize.py --task dedup_prompts
dedup_prompts()
elif task == "dedup_generations":
dedup_generations()
else:
raise ValueError(f"Unknown task: {task}")
if __name__ == "__main__":
fire.Fire(main)
| 27.328571
| 82
| 0.637219
|
cd6535c4cd98bf578bb2844ba3f0cbbca9159420
| 1,707
|
py
|
Python
|
sosw/test/unit/test_labourer.py
|
EllaMozes/sosw
|
06686332bc4e7774715dd0d27c8bfa3890b4a340
|
[
"MIT"
] | 7
|
2019-07-28T17:54:21.000Z
|
2021-02-20T21:14:11.000Z
|
sosw/test/unit/test_labourer.py
|
EllaMozes/sosw
|
06686332bc4e7774715dd0d27c8bfa3890b4a340
|
[
"MIT"
] | 48
|
2019-01-28T21:45:28.000Z
|
2019-07-23T08:19:56.000Z
|
sosw/test/unit/test_labourer.py
|
EllaMozes/sosw
|
06686332bc4e7774715dd0d27c8bfa3890b4a340
|
[
"MIT"
] | 10
|
2019-07-29T17:56:34.000Z
|
2022-01-18T10:36:22.000Z
|
import os
import time
import unittest
from unittest.mock import patch
from sosw.labourer import Labourer
os.environ["STAGE"] = "test"
os.environ["autotest"] = "True"
class Labourer_UnitTestCase(unittest.TestCase):
def setUp(self):
self.labourer = Labourer(id=42, arn='arn::aws::lambda')
def test_init(self):
self.assertEqual(self.labourer.id, 42)
self.assertEqual(self.labourer.arn, 'arn::aws::lambda')
def test_init_attrs(self):
lab = Labourer(id='foo', arn='arn::aws::lambda', max_invocations=13)
self.assertEqual(lab.id, 'foo')
self.assertEqual(lab.arn, 'arn::aws::lambda')
self.assertEqual(lab.max_invocations, 13)
def test_init__strict_raises(self):
self.assertRaises(AttributeError, Labourer, **{'foo': 'bar', 'strict': True}), \
f"Labourer supports only {Labourer.ATTRIBUTES}"
def test_set_defaults__called(self):
with patch('sosw.labourer.Labourer.set_defaults') as sd:
lab = Labourer(id=42)
sd.assert_called_once()
def test_set_defaults(self):
self.assertEqual(self.labourer.duration, 900)
def test_set_defaults_overrides(self):
lab = Labourer(id=42, duration=300)
self.assertEqual(lab.duration, 300)
def test_get_attr(self):
self.assertRaises(ValueError, self.labourer.get_attr, 'invalid')
self.assertRaises(AttributeError, self.labourer.get_attr, 'start')
def test_set_custom_attributes(self):
self.assertIsNone(getattr(self.labourer, 'start', None))
self.labourer.set_custom_attribute('start', time.time())
self.assertLessEqual(self.labourer.start, time.time())
| 25.863636
| 88
| 0.674868
|
31e260e512621aeb4cd5ce082d219395415f7321
| 4,996
|
py
|
Python
|
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/InsertApplicationRequest.py
|
ankitdobhal/aliyun-openapi-python-sdk
|
991b1c2d91adc468480defc23ba790d4369cce7b
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/InsertApplicationRequest.py
|
ankitdobhal/aliyun-openapi-python-sdk
|
991b1c2d91adc468480defc23ba790d4369cce7b
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/InsertApplicationRequest.py
|
ankitdobhal/aliyun-openapi-python-sdk
|
991b1c2d91adc468480defc23ba790d4369cce7b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class InsertApplicationRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'InsertApplication','edas')
self.set_uri_pattern('/pop/v5/changeorder/co_create_app')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_WebContainer(self):
return self.get_query_params().get('WebContainer')
def set_WebContainer(self,WebContainer):
self.add_query_param('WebContainer',WebContainer)
def get_EcuInfo(self):
return self.get_query_params().get('EcuInfo')
def set_EcuInfo(self,EcuInfo):
self.add_query_param('EcuInfo',EcuInfo)
def get_MinHeapSize(self):
return self.get_query_params().get('MinHeapSize')
def set_MinHeapSize(self,MinHeapSize):
self.add_query_param('MinHeapSize',MinHeapSize)
def get_BuildPackId(self):
return self.get_query_params().get('BuildPackId')
def set_BuildPackId(self,BuildPackId):
self.add_query_param('BuildPackId',BuildPackId)
def get_ComponentIds(self):
return self.get_query_params().get('ComponentIds')
def set_ComponentIds(self,ComponentIds):
self.add_query_param('ComponentIds',ComponentIds)
def get_HealthCheckUrl(self):
return self.get_query_params().get('HealthCheckUrl')
def set_HealthCheckUrl(self,HealthCheckUrl):
self.add_query_param('HealthCheckUrl',HealthCheckUrl)
def get_ReservedPortStr(self):
return self.get_query_params().get('ReservedPortStr')
def set_ReservedPortStr(self,ReservedPortStr):
self.add_query_param('ReservedPortStr',ReservedPortStr)
def get_JvmOptions(self):
return self.get_query_params().get('JvmOptions')
def set_JvmOptions(self,JvmOptions):
self.add_query_param('JvmOptions',JvmOptions)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Cpu(self):
return self.get_query_params().get('Cpu')
def set_Cpu(self,Cpu):
self.add_query_param('Cpu',Cpu)
def get_MaxPermSize(self):
return self.get_query_params().get('MaxPermSize')
def set_MaxPermSize(self,MaxPermSize):
self.add_query_param('MaxPermSize',MaxPermSize)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_MaxHeapSize(self):
return self.get_query_params().get('MaxHeapSize')
def set_MaxHeapSize(self,MaxHeapSize):
self.add_query_param('MaxHeapSize',MaxHeapSize)
def get_EnablePortCheck(self):
return self.get_query_params().get('EnablePortCheck')
def set_EnablePortCheck(self,EnablePortCheck):
self.add_query_param('EnablePortCheck',EnablePortCheck)
def get_ApplicationName(self):
return self.get_query_params().get('ApplicationName')
def set_ApplicationName(self,ApplicationName):
self.add_query_param('ApplicationName',ApplicationName)
def get_Jdk(self):
return self.get_query_params().get('Jdk')
def set_Jdk(self,Jdk):
self.add_query_param('Jdk',Jdk)
def get_Mem(self):
return self.get_query_params().get('Mem')
def set_Mem(self,Mem):
self.add_query_param('Mem',Mem)
def get_LogicalRegionId(self):
return self.get_query_params().get('LogicalRegionId')
def set_LogicalRegionId(self,LogicalRegionId):
self.add_query_param('LogicalRegionId',LogicalRegionId)
def get_EnableUrlCheck(self):
return self.get_query_params().get('EnableUrlCheck')
def set_EnableUrlCheck(self,EnableUrlCheck):
self.add_query_param('EnableUrlCheck',EnableUrlCheck)
def get_PackageType(self):
return self.get_query_params().get('PackageType')
def set_PackageType(self,PackageType):
self.add_query_param('PackageType',PackageType)
def get_Hooks(self):
return self.get_query_params().get('Hooks')
def set_Hooks(self,Hooks):
self.add_query_param('Hooks',Hooks)
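# Hedged usage sketch (not part of the SDK): the credentials, region and parameter
# values below are placeholders.
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = InsertApplicationRequest()
#   request.set_ApplicationName('demo-app')
#   request.set_ClusterId('<cluster-id>')
#   request.set_PackageType('JAR')
#   response = client.do_action_with_exception(request)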
| 31.421384
| 78
| 0.76241
|
0e8118b8c7f4d969197ae2d2a8d94ae7d4bc15fb
| 2,665
|
py
|
Python
|
fairseq/examples/deep_pyramidion/pooler_utils.py
|
applicaai/pyramidions
|
fa9dddc8fe98ffe5d45dd04dd502f33cb924e667
|
[
"CECILL-B"
] | 1
|
2022-03-08T21:59:53.000Z
|
2022-03-08T21:59:53.000Z
|
fairseq/examples/deep_pyramidion/pooler_utils.py
|
applicaai/pyramidions
|
fa9dddc8fe98ffe5d45dd04dd502f33cb924e667
|
[
"CECILL-B"
] | null | null | null |
fairseq/examples/deep_pyramidion/pooler_utils.py
|
applicaai/pyramidions
|
fa9dddc8fe98ffe5d45dd04dd502f33cb924e667
|
[
"CECILL-B"
] | null | null | null |
import torch
from torch import nn as nn
from .pooler import Pooler
from .successive_halving_topk import TopKConfig, TopKOperator
_supports_blockwise = ['pooler', 'only_blockwise']
class TopkPooler(Pooler):
"""
Token Pooler.
Args:
args (configargparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.args = args
self._prepare_pooler()
self.epsilon = 0.00001
def _prepare_pooler(self):
if self.args.encoder_pooling != "lambda":
self._set_scorer_architecture()
self._set_softselector_method()
else:
self.scorer = None
def _set_softselector_method(self):
if self.args.encoder_pooling == "topk":
self.selector = TopKOperator()
self.pooler_config = TopKConfig(
input_len=self.args.max_source_positions,
pooled_len=None, # Not known ahead, will be set dynamically
flip_right=self.args.flip_right,
base=20,
hard_topk_inference=False,
)
def _set_scorer_architecture(self):
if self.args.encoder_pooling_arch == "linear":
self.scorer = nn.ModuleList(
[
nn.Linear(self.args.encoder_embed_dim, 1)
for el in range(0, self.args.encoder_layers)
]
)
else:
self.scorer = None
def forward(
self, encoder_out, layer_i=-1, **kwargs
):
"""
Args:
encoded_tokens (FloatTensor): encoded tokens in the source language of shape
`(batch, src_len, emb_dim)`
"""
if self.is_lambda:
return encoder_out
else:
encoded_tokens = encoder_out.permute(1, 0, 2)
bs, input_seq_len, emb_dims = encoded_tokens.shape
if self.selector.pooled_len == input_seq_len:
return encoder_out
assert layer_i >= 0 and isinstance(self.scorer, nn.ModuleList) # FIXME: Remove
token_logits = self.scorer[layer_i](encoded_tokens)
assert not torch.isnan(token_logits).any()
assert token_logits.shape == torch.Size([bs, input_seq_len, 1])
pooled_output, pooled_scores = self.selector(
encoded_tokens, torch.sigmoid(token_logits) + self.epsilon
)
assert not torch.isnan(pooled_output).any()
assert pooled_output.shape == torch.Size([bs, self.pooler_config.pooled_len, emb_dims])
return pooled_output.permute(1, 0, 2)
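# Hedged construction sketch (not from the source): a minimal argparse.Namespace
# standing in for fairseq's parsed arguments; the field values are illustrative.
if __name__ == "__main__":
    import argparse
    toy_args = argparse.Namespace(
        encoder_pooling="topk", encoder_pooling_arch="linear",
        encoder_embed_dim=512, encoder_layers=6,
        max_source_positions=1024, flip_right=False,
    )
    pooler = TopkPooler(toy_args)  # one linear scorer per encoder layer, top-k selector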
| 31.352941
| 99
| 0.589869
|
9b497028b46fbe084f1c04184a918fb1f9efc7d1
| 4,680
|
py
|
Python
|
examples/contrib/set_covering4.py
|
klorel/or-tools
|
f3fd201e68cf75b7720ff5c3cadc599a1d02b54b
|
[
"Apache-2.0"
] | 279
|
2015-01-10T09:55:35.000Z
|
2022-03-28T02:34:03.000Z
|
examples/contrib/set_covering4.py
|
walkerke/or-tools
|
39f44709bba203f5ff3bc18fab8098739f189a6d
|
[
"Apache-2.0"
] | 10
|
2017-10-05T15:48:50.000Z
|
2021-09-20T12:06:52.000Z
|
examples/contrib/set_covering4.py
|
walkerke/or-tools
|
39f44709bba203f5ff3bc18fab8098739f189a6d
|
[
"Apache-2.0"
] | 83
|
2015-01-20T03:44:00.000Z
|
2022-03-13T23:53:06.000Z
|
# Copyright 2010 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set partition and set covering in Google CP Solver.
Example from the Swedish book
Lundgren, Roennqvist, Vaebrand
'Optimeringslaera' (translation: 'Optimization theory'),
page 408.
* Set partition:
We want to minimize the cost of the alternatives that cover all the
objects, i.e. all objects must be chosen. The requirement is that an
object may be selected _exactly_ once.
Note: This is 1-based representation
Alternative Cost Object
1 19 1,6
2 16 2,6,8
3 18 1,4,7
4 13 2,3,5
5 15 2,5
6 19 2,3
7 15 2,3,4
8 17 4,5,8
9 16 3,6,8
10 15 1,6,7
The problem has a unique solution of z = 49 where alternatives
3, 5, and 9
are selected.
* Set covering:
If, however, we allow an object to be selected _more than once_,
then the solution is z = 45 (i.e. less cost than the first problem),
and the alternatives
4, 8, and 10
are selected, where object 5 is selected twice (alt. 4 and 8).
It's a unique solution as well.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/set_covering4.mzn
* Comet : http://www.hakank.org/comet/set_covering4.co
* ECLiPSe : http://www.hakank.org/eclipse/set_covering4.ecl
* SICStus : http://www.hakank.org/sicstus/set_covering4.pl
* Gecode : http://www.hakank.org/gecode/set_covering4.cpp
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.constraint_solver import pywrapcp
def main(set_partition=1):
# Create the solver.
solver = pywrapcp.Solver("Set partition and set covering")
#
# data
#
num_alternatives = 10
num_objects = 8
# costs for the alternatives
costs = [19, 16, 18, 13, 15, 19, 15, 17, 16, 15]
# the alternatives, and their objects
a = [
# 1 2 3 4 5 6 7 8 the objects
[1, 0, 0, 0, 0, 1, 0, 0], # alternative 1
[0, 1, 0, 0, 0, 1, 0, 1], # alternative 2
[1, 0, 0, 1, 0, 0, 1, 0], # alternative 3
[0, 1, 1, 0, 1, 0, 0, 0], # alternative 4
[0, 1, 0, 0, 1, 0, 0, 0], # alternative 5
[0, 1, 1, 0, 0, 0, 0, 0], # alternative 6
[0, 1, 1, 1, 0, 0, 0, 0], # alternative 7
[0, 0, 0, 1, 1, 0, 0, 1], # alternative 8
[0, 0, 1, 0, 0, 1, 0, 1], # alternative 9
[1, 0, 0, 0, 0, 1, 1, 0] # alternative 10
]
#
# declare variables
#
x = [solver.IntVar(0, 1, "x[%i]" % i) for i in range(num_alternatives)]
#
# constraints
#
  # sum the cost of the chosen alternatives,
# to be minimized
z = solver.ScalProd(x, costs)
#
for j in range(num_objects):
if set_partition == 1:
solver.Add(
solver.SumEquality([x[i] * a[i][j] for i in range(num_alternatives)],
1))
else:
solver.Add(
solver.SumGreaterOrEqual(
[x[i] * a[i][j] for i in range(num_alternatives)], 1))
objective = solver.Minimize(z, 1)
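  # (The second argument to Minimize above is the minimum improvement step
  # required between successive solutions found by the search.)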
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.AddObjective(z)
collector = solver.LastSolutionCollector(solution)
solver.Solve(
solver.Phase([x[i] for i in range(num_alternatives)],
solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT),
[collector, objective])
print("z:", collector.ObjectiveValue(0))
print(
"selected alternatives:",
[i + 1 for i in range(num_alternatives) if collector.Value(0, x[i]) == 1])
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
if __name__ == "__main__":
print("Set partition:")
main(1)
print("\nSet covering:")
main(0)
| 30
| 80
| 0.601496
|
378895e357f05da28aadcedc7112ed24098f2b45
| 3,634
|
py
|
Python
|
vggish/vggish_input.py
|
vnyennhi/AICovidVN-115M-Jindo
|
fce515f883f5e99e7670cdbdaa6272d523a2917a
|
[
"MIT"
] | 10
|
2021-07-08T14:02:46.000Z
|
2021-10-10T21:33:49.000Z
|
vggish/vggish_input.py
|
vnyennhi/AICovidVN-115M-Jindo
|
fce515f883f5e99e7670cdbdaa6272d523a2917a
|
[
"MIT"
] | null | null | null |
vggish/vggish_input.py
|
vnyennhi/AICovidVN-115M-Jindo
|
fce515f883f5e99e7670cdbdaa6272d523a2917a
|
[
"MIT"
] | 4
|
2021-07-08T14:02:49.000Z
|
2021-08-04T08:01:28.000Z
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute input examples for VGGish from audio waveform."""
import numpy as np
#import resampy
import mel_features
import vggish_params
try:
import soundfile as sf
def wav_read(wav_file):
wav_data, sr = sf.read(wav_file, dtype='int16')
return wav_data, sr
except ImportError:
def wav_read(wav_file):
raise NotImplementedError('WAV file reading requires soundfile package.')
def waveform_to_examples(data, sample_rate):
"""Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
"""
# Convert to mono.
if len(data.shape) > 1:
data = np.mean(data, axis=1)
# Resample to the rate assumed by VGGish.
#if sample_rate != vggish_params.SAMPLE_RATE:
#data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
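  # Note: with the resampy call above disabled, `data` is assumed to already be
  # sampled at vggish_params.SAMPLE_RATE; `sample_rate` is not used further here.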
# Compute log mel spectrogram features.
log_mel = mel_features.log_mel_spectrogram(
data,
audio_sample_rate=vggish_params.SAMPLE_RATE,
log_offset=vggish_params.LOG_OFFSET,
window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
num_mel_bins=vggish_params.NUM_MEL_BINS,
lower_edge_hertz=vggish_params.MEL_MIN_HZ,
upper_edge_hertz=vggish_params.MEL_MAX_HZ)
# Frame features into examples.
features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
example_window_length = int(round(
vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
example_hop_length = int(round(
vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
log_mel_examples = mel_features.frame(
log_mel,
window_length=example_window_length,
hop_length=example_hop_length)
return log_mel_examples
def wavfile_to_examples(wav_file):
"""Convenience wrapper around waveform_to_examples() for a common WAV format.
Args:
wav_file: String path to a file, or a file-like object. The file
is assumed to contain WAV audio data with signed 16-bit PCM samples.
Returns:
See waveform_to_examples.
"""
wav_data, sr = wav_read(wav_file)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
return waveform_to_examples(samples, sr)
| 18.829016
| 80
| 0.716566
|
3938ff379a5805ed1b09439f45bcdd848e08de8b
| 3,762
|
py
|
Python
|
telemetry/telemetry/internal/backends/chrome/cast_browser_finder.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | null | null | null |
telemetry/telemetry/internal/backends/chrome/cast_browser_finder.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | null | null | null |
telemetry/telemetry/internal/backends/chrome/cast_browser_finder.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds browsers that can be Cast to and controlled by telemetry."""
from __future__ import absolute_import
import platform
import sys
from telemetry.core import cast_interface
from telemetry.core import platform as telemetry_platform
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.backends.chrome import cast_browser_backend
from telemetry.internal.backends.chrome import chrome_startup_args
from telemetry.internal.platform import cast_device
from telemetry.internal.util import local_first_binary_manager
class UnsupportedExtensionException(Exception):
pass
class PossibleCastBrowser(possible_browser.PossibleBrowser):
def __init__(self, browser_type, finder_options, cast_platform):
del finder_options
super(PossibleCastBrowser, self).__init__(browser_type,
sys.platform.lower(), True)
self._casting_tab = None
self._platform = cast_platform
self._platform_backend = (
cast_platform._platform_backend) # pylint: disable=protected-access
def __repr__(self):
return 'PossibleCastBrowser(app_type=%s)' % self.browser_type
@property
def browser_directory(self):
return None
@property
def profile_directory(self):
return None
def _InitPlatformIfNeeded(self):
pass
def _GetPathsForOsPageCacheFlushing(self):
# Cast browsers don't have a need to flush.
return []
def SetCastSender(self, casting_tab):
self._casting_tab = casting_tab
def Create(self):
"""Start the browser process."""
if local_first_binary_manager.LocalFirstBinaryManager.NeedsInit():
local_first_binary_manager.LocalFirstBinaryManager.Init(
'', None, 'linux', platform.machine())
startup_args = chrome_startup_args.GetFromBrowserOptions(
self._browser_options)
browser_backend = cast_browser_backend.CastBrowserBackend(
self._platform_backend, self._browser_options,
self.browser_directory, self.profile_directory,
self._casting_tab)
try:
return browser.Browser(
browser_backend, self._platform_backend, startup_args,
find_existing=False)
except Exception:
browser_backend.Close()
raise
def CleanUpEnvironment(self):
if self._browser_options is None:
return # No environment to clean up.
try:
self._TearDownEnvironment()
finally:
self._browser_options = None
def SupportsOptions(self, browser_options):
if len(browser_options.extensions_to_load) > 0:
raise UnsupportedExtensionException(
'Cast browsers do not support extensions.')
return True
def UpdateExecutableIfNeeded(self):
# Updating the browser is currently handled in the Chromium repository
# instead of Catapult.
pass
@property
def last_modification_time(self):
return -1
def SelectDefaultBrowser(possible_browsers):
for b in possible_browsers:
if b.browser_type == 'platform_app':
return b
return None
def FindAllBrowserTypes():
return cast_interface.CAST_BROWSERS
def FindAllAvailableBrowsers(finder_options, device):
"""Finds all available Cast browsers."""
browsers = []
if not isinstance(device, cast_device.CastDevice):
return browsers
cast_platform = telemetry_platform.GetPlatformForDevice(device,
finder_options)
browsers.extend([
PossibleCastBrowser(
finder_options.cast_receiver_type, finder_options, cast_platform)
])
return browsers
| 30.585366
| 75
| 0.736576
|
1a0707d44836b1f345983a7ea4aa8c50e39ea478
| 941
|
py
|
Python
|
utils.py
|
rlaboulaye/transformer
|
119195b2be1d2a3418141a73536d5167e97e06ed
|
[
"MIT"
] | null | null | null |
utils.py
|
rlaboulaye/transformer
|
119195b2be1d2a3418141a73536d5167e97e06ed
|
[
"MIT"
] | 5
|
2021-03-18T21:07:06.000Z
|
2022-03-11T23:30:49.000Z
|
utils.py
|
rlaboulaye/transformer
|
119195b2be1d2a3418141a73536d5167e97e06ed
|
[
"MIT"
] | null | null | null |
import sys
import random
import json
from tqdm import tqdm
import numpy as np
import torch
from jsonschema import validate
from jsonschema.exceptions import ValidationError
def verbose_print(verbose, *args):
if verbose:
print(*args)
def get_iterator(obj, verbose=False):
if verbose:
return tqdm(obj, ncols=80)
return iter(obj)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_device(verbose=True):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if verbose:
print("device: {}".format(device))
return device
def validate_against_schema(schema_instance, schema_path):
with open(schema_path, 'r') as schema_file:
schema = json.load(schema_file)
try:
validate(schema_instance, schema)
except ValidationError as err:
sys.exit('EXCEPTION: THE SCHEMA INSTANCE FAILED TO VALIDATE AGAINST THE SCHEMA.\n\n{}'.format(err))
| 23.525
| 101
| 0.766206
|
7d50328e299ae0bbe1c65ffb5087238417fdf4a9
| 407
|
py
|
Python
|
icbd/compiler/tests/51.py
|
kmod/icbd
|
9636564eb3993afa07c6220d589bbd1991923d74
|
[
"MIT"
] | 7
|
2015-04-06T15:17:13.000Z
|
2020-10-21T04:57:00.000Z
|
icbd/compiler/tests/51.py
|
kmod/icbd
|
9636564eb3993afa07c6220d589bbd1991923d74
|
[
"MIT"
] | null | null | null |
icbd/compiler/tests/51.py
|
kmod/icbd
|
9636564eb3993afa07c6220d589bbd1991923d74
|
[
"MIT"
] | 4
|
2016-05-16T17:53:08.000Z
|
2020-11-28T17:18:50.000Z
|
"""
dicts
"""
d = {}
for i in xrange(10):
d[i] = i ** 2
print d
print {1:[]}
print d.values()
d2 = {}
d2["hello"] = ""
d2["hello"] = "world"
print d2
print d2["hello"]
d3 = {}
d3[1] = 2
d3[1] = 3
print d3[1]
print len(d3)
d4 = {}
d4[(2,)] = ""
d4[(2,)] = "new"
for i in xrange(10):
d4[(i,)] = str(i)
print len(d4)
for i in xrange(20):
print (i,) in d4, d4.get((i,), "nope not in there")
| 11.305556
| 55
| 0.496314
|
9e025aa064d71251ec32eb1e9d8fb641ea197bce
| 989
|
py
|
Python
|
src/datastructures/non_repeating_character.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/datastructures/non_repeating_character.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/datastructures/non_repeating_character.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
"""
Given a string, find the first non-repeating character in it and return its index. If it doesn't exist, return -1.
Examples:
s = "leetcode"
return 0.
s = "loveleetcode",
return 2.
Note: You may assume the string contains only lowercase letters.
SOLUTION
We scan the string from left to right, counting the number of occurrences of each character in a hashtable.
Then we perform a second pass and check the counts of every character.
Whenever we hit a count of 1 we return that character, that’s the first unique letter.
Time O(N)
Space O(N)
"""
from collections import defaultdict
from typing import DefaultDict
def first_non_repeating_character(s: str) -> int:
if s is None:
raise ValueError("string must not be None.")
counts: DefaultDict[str, int] = defaultdict(int)
for c in s:
counts[c] += 1
for i, c in enumerate(s):
if counts[c] == 1:
return i
# all chars are repeating
return -1
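# Illustrative usage (expected values taken from the docstring examples above):
#   first_non_repeating_character("leetcode")      # -> 0
#   first_non_repeating_character("loveleetcode")  # -> 2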
| 27.472222
| 116
| 0.676441
|
9c0bc80ec3268f61a278835ab49a36d66a3a49f0
| 1,534
|
py
|
Python
|
src/predict_word_prog.py
|
bdebowski/language-modeling
|
114f56a2a4b059eb85eb4d62a208f596a547393b
|
[
"MIT"
] | null | null | null |
src/predict_word_prog.py
|
bdebowski/language-modeling
|
114f56a2a4b059eb85eb4d62a208f596a547393b
|
[
"MIT"
] | null | null | null |
src/predict_word_prog.py
|
bdebowski/language-modeling
|
114f56a2a4b059eb85eb4d62a208f596a547393b
|
[
"MIT"
] | null | null | null |
import os
from src.word_predictor.iword_predictor_factory import ModelName, IWordPredictorFactory
class PredictWordProg:
def __init__(self):
self._text_history = ""
def run(self, word_predictor, num_words=10):
print("<Enter some text to get started...>")
text = self.read_user_input()
self._text_history = text
word_predictor.feed(text)
while True:
self.display(word_predictor.top_n_next(num_words))
text = self.read_user_input()
self._text_history += text
word_predictor.feed(text)
def display(self, top_n_next):
"""
Screen will look like so:
--------------------------------------------
Last n words written are shown here followed by______
score predicted_next_0
score predicted_next_1
score predicted_next_2
score predicted_next_3
score predicted_next_4
> user_input
--------------------------------------------
:param top_n_next: list of 2-tuples (p, w) where w is a word and p is the likelihood of that word.
"""
os.system("cls")
print("{}____".format(self._text_history))
print("")
for p, w in top_n_next:
print("{:3f}\t{}".format(p, w))
print("")
@staticmethod
def read_user_input():
return input("> ")
if __name__ == "__main__":
PredictWordProg().run(IWordPredictorFactory().create_from_name(ModelName.GPT2_LARGE), 100)
| 29.5
| 106
| 0.578879
|
d44d816f5723f83595f9d2ed881d3504875d81e6
| 1,321
|
py
|
Python
|
var/spack/repos/builtin/packages/py-dp-gp-cluster/package.py
|
NeuralEnsemble/spack
|
bed3a694985466544aa16a19af0f0a13221b51a9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-dp-gp-cluster/package.py
|
NeuralEnsemble/spack
|
bed3a694985466544aa16a19af0f0a13221b51a9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-01-08T08:41:11.000Z
|
2022-03-14T19:28:07.000Z
|
var/spack/repos/builtin/packages/py-dp-gp-cluster/package.py
|
foeroyingur/spack
|
5300cbbb2e569190015c72d0970d25425ea38647
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from glob import glob
from os import unlink
from spack import *
class PyDpGpCluster(PythonPackage):
"""DP_GP_cluster clusters genes by expression over a time course using a
Dirichlet process Gaussian process model."""
homepage = "https://github.com/PrincetonUniversity/DP_GP_cluster"
git = "https://github.com/PrincetonUniversity/DP_GP_cluster.git"
version('2019-09-22', commit='eec12e74219f916aa86e253783905f7b5e30f6f4', deprecated=True)
depends_on('python@2.7:2.8', type=('build', 'run'))
# pip silently replaces distutils with setuptools
depends_on('py-setuptools', type='build')
depends_on('py-cython', type='build')
depends_on('py-gpy@0.8.8:0.9.9', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy@0.14:', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
@run_before('install')
def remove_cython_output(self):
for f in glob('DP_GP/*.c'):
unlink(f)
| 35.702703
| 93
| 0.68433
|
f410270365fe06e04da6479282167e7e80892ede
| 156
|
py
|
Python
|
stream_utils/__init__.py
|
cipher982/birb-watch
|
bdba5455f3b994b143e96b41afbf17d698610454
|
[
"Apache-2.0"
] | null | null | null |
stream_utils/__init__.py
|
cipher982/birb-watch
|
bdba5455f3b994b143e96b41afbf17d698610454
|
[
"Apache-2.0"
] | null | null | null |
stream_utils/__init__.py
|
cipher982/birb-watch
|
bdba5455f3b994b143e96b41afbf17d698610454
|
[
"Apache-2.0"
] | null | null | null |
# __init__.py
from .detector import detector
from .ffmpeg_cmd import transcode
from .plot_boxes import plot_boxes
from .tokens import *
from .onnx import *
| 22.285714
| 34
| 0.801282
|
f438d01c6418c1a9afc6a498ee0ec7ab8af4abbd
| 20,853
|
py
|
Python
|
misc/make_figs_catchmentspace.py
|
LukeEcomod/SpaFHy_v1_Pallas
|
bc8937a6aa72683a765506fc8f967916f81e0f12
|
[
"MIT"
] | 3
|
2019-04-26T02:43:06.000Z
|
2020-10-10T21:49:53.000Z
|
misc/make_figs_catchmentspace.py
|
LukeEcomod/SpaFHy_v1_Pallas
|
bc8937a6aa72683a765506fc8f967916f81e0f12
|
[
"MIT"
] | null | null | null |
misc/make_figs_catchmentspace.py
|
LukeEcomod/SpaFHy_v1_Pallas
|
bc8937a6aa72683a765506fc8f967916f81e0f12
|
[
"MIT"
] | 6
|
2019-06-19T12:12:29.000Z
|
2022-01-14T22:05:03.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 20:49:51 2018
@author: slauniai
"""
import os
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
#import seaborn as sns
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
from mpl_toolkits.axes_grid1 import make_axes_locatable
from canopygrid import eq_evap
eps = np.finfo(float).eps
dt = 86400.0
fig_path = r'C:\repositories\SpaFHy\FigsC3'
def calculate_pvalues(df):
# computes pairwise Pearson correlation p-values between the numeric columns of a dataframe
df = df.dropna()._get_numeric_data()
dfcols = pd.DataFrame(columns=df.columns)
pvalues = dfcols.transpose().join(dfcols, how='outer')
for r in df.columns:
for c in df.columns:
pvalues[r][c] = round(stats.pearsonr(df[r], df[c])[1], 4)
return pvalues
def modified_agreementindex(evaluation, simulation):
""" modified willmot's agreement index"""
if len(evaluation) == len(simulation):
s, e = np.array(simulation), np.array(evaluation)
# s, e = simulation, evaluation
# compute numerator and denominator
numerator = sum(abs(e - s))
denominator = sum(abs(s - np.mean(e)) + abs(e - np.mean(e)))
obj = 1 - (numerator / (denominator + eps))
return obj
chm=[['1', '2013-01-01', '2015-12-31', '2013-12-31', 0.025], # lompolojanganoja 514 ha
['2', '2005-01-01', '2008-12-31', '2005-12-31', 0.006], # liuhapuro 170 ha
['3', '2005-01-01', '2015-12-31', '2005-12-31', 0.026], # porkkavaara 72 ha
['10', '2005-01-01', '2013-12-31', '2005-12-31', 0.011], # kelopuro 74 ha. 2014 gappy, 2015 runoff is low
['11', '2014-01-01', '2015-12-31', '2014-12-31', 0.012], # hauklammenoja 137 ha
['13', '2014-01-01', '2015-12-31', '2014-12-31', 0.007], # rudbacken 436 ha
['14', '2005-01-01', '2015-12-31', '2005-12-31', 0.007], # paunulanpuro 154 ha
['16', '2005-01-01', '2015-12-31', '2005-12-31', 0.007], # huhtisuonoja 500 ha. very flat, large fraction is drained peatlands
['17', '2005-01-01', '2015-12-31', '2005-12-31', 0.006], # kesselinpuro 2100 ha
# ['18','2011-01-01', '2015-12-31', '2011-12-31'], # korpijoki, area 12200 ha so not suitable
['19', '2005-01-01', '2015-12-31', '2005-12-31', 0.006], # pahkaoja 2344 ha
['20', '2005-01-01', '2015-12-31', '2005-12-31', 0.009], # vaarajoki 1900 ha
['21', '2005-01-01', '2015-12-31', '2005-12-31', 0.01], # myllypuro 1053 ha
# ['22', '2005-01-01', '2015-12-31', '2005-12-31', 0.0095], # vaha-askanjoki 1600 ha
# [ '23','2011-01-01', '2015-12-31', '2011-12-31'], # ylijoki 5600 ha, very large and slow
['24', '2005-01-01', '2015-12-31', '2005-12-31', 0.0066], # kotioja 1800 ha
['25', '2005-01-01', '2015-12-31', '2005-12-31', 0.0095], # kohisevanpuro 1070 ha
['26', '2005-01-01', '2015-12-31', '2005-12-31', 0.02], # iittovuoma 1160 ha
['27', '2005-01-01', '2015-12-31', '2005-12-31', 0.014], # laanioja 1362 ha
['28', '2013-01-01', '2015-12-31', '2013-12-31', 0.0057], # kroopinsuo 179 ha
['29', '2012-01-01', '2015-12-31', '2012-12-31', 0.0089], # surnui 71 ha, poor data quality
# ['30', '2011-01-01', '2015-12-31', '2011-12-31', 0.0064], # pakopirtti 795 ha, uncertain catchment boundaries
['31', '2011-01-01', '2015-12-31', '2011-12-31', 0.0064], # ojakorpi 33 ha
['32', '2011-01-01', '2015-12-31', '2011-12-31', 0.0077], # rantainrahka 38 ha
['33', '2005-01-01', '2015-12-31', '2005-12-31', 0.009], # kivipuro 54 ha
]
ch = [1, 2, 3, 10, 11, 13, 14, 16, 17, 19, 20, 21, 24, 25, 26, 27, 28, 29,
31, 32, 33]
# read chdata from csv file
chdata = pd.read_csv('sve_catchment_characteristics.csv', sep=';', encoding='ansi')
# read data into pickle
d0 = pickle.load(open('R-base.pkl', 'rb')) # baseline
d1 = pickle.load(open('R-lowET.pkl', 'rb')) # low et scenario
d2 = pickle.load(open('R-hiET.pkl', 'rb')) # hi et scenario
dat0 = []; dat1 = []; dat2 = []
for k in range(len(d0)):
dat0.append(pd.DataFrame(data=d0[k], columns=d0[k].keys(), index=d0[k]['Qmeas'].index))
dat1.append(pd.DataFrame(data=d1[k], columns=d1[k].keys(), index=d1[k]['Qmeas'].index))
dat2.append(pd.DataFrame(data=d2[k], columns=d2[k].keys(), index=d2[k]['Qmeas'].index))
del d0, d1, d2
#%%
# compute annual and long-term ET, Q, P and Qm
An = pd.DataFrame(columns=['year', 'ch', 'Prec', 'Qmeas', 'Qt', 'Qt_hi', 'Qt_lo', 'ET', 'ET_hi', 'ET_lo', 'ET0', 'fsnow'], dtype=np.float)
A = pd.DataFrame(columns=['year', 'ch', 'Prec', 'Qmeas', 'Qt', 'Qt_hi', 'Qt_lo', 'ET', 'ET_hi', 'ET_lo', 'ET0', 'fsnow'], dtype=np.float)
for k in range(0, len(ch)):
# compute Equilibrium ET and fraction of P falling as snow
rn = 0.7 * dat0[k]['Rg'].values
ta = dat0[k]['Ta'].values
Eo = np.zeros(len(ta))
for nn in range(0, len(ta)):
Eo[nn] = dt * eq_evap(rn[nn], ta[nn], units='mm') # mm/d
""" compute fraction of P as snow """
prec = dat0[k]['Prec'].values
# ---state of precipitation [as water (fW) or as snow(fS)]
Tmax = 1.0; Tmin = 0.0
fW = np.zeros(len(ta))
fS = np.zeros(len(ta))
fW[ta >= Tmax] = 1.0
fS[ta <= Tmin] = 1.0
ix = np.where((ta > Tmin) & (ta < Tmax))
fW[ix] = (ta[ix] - Tmin) / (Tmax - Tmin)
fS[ix] = 1.0 - fW[ix]
fsnow = fS * prec
del ix, fW, fS
dat0[k]['ET0'] = Eo
dat0[k]['fsnow'] = fsnow
y0 = dat0[k][['Prec', 'Qmeas', 'Qt', 'ET', 'Tr', 'Ef', 'E', 'ET0', 'fsnow']].resample('a', how=sum)
y1 = dat1[k][['Qt', 'ET']].resample('a', how=sum)
y2 = dat2[k][['Qt', 'ET']].resample('a', how=sum)
y0[['Qt_hi', 'ET_lo']] = y1[['Qt', 'ET']]
y0[['Qt_lo', 'ET_hi']] = y2[['Qt', 'ET']]
y0['ch'] = ch[k]
y0['year'] = y0.index.year
An = pd.concat([An, y0])
del y0, y1, y2, Eo, fsnow
An = An.reset_index(drop=True)
#%% filter bad years with poor Qmeas or very suspicious Q/P -ratios
An = An.dropna(axis=0, how='any')
ix = An.Qmeas.values / An.Prec.values
f = (ix > 0.15) & (ix < 0.85) # removes outliers due to anomalously low Qmeas
An = An.iloc[f,:]
del f
f = An[(An.ch == 2) & (An.year == 2007)].index.values
An = An.drop(f)
f = An[(An.ch == 10) & (An.year <= 2012)].index.values
An = An.drop(f)
f = An[(An.ch == 14) & (An.year == 2006)].index.values
An = An.drop(f)
f = An[(An.ch == 16) & (An.year == 2006)].index.values
An = An.drop(f)
f = An[((An.ch == 19) & (An.year == 2007)) | \
(An.ch == 19) & (An.year == 2012)].index.values
An = An.drop(f)
f = An[(An.ch == 21) & (An.year == 2011)].index.values
An = An.drop(f)
f = An[(An.ch == 29) & (An.year < 2015)].index.values
An = An.drop(f)
f = An[(An.ch == 32) & (An.year == 2012)].index.values
An = An.drop(f)
f = An[(An.ch == 32) & (An.year == 2012)].index.values
An = An.drop(f)
f = An[((An.ch == 33) & (An.year == 2010)) | \
(An.ch == 33) & (An.year == 2013)].index.values
An = An.drop(f)
# bad catchments, measurements are very poor quality
#f = An[(An.ch == 22) | (An.ch == 30)].index.values
#An = An.drop(f)
del f
# remove same from chdata
f = chdata[(chdata.id == 22) | (chdata.id == 30)].index.values
chdata = chdata.drop(f)
del f
chdata = chdata.rename(columns={'id': 'ch'})
#chdata.index = chdata['ch']
# compute 'new' variables
dA = 0.1 # catchment area uncertainty
dP = 0.05 # precipitation uncertainty
An['Qm_P'] = An.Qmeas / An.Prec
An['Qm_P_lo'] = An['Qm_P'] * (1. - dA) / (1. + dP)
An['Qm_P_hi'] = An['Qm_P'] * (1. + dA) / (1. - dP)
An['Qt_P'] = An.Qt / An.Prec
An['Qt_P_lo'] = An.Qt_lo / An.Prec
An['Qt_P_hi'] = An.Qt_hi / An.Prec
# same for ET
An['ETm_P'] = 1.0 - An.Qm_P
An['ETm_P_lo'] = 1.0 - An.Qm_P_hi
An['ETm_P_hi'] = 1.0 - An.Qm_P_lo
# modeled ET
An['ET_P'] = 1.0 - An.Qt_P
An['ET_P_lo'] = 1.0 - An.Qt_P_hi
An['ET_P_hi'] = 1.0 - An.Qt_P_lo
# ET0
An['ET0_P'] = An.ET0 / An.Prec
An['alpha_m'] = An.ETm_P / An.ET0_P
An['alpha'] = An.ET_P / An.ET0_P
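# ET0_P = ET0/P is the dryness index used as the Budyko-style x-axis below;
# alpha_m and alpha are the observed and modelled ET/ET0 ratios, respectively.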
# merge An and chdata
An = pd.merge(An, chdata, on='ch')
# compute catchment averages
A = An.groupby(['ch']).mean()
##%% plot scatterplot of streamflow & ET
#plt.figure()
#x = An.Qmeas.values
#y = An.Qt.values
#s, s0, r, p, se = stats.linregress(x, y)
#x0 = np.array([min(x), max(x)])
#m = s*x0 + s0
#txt = []
#for k in range(0, len(An)):
# txt.append( '%s-%d' % (An['ch'].iloc[k], An['year'].iloc[k]))
#plt.subplot(121)
#plt.scatter(x, y, c='r', alpha=0.5)
#for t, i, j in zip(txt, x, y):
# plt.annotate(t, xy = (i, j), xytext = (0, 0), textcoords='offset points', fontsize=6)
#plt.xlim([100, 800])
#plt.ylim([100, 800])
#plt.plot([100, 800], [100, 800], 'k--')
#plt.plot(x0, m, 'k-')
#plt.title('slope=%.2f +/- %.2f, R2=%.2f' % (s, 2*se, r**2))
#plt.subplot(122)
#
#x1 = An.Qmeas / An.Prec
#y1 = An.Qt / An.Prec
#s, s0, r, p, se = stats.linregress(x1, y1)
#x0 = np.array([min(x1), max(x1)])
#m = s*x0 + s0
#plt.scatter(x1, y1, c='g', alpha=0.5)
#for t, i, j in zip(txt, x1, y1):
# plt.annotate(t, xy = (i, j), xytext = (0, 0), textcoords='offset points', fontsize=6)
#plt.plot(x0, m, 'k-')
#plt.title('slope=%.2f +/- %.2f, R2=%.2f' % (s, 2*se, r**2))
#plt.xlim([0, 1])
#plt.ylim([0, 1])
#plt.plot([0, 1], [0, 1], 'k--')
#
#%% plot ET / P with errorbars
gp = An.groupby('ch')
x = An.ETm_P.values
y = An.ET_P.values
# estimate slope and r2 for line forced through origin
mm = y[:, np.newaxis]
slp, res, _, _ = np.linalg.lstsq(mm, x)
r2 = 1 - res / sum((y - np.mean(y))**2)
# estimate linear regression with uncertainty
# for computing slopes, set origin to mean(x), mean(y)
xx = x - np.mean(x)
yy = y - np.mean(y)
s, s0, r, p, se = stats.linregress(xx, yy)
r2 = r**2
rmse = np.sqrt(((y - x) ** 2).mean())
me = np.mean(y - x)
x0 = np.array([min(x)-0.05, max(x)+0.05])
xx = np.array([min(xx)-0.05, max(xx)+0.05])
m = s*xx + s0 + np.mean(y)
ml = (s + 2*se)*xx + np.mean(y)
mh = (s - 2*se)*xx+ np.mean(y)
tstr = 's = %.2f$\pm$%.2f\nR$^2$ = %.2f\nRMSE = %.2f\nME = %.2f' % (s, 2*se, r2, rmse, me)
n = len(np.unique(An.ch))
fig, ax = plt.subplots()
plt.plot([0, 1], [0, 1], 'k--', alpha=0.3, linewidth=1)
colors = plt.cm.tab20c(np.linspace(0.01, 0.99, n))
ax.set_prop_cycle('color', colors)
ax.set_aspect('equal')
for name, group in gp:
yerr = abs(np.array([group.ET_P_hi.values - group.ET_P.values, group.ET_P_lo.values - group.ET_P.values]))
xerr = abs(np.array([group.ETm_P_hi.values - group.ETm_P.values, group.ETm_P_lo.values - group.ETm_P.values]))
ax.errorbar(1 - group.Qm_P, 1 - group.Qt_P, fmt='o', xerr=xerr, yerr=yerr, label=name,
alpha=0.8, linewidth=1)
ax.legend(numpoints=1, loc='upper right', fontsize=8)
plt.plot(x0, m, 'k-', zorder=100)
plt.plot(x0, ml, 'k--', x0, mh, 'k--', linewidth=1, zorder=50)
plt.axis([0, 1.0, 0, 1.0])
plt.ylabel(r'$ \langle \overline{ET}_{mod} \, / \, \overline{P} \rangle $')
plt.xlabel(r'$ \langle \overline{ET}_{wb} \, / \, \overline{P} \rangle $')
plt.text(0.05, 0.8, tstr, fontsize=10)
plt.show()
#plt.savefig(os.path.join(fig_path, 'Fig_5b_cathcmentET_to_P.png'), dpi=600)
#plt.savefig(os.path.join(fig_path, 'Fig_5b_cathcmentET_to_P.pdf'))
#%% statistics for catchment means
x = A.ETm_P.values
y = A.ET_P.values
# estimate slope and r2 for line forced through origin
mm = y[:, np.newaxis]
slp, res, _, _ = np.linalg.lstsq(mm, x)
r2 = 1 - res / sum((y - np.mean(y))**2)
# estimate linear regression with uncertainty
# for computing slopes, set origin to mean(x), mean(y)
xx = x - np.mean(x)
yy = y - np.mean(y)
s, s0, r, p, se = stats.linregress(xx, yy)
r2 = r**2
rmse = np.sqrt(((y - x) ** 2).mean())
me = np.mean(y - x)
print('s',s, '2xse', 2*se, 'r2', r2, 'rmse', rmse, 'me', me)
#%% plot ET / P comparison to ET0/P
#A = pd.DataFrame(columns=['year', 'ch', 'Prec', 'Qmeas', 'Qt', 'Qt_hi', 'Qt_lo', 'ET', 'ET_hi', 'ET_lo', 'ET0', 'fsnow'])
""" this is for comparing ET in budyko-framework """
dA = 0.1 # catchment area uncertainty
dP = 0.05 # precipitation uncertainty
An['Qm_P_lo'] = An['Qm_P'] * (1. - dA) / (1. + dP)
An['Qm_P_hi'] = An['Qm_P'] * (1. + dA) / (1. - dP)
An['ETm_P'] = 1.0 - An.Qm_P
An['ETm_P_lo'] = 1.0 - An.Qm_P_hi
An['ETm_P_hi'] = 1.0 - An.Qm_P_lo
# modeled ET
An['ET_P'] = 1.0 - An.Qt_P
An['ET_P_lo'] = 1.0 - An.Qt_P_hi
An['ET_P_hi'] = 1.0 - An.Qt_P_lo
# ET0
An['ET0_P'] = An.ET0 / An.Prec
gp = An.groupby('ch')
n = len(np.unique(An.ch))
fig, ax = plt.subplots(2,1)
fig.set_size_inches(6, 11)
#plt.plot([0, 1], [0, 1], 'k--', alpha=0.3, linewidth=1)
colors = plt.cm.tab20c(np.linspace(0.01, 0.99, n))
ax[0].set_prop_cycle('color', colors)
ax[0].set_aspect('equal')
ax[1].set_prop_cycle('color', colors)
ax[1].set_aspect('equal')
for name, group in gp:
x = group.ET0_P
#plt.subplot(121)
y1 = group.ETm_P
yerr1 = abs(np.array([group.ETm_P_lo.values - group.ETm_P.values, group.ETm_P_hi.values - group.ETm_P.values]))
# xerr = abs(np.array([group.Qm_P_lo.values - group.Qm_P.values, group.Qm_P_hi.values - group.Qm_P.values]))
ax[0].errorbar(x, y1, fmt='o', yerr=yerr1, label=name, alpha=0.7)
ax[0].plot([0, 1], [0, 1], 'k--', alpha=0.5, linewidth=0.5)
y2 = group.ET_P
yerr2 = abs(np.array([group.ET_P_lo.values - group.ET_P.values, group.ET_P_hi.values - group.ET_P.values]))
ax[1].errorbar(x, y2, fmt='o', yerr=yerr1, label=name, alpha=0.7)
ax[1].plot([0, 1], [0, 1], 'k--', alpha=0.5, linewidth=0.5)
ax[0].legend(numpoints=1, loc='upper left', fontsize=8)
#plt.plot(x0, m, 'k-', zorder=100)
ax[0].axis([0, 1.8, 0, 1.0])
ax[0].set_ylabel(r'$\overline{ET}_{obs} \, / \, \overline{P}$ (-)')
ax[0].set_xlabel(r'$\overline{ET_0} \, / \, \overline{P}$ (-)')
ax[1].axis([0, 1.8, 0, 1.0])
ax[1].set_ylabel(r'$\overline{ET}_{mod} \, / \, \overline{P}$ (-)')
ax[1].set_xlabel(r'$\overline{ET_0} \, / \, \overline{P}$ (-)')
#plt.text(0.05, 0.8, tstr, fontsize=10)
plt.show()
#plt.savefig(os.path.join(fig_path, 'Fig_5b_cathcmentET_to_P.png'), dpi=600)
#plt.savefig(os.path.join(fig_path, 'Fig_5b_cathcmentET_to_P.pdf'))
#%% plot ET / P comparison to ET0/P
# As above but with color = ET0 or LAT and size = LAI
""" this is for comparing ET in budyko-framework """
gp = An.groupby('LAT_deg')
n = len(np.unique(An.LAT_deg))
fig, ax = plt.subplots(2,1)
fig.set_size_inches(6, 11)
#plt.plot([0, 1], [0, 1], 'k--', alpha=0.3, linewidth=1)
colors = plt.cm.RdBu(np.linspace(0.01, 0.99, n))
ax[0].set_prop_cycle('color', colors)
ax[0].set_aspect('equal')
ax[1].set_prop_cycle('color', colors)
ax[1].set_aspect('equal')
for name, group in gp:
x = group.ET0_P
#plt.subplot(121)
y1 = group.ETm_P
yerr1 = abs(np.array([group.ETm_P_lo.values - group.ETm_P.values, group.ETm_P_hi.values - group.ETm_P.values]))
# xerr = abs(np.array([group.Qm_P_lo.values - group.Qm_P.values, group.Qm_P_hi.values - group.Qm_P.values]))
ax[0].errorbar(x, y1, fmt='o', yerr=yerr1, label=name, alpha=0.7)
ax[0].plot([0, 1], [0, 1], 'k--', alpha=0.5, linewidth=0.5)
y2 = group.ET_P
yerr2 = abs(np.array([group.ET_P_lo.values - group.ET_P.values, group.ET_P_hi.values - group.ET_P.values]))
ax[1].errorbar(x, y2, fmt='o', yerr=yerr1, label=name, alpha=0.7)
ax[1].plot([0, 1], [0, 1], 'k--', alpha=0.5, linewidth=0.5)
ax[0].legend(numpoints=1, loc='upper left', fontsize=8)
#plt.plot(x0, m, 'k-', zorder=100)
ax[0].axis([0, 1.8, 0, 1.0])
ax[0].set_ylabel(r'$\overline{ET}_{obs} \, / \, \overline{P}$ (-)')
ax[0].set_xlabel(r'$\overline{ET_0} \, / \, \overline{P}$ (-)')
ax[1].axis([0, 1.8, 0, 1.0])
ax[1].set_ylabel(r'$\overline{ET}_{mod} \, / \, \overline{P}$ (-)')
ax[1].set_xlabel(r'$\overline{ET_0} \, / \, \overline{P}$ (-)')
#plt.text(0.05, 0.8, tstr, fontsize=10)
plt.show()
#plt.savefig(os.path.join(fig_path, 'Fig_5b_cathcmentET_to_P.png'), dpi=600)
#plt.savefig(os.path.join(fig_path, 'Fig_5b_cathcmentET_to_P.pdf'))
#%% what explains ET/ETo variability?
B = An.sort_values('LAT_deg', axis=0)
relsize = np.array(B.LAI / max(B.LAI))
marksize = 10 + 150*relsize
rr = B.LAT_deg
yerr = abs(np.array([B.ET_P_hi.values - B.ET_P.values, B.ET_P_lo.values - B.ET_P.values]))
xerr = abs(np.array([B.ETm_P_hi.values - B.ETm_P.values, B.ETm_P_lo.values - B.ETm_P.values]))
fig1, ax1 = plt.subplots(1,1)
fig1.set_size_inches(6, 6)
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
ax1.set_aspect('equal')
# ax1[1].set_aspect('equal')
# P/E scatterplot
x = B.ETm_P
y = B.ET_P
xx = x - np.mean(x)
yy = y - np.mean(y)
s, s0, r, p, se = stats.linregress(xx, yy)
r2 = r**2
rmse = np.sqrt(((y - x) ** 2).mean())
me = np.mean(y - x)
x0 = np.array([min(x)-0.05, max(x)+0.05])
xx = np.array([min(xx)-0.05, max(xx)+0.05])
m = s*xx + s0 + np.mean(y)
ml = (s + 2*se)*xx + np.mean(y)
mh = (s - 2*se)*xx+ np.mean(y)
tstr = 's = %.2f$\pm$%.2f\nR$^2$ = %.2f\nRMSE = %.2f\nME = %.2f' % (s, 2*se, r2, rmse, me)
#for j in range(len(x)):
# ax1[0].errorbar(x[j], y[j], yerr[j], xerr[j], marker='None', mec='k', mfc='k', alpha=0.9, zorder=-10)
gp = B.groupby('LAT_deg')
n = len(np.unique(B.LAT_deg))
colors = plt.cm.RdBu(np.linspace(0.01, 0.99, n))
#ax1[0].set_prop_cycle('color', colors)
ax1.set_aspect('equal')
j = 0
for name, group in gp:
x1 = group.ETm_P
y1 = group.ET_P
yerr1 = abs(np.array([group.ET_P_hi.values - group.ET_P.values, group.ET_P_lo.values - group.ET_P.values]))
xerr1 = abs(np.array([group.ETm_P_hi.values - group.ETm_P.values, group.ETm_P_lo.values - group.ETm_P.values]))
# xerr1 = abs(np.array([group.ETm_P_lo.values - group.ETm_P.values, group.ETm_P_hi.values - group.ETm_P.values]))
# yerr1 = abs(np.array([group.ET_P_lo.values - group.ET_P.values, group.ET_P_hi.values - group.ET_P.values]))
ax1.errorbar(x1, y1, yerr=yerr1, xerr=xerr1, fmt='None', ecolor=colors[j], alpha=0.6, zorder=-10, linewidth=1)
j += 1
#ax1[0].errorbar(x, y, yerr=yerr, xerr=xerr, fmt='None', ecolor='k', alpha=0.3, zorder=-10)
ax1.plot([0, 1], [0, 1], 'k--', alpha=0.4, linewidth=1)
sc = ax1.scatter(x, y, c=rr, edgecolor = 'k', s=marksize, alpha=0.6, cmap='RdBu')
ax1.text(0.05, 0.75, tstr, fontsize=10)
cb = fig.colorbar(sc, cax=cax, orientation='vertical')
ax1.plot(x0, m, 'k-', zorder=100)
ax1.plot(x0, ml, 'k--', x0, mh, 'k--', linewidth=1, zorder=50)
cb.set_label('LAT (deg)', rotation=90, fontsize=9)
ax1.axis([0, 1.0, 0, 1.0])
ax1.set_xlabel(r'$ \langle \overline{ET}_{wb} \, / \, \overline{P} \rangle $')
ax1.set_ylabel(r'$ \langle \overline{ET}_{mod} \, / \, \overline{P} \rangle $')
plt.show()
plt.savefig('cathcmentET.png', dpi=600)
plt.savefig('cathcmentET.pdf')
#%% E/P vs Eo/P scatterplot
# E/P vs Eo/P scatterplot Modeled values
fig2, ax2 = plt.subplots(1,1)
fig2.set_size_inches(6, 6)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes('right', size='5%', pad=0.05)
ax2.set_aspect('equal')
x = B.ET0_P
y = B.ETm_P
sc2=ax2.scatter(x, y, c=rr, edgecolor = 'k', s=marksize, alpha=0.6, cmap='RdBu')
cb2 = fig.colorbar(sc2, cax=cax2, orientation='vertical')
cb2.set_label('LAT (deg)', rotation=90, fontsize=9)
ax2.set_xlim([0, 1.3])
ax2.set_ylim([0, 1.0])
ax2.plot([0, 1], [0, 1], 'k--', alpha=0.4, linewidth=1)
ax2.set_ylabel(r'$\overline{ET_{wb}} \, / \, \overline{P}$ (-)')
ax2.set_xlabel(r'$\overline{ET_0} \, / \, \overline{P}$ (-)')
plt.show()
plt.savefig(os.path.join(fig_path, 'Fig6_cathcmentET_Budyko.png'), dpi=600)
plt.savefig(os.path.join(fig_path, 'Fig6_cathcmentET_Budyko.pdf'))
#%% regression model to test ET relations to climatic and catchment variables
del y, x
#y = An.alpha_m
y = An.ET_P * An.Prec.values
y.index = An.ch
x = An[['ET0', 'Prec', 'fsnow', 'LAI', 'f_decid', 'peat']].copy()
x['LAI'] = np.sqrt(x['LAI'])
x.index = An.ch
model = smf.MixedLM(y, x, groups=y.index)
result = model.fit()
print(result.summary())
# simple multiple regression
y = An.alpha_m
x = An[['ET0', 'Prec', 'fsnow', 'LAI', 'f_decid', 'peat']].copy()
x['LAI'] = np.sqrt(x['LAI'])
x = sm.add_constant(x)
est = sm.OLS(y, x).fit()
est.summary()
xx = A[['LAT_deg', 'ET0', 'Prec', 'fsnow', 'LAI', 'f_decid', 'peat', 'Tr', 'Ef', 'E']].copy()
# xx['LAI'] = np.sqrt(xx['LAI'])
print('means', xx.corr())
xxx = An[['LAT_deg', 'ET0', 'Prec', 'fsnow', 'LAI', 'f_decid', 'peat', 'Tr', 'Ef', 'E']].copy()
# xxx['LAI'] = np.sqrt(xxx['LAI'])
print('raw', xxx.corr())
#xm = sm.add_constant(xm)
#estm = sm.OLS(ym, xm).fit()
#estm.summary()
#A = pd.DataFrame(columns=['year', 'ch', 'Prec', 'Qmeas', 'Qt', 'Qt_hi', 'Qt_lo', 'ET', 'ET_hi', 'ET_lo', 'ET0', 'fsnow'])
#%% multiple regression for model-data residuals
# simple multiple regression
y = An.ET_P - An.ETm_P
x = An[['LAI', 'TWI', 'TWI_b', 'f_decid', 'fine', 'LAT_deg',
'LON_deg', 'med', 'coarse', 'peat', 'top_m', 'area']].copy()
# x['LAI'] = np.sqrt(x['LAI'])
x = sm.add_constant(x)
est = sm.OLS(y, x).fit()
print(est.summary())
model = smf.MixedLM(y, x, groups=y.index)
result = model.fit()
print(result.summary())
| 33.633871
| 138
| 0.601544
|
0c4e596555d8da634a0f039681e2a438ec8ebf98
| 499
|
py
|
Python
|
apps/userprofile/models.py
|
SteinOveHelset/minutos
|
73cdcb44409f97b05680c6a048f80eb4bd3f1f46
|
[
"MIT"
] | 23
|
2020-11-19T19:33:34.000Z
|
2022-03-02T15:43:08.000Z
|
apps/userprofile/models.py
|
SteinOveHelset/minutos
|
73cdcb44409f97b05680c6a048f80eb4bd3f1f46
|
[
"MIT"
] | 1
|
2021-04-03T18:02:50.000Z
|
2021-04-03T18:02:50.000Z
|
apps/userprofile/models.py
|
SteinOveHelset/minutos
|
73cdcb44409f97b05680c6a048f80eb4bd3f1f46
|
[
"MIT"
] | 2
|
2021-01-23T02:06:59.000Z
|
2021-09-09T04:58:09.000Z
|
#
#
from django.contrib.auth.models import User
from django.db import models
#
# Models
class Userprofile(models.Model):
user = models.OneToOneField(User, related_name='userprofile', on_delete=models.CASCADE)
active_team_id = models.IntegerField(default=0)
avatar = models.ImageField(upload_to='uploads/avatars/', blank=True, null=True)
def get_avatar(self):
if self.avatar:
return self.avatar.url
else:
return '/static/images/avatar.png'
| 24.95
| 91
| 0.695391
|
2ba827966fe6c1fcede90c376b3847affbdb2b0d
| 7,363
|
py
|
Python
|
examples/inverse/psf_ctf_label_leakage.py
|
jhouck/mne-python
|
95facbd1a28e471cf81e1d86735fa272a66d13d1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/inverse/psf_ctf_label_leakage.py
|
jhouck/mne-python
|
95facbd1a28e471cf81e1d86735fa272a66d13d1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/inverse/psf_ctf_label_leakage.py
|
jhouck/mne-python
|
95facbd1a28e471cf81e1d86735fa272a66d13d1
|
[
"BSD-3-Clause"
] | 4
|
2021-09-08T14:35:26.000Z
|
2022-02-25T22:34:52.000Z
|
"""
============================================================
Visualize source leakage among labels using a circular graph
============================================================
This example computes all-to-all pairwise leakage among 68 regions in
source space based on MNE inverse solutions and a FreeSurfer cortical
parcellation. Label-to-label leakage is estimated as the correlation among the
labels' point-spread functions (PSFs). It is visualized using a circular graph
which is ordered based on the locations of the regions in the axial plane.
"""
# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD-3-Clause
# %%
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (read_inverse_operator,
make_inverse_resolution_matrix,
get_point_spread)
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
# %%
# Load forward solution and inverse operator
# ------------------------------------------
#
# We need a matching forward solution and inverse operator to compute
# resolution matrices for different methods.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif'
forward = mne.read_forward_solution(fname_fwd)
# Convert forward solution to fixed source orientations
mne.convert_forward_solution(
forward, surf_ori=True, force_fixed=True, copy=False)
inverse_operator = read_inverse_operator(fname_inv)
# Compute resolution matrices for MNE
rm_mne = make_inverse_resolution_matrix(forward, inverse_operator,
method='MNE', lambda2=1. / 3.**2)
src = inverse_operator['src']
del forward, inverse_operator # save memory
# %%
# Read and organise labels for cortical parcellation
# --------------------------------------------------
#
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
n_labels = len(labels)
label_colors = [label.color for label in labels]
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
# %%
# Compute point-spread function summaries (PCA) for all labels
# ------------------------------------------------------------
#
# We summarise the PSFs per label by their first five principal components, and
# use the first component to evaluate label-to-label leakage below.
# Compute first PCA component across PSFs within labels.
# Note the differences in explained variance, probably due to different
# spatial extents of labels.
n_comp = 5
stcs_psf_mne, pca_vars_mne = get_point_spread(
rm_mne, src, labels, mode='pca', n_comp=n_comp, norm=None,
return_pca_vars=True)
n_verts = rm_mne.shape[0]
del rm_mne
# %%
# We can show the explained variances of principal components per label. Note
# how they differ across labels, most likely due to their varying spatial
# extent.
with np.printoptions(precision=1):
for [name, var] in zip(label_names, pca_vars_mne):
print(f'{name}: {var.sum():.1f}% {var}')
# %%
# The output shows the summed variance explained by the first five principal
# components as well as the explained variances of the individual components.
#
# Evaluate leakage based on label-to-label PSF correlations
# ---------------------------------------------------------
#
# Note that correlations ignore the overall amplitude of PSFs, i.e. they do
# not show which region will potentially be the bigger "leaker".
# get PSFs from Source Estimate objects into matrix
psfs_mat = np.zeros([n_labels, n_verts])
# Leakage matrix for MNE, get first principal component per label
for [i, s] in enumerate(stcs_psf_mne):
psfs_mat[i, :] = s.data[:, 0]
# Compute label-to-label leakage as Pearson correlation of PSFs
# Sign of correlation is arbitrary, so take absolute values
leakage_mne = np.abs(np.corrcoef(psfs_mat))
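# leakage_mne has shape (n_labels, n_labels); entry (i, j) is the absolute
# Pearson correlation between the first PSF components of labels i and j.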
# Save the plot order and create a circular layout
node_order = lh_labels[::-1] + rh_labels # mirror label order across hemis
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 200 strongest connections.
fig = plt.figure(num=None, figsize=(8, 8), facecolor='black')
plot_connectivity_circle(leakage_mne, label_names, n_lines=200,
node_angles=node_angles, node_colors=label_colors,
title='MNE Leakage', fig=fig)
# %%
# Most leakage occurs for neighbouring regions, but also for deeper regions
# across hemispheres.
#
# Save the figure (optional)
# --------------------------
#
# Matplotlib controls figure facecolor separately for interactive display
# versus for saved figures. Thus when saving you must specify ``facecolor``,
# else your labels, title, etc will not be visible::
#
# >>> fname_fig = data_path + '/MEG/sample/plot_label_leakage.png'
# >>> fig.savefig(fname_fig, facecolor='black')
#
# Plot PSFs for individual labels
# -------------------------------
#
# Let us confirm for left and right lateral occipital lobes that there is
# indeed no leakage between them, as indicated by the correlation graph.
# We can plot the summary PSFs for both labels to examine the spatial extent of
# their leakage.
# left and right lateral occipital
idx = [22, 23]
stc_lh = stcs_psf_mne[idx[0]]
stc_rh = stcs_psf_mne[idx[1]]
# Maximum for scaling across plots
max_val = np.max([stc_lh.data, stc_rh.data])
# %%
# Point-spread function for the lateral occipital label in the left hemisphere
brain_lh = stc_lh.plot(subjects_dir=subjects_dir, subject='sample',
hemi='both', views='caudal',
clim=dict(kind='value',
pos_lims=(0, max_val / 2., max_val)))
brain_lh.add_text(0.1, 0.9, label_names[idx[0]], 'title', font_size=16)
# %%
# and in the right hemisphere.
brain_rh = stc_rh.plot(subjects_dir=subjects_dir, subject='sample',
hemi='both', views='caudal',
clim=dict(kind='value',
pos_lims=(0, max_val / 2., max_val)))
brain_rh.add_text(0.1, 0.9, label_names[idx[1]], 'title', font_size=16)
# %%
# Both summary PSFs are confined to their respective hemispheres, indicating
# that there is indeed low leakage between these two regions.
| 38.752632
| 79
| 0.678392
|
460564d107a8f792884dbd1eca90abd0ad46a3ee
| 113
|
py
|
Python
|
build/lib/fbbotw/__init__.py
|
JoabMendes/fb_bot_wrapper
|
312aa9a1cf5e53f70f2c7f67e1a53649e20966b7
|
[
"MIT"
] | 19
|
2016-11-07T20:46:13.000Z
|
2019-11-27T18:17:44.000Z
|
fbbotw/__init__.py
|
JoabMendes/fbbotw
|
312aa9a1cf5e53f70f2c7f67e1a53649e20966b7
|
[
"MIT"
] | 20
|
2016-11-02T16:02:39.000Z
|
2018-06-08T18:40:20.000Z
|
fbbotw/__init__.py
|
JoabMendes/fb_bot_wrapper
|
312aa9a1cf5e53f70f2c7f67e1a53649e20966b7
|
[
"MIT"
] | 7
|
2017-07-26T08:43:15.000Z
|
2021-05-10T12:47:33.000Z
|
# -*- coding: utf-8 -*-
__author__ = """Joabe Mendes"""
__email__ = 'joabe.mdl@gmail.com'
__version__ = '2.1.3'
| 18.833333
| 33
| 0.619469
|
057b027d5710a78e6969bd6838a73fc9c5475fa4
| 2,209
|
py
|
Python
|
setup.py
|
danielrivard/innova-controls
|
698d56a748405fae2f7d9434f55b29d646ff0f75
|
[
"Apache-2.0"
] | 1
|
2022-02-27T16:14:15.000Z
|
2022-02-27T16:14:15.000Z
|
setup.py
|
danielrivard/innova-controls
|
698d56a748405fae2f7d9434f55b29d646ff0f75
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
danielrivard/innova-controls
|
698d56a748405fae2f7d9434f55b29d646ff0f75
|
[
"Apache-2.0"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
from setuptools import setup
import pathlib
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
name="innova-controls",
version="1.0.8",
description="Innova Air Conditioner Control API",
license="Apache",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/danielrivard/innova-controls",
author="Daniel Rivard",
# author_email='author@example.com', # Optional
# https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
],
keywords="development, home automation, library, innova",
package_dir={"": "src"},
py_modules=["innova_controls"],
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires=">=3.9, <4",
install_requires=["aiohttp >= 3.0.0, < 4.0.0","retry2>=0.9.3"],
project_urls={ # Optional
"Bug Reports": "https://github.com/danielrivard/innova-controls/issues",
"Source": "https://github.com/danielrivard/innova-controls/",
},
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
)
| 35.063492
| 97
| 0.651426
|
118e0b4dd8fc2a8c816d2e966d6c03f80dc440e2
| 87,583
|
py
|
Python
|
numpy/core/tests/test_multiarray.py
|
girving/numpy
|
059334c491940752b08070074cc221b69487e5e4
|
[
"BSD-3-Clause"
] | 1
|
2019-06-19T06:37:47.000Z
|
2019-06-19T06:37:47.000Z
|
numpy/core/tests/test_multiarray.py
|
girving/numpy
|
059334c491940752b08070074cc221b69487e5e4
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/tests/test_multiarray.py
|
girving/numpy
|
059334c491940752b08070074cc221b69487e5e4
|
[
"BSD-3-Clause"
] | 1
|
2016-01-14T15:55:41.000Z
|
2016-01-14T15:55:41.000Z
|
import tempfile
import sys
import os
import numpy as np
from numpy.testing import *
from nose import SkipTest
from numpy.core import *
from numpy.core.multiarray_tests import test_neighborhood_iterator, test_neighborhood_iterator_oob
# Need to test an object that does not fully implement math interface
from datetime import timedelta
from numpy.compat import asbytes, getexception, strchar
from test_print import in_foreign_locale
class TestFlags(TestCase):
def setUp(self):
self.a = arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(RuntimeError, runstring, 'self.a[0] = 3', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
class TestAttributes(TestCase):
def setUp(self):
self.one = arange(10)
self.two = arange(20).reshape(4,5)
self.three = arange(60,dtype=float64).reshape(2,5,6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4,5))
assert_equal(self.three.shape, (2,5,6))
self.three.shape = (10,3,2)
assert_equal(self.three.shape, (10,3,2))
self.three.shape = (2,5,6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, dtype(int_))
assert_equal(self.three.dtype, dtype(float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return ndarray([size], buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
#self.assertRaises(ValueError, make_array, 8, 3, 0)
#self.assertRaises(ValueError, lambda: ndarray([1], strides=4))
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides=strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
assert_equal(make_array(7,3,1), array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
#self.assertRaises(ValueError, make_array, 8, 3, 0)
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = empty((3,2,1), t)
y = empty((3,2,1), t)
x.fill(1)
y[...] = 1
assert_equal(x,y)
x = array([(0,0.0), (1,1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2,3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0,1,2],[0,1,2]])
a[...] = np.arange(2).reshape(2,1)
assert_equal(a, [[0,0,0],[1,1,1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1,2,3)
assert_equal(a, [[5,4,3],[2,1,0]])
# The other type of broadcasting would require a reduction operation.
def assign(a,b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2,2,3))
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = dtype('i4')
assert_equal(d1, dtype(int32))
d2 = dtype('f8')
assert_equal(d2, dtype(float64))
class TestZeroRank(TestCase):
def setUp(self):
self.d = array(0), array('x', object)
def test_ellipsis_subscript(self):
a,b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...] is a)
self.assertTrue(b[...] is b)
def test_empty_subscript(self):
a,b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a,b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[array([], int)], a)
self.assertRaises(IndexError, lambda x: x[array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a,b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a,b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a,b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a,b = self.d
self.assertEqual(a[newaxis].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ...].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1))
self.assertEqual(a[..., newaxis, newaxis].shape, (1,1))
self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1))
self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a,b = self.d
def subscript(x, i): x[i]
self.assertRaises(IndexError, subscript, a, (newaxis, 0))
self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
def test_constructor(self):
x = ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = ndarray((),buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = array(2)
self.assertRaises(ValueError, add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = array([0,1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape,())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape,())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[newaxis].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ...].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1))
self.assertEqual(a[..., newaxis, newaxis].shape, (1,1))
self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1))
self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i): x[i]
self.assertRaises(IndexError, subscript, a, (newaxis, 0))
self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1,2,3,3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0,0,1,2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3,2,1,0])
a = np.arange(6).reshape(2,3)
a[::-1,:] = a[:,::-1]
assert_equal(a, [[5,4,3],[2,1,0]])
a = np.arange(6).reshape(2,3)
a[::-1,::-1] = a[:,::-1]
assert_equal(a, [[3,4,5],[0,1,2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2,3,4,3,4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0,1,0,1,2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4,3,2,3,4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0,1,2,1,0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2,3,4,3,4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0,1,0,1,2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, array, x())
def test_from_string(self) :
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123','123']
result = array([123, 123], dtype=int)
for type in types :
msg = 'String conversion for %s' % type
assert_equal(array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
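# Structured (record) dtypes: field access on subarray fields and elementwise
# comparison/broadcasting semantics for multi-dimensional fields.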
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, these aren't equal
assert_(np.any(a['a'].T != a.T['a']))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1,2,3],'a', [[1,2],[3,4]]),([3,3,3],'b',[[0,0],[0,0]])],
dtype=[('a', ('f4',3)), ('b', np.object), ('c', ('i4',(2,2)))])
b = a.copy()
assert_equal(a==b, [True,True])
assert_equal(a!=b, [False,False])
b[1].b = 'c'
assert_equal(a==b, [True,False])
assert_equal(a!=b, [False,True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a==b, [False,False])
assert_equal(a!=b, [True,True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i,j] = 10
assert_equal(a==b, [False,True])
assert_equal(a!=b, [True,False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)],[(1,)]],dtype=[('a','f8')])
b = np.array([(0,),(0,),(1,)],dtype=[('a','f8')])
assert_equal(a==b, [[True, True, False], [False, False, True]])
assert_equal(b==a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)],[(1,)]],dtype=[('a','f8',(1,))])
b = np.array([(0,),(0,),(1,)],dtype=[('a','f8',(1,))])
assert_equal(a==b, [[True, True, False], [False, False, True]])
assert_equal(b==a, [[True, True, False], [False, False, True]])
a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))])
b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))])
assert_equal(a==b, [[True, False, False], [False, False, True]])
assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that broadcasting Fortran-style arrays with a subarray works
a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))], order='F')
b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))])
assert_equal(a==b, [[True, False, False], [False, False, True]])
assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1,2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2,1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
assert_equal(x == y, False)
class TestBool(TestCase):
def test_test_interning(self):
a0 = bool_(0)
b0 = bool_(False)
self.assertTrue(a0 is b0)
a1 = bool_(1)
b1 = bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(array([True])[0] is a1)
self.assertTrue(array(True)[()] is a1)
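# ndarray method behaviour: round, transpose, sort/argsort for all kinds,
# sorting by field order, searchsorted, flatten, ravel order handling, dot
# and setasflat.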
class TestMethods(TestCase):
def test_test_round(self):
assert_equal(array([1.2,1.5]).round(), [1,2])
assert_equal(array(1.5).round(), 2)
assert_equal(array([12.2,15.5]).round(-1), [10,20])
assert_equal(array([12.15,15.51]).round(1), [12.2,15.5])
def test_transpose(self):
a = array([[1,2],[3,4]])
assert_equal(a.transpose(), [[1,3],[2,4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0,0))
self.assertRaises(ValueError, lambda: a.transpose(0,1,2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
        # necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(100)
b = a[::-1].copy()
for kind in ['q','m','h'] :
msg = "scalar sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q','m','h'] :
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q','m','h'] :
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(100)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sort.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(100)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "unicode sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
        # TODO: check object array sorts.
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3,2],[1,0]])
b = np.array([[1,0],[3,2]])
c = np.array([[2,3],[0,1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
        # using None is known to fail at this point
# d = a.copy()
# d.sort(axis=None)
#assert_equal(d, c, "test sort with axis=None")
def test_sort_order(self):
# Test sorting an array with fields
x1=np.array([21,32,14])
x2=np.array(['my','first','name'])
x3=np.array([3.1,4.5,6.2])
r=np.rec.fromarrays([x1,x2,x3],names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, array([14,21,32]))
assert_equal(r.word, array(['name','my','first']))
assert_equal(r.number, array([6.2,3.1,4.5]))
r.sort(order=['word'])
assert_equal(r.id, array([32,21,14]))
assert_equal(r.word, array(['first','my','name']))
assert_equal(r.number, array([4.5,3.1,6.2]))
r.sort(order=['number'])
assert_equal(r.id, array([21,32,14]))
assert_equal(r.word, array(['my','first','name']))
assert_equal(r.number, array([3.1,4.5,6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'),('col2',strtype)]
r = np.array([('a', 1),('b', 255), ('c', 3), ('d', 258)],
dtype= mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(100)
b = a[::-1].copy()
for kind in ['q','m','h'] :
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q','m','h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q','m','h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(100)])
b = a[::-1].copy()
r = arange(100)
rr = r[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(100)], dtype=np.unicode)
b = a[::-1].copy()
r = arange(100)
rr = r[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # TODO: check object array argsorts.
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3,2],[1,0]])
b = np.array([[1,1],[0,0]])
c = np.array([[1,0],[1,0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using None is known to fail at this point
        # assert_equal(a.copy().argsort(axis=None), c)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([np.nan, 1, 0])
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1,4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1,10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0,128],dtype='<i4')
b = a.searchsorted(np.array(128,dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0,128],dtype='>i4')
b = a.searchsorted(np.array(128,dtype='>i4'))
assert_equal(b, 1, msg)
def test_flatten(self):
x0 = np.array([[1,2,3],[4,5,6]], np.int32)
x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32)
y0 = np.array([1,2,3,4,5,6], np.int32)
y0f = np.array([1,4,2,5,3,6], np.int32)
y1 = np.array([1,2,3,4,5,6,7,8], np.int32)
y1f = np.array([1,5,3,7,2,6,4,8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
def test_ravel(self):
a = np.array([[0,1],[2,3]])
assert_equal(a.ravel(), [0,1,2,3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0,2,1,3])
assert_equal(a.ravel(order='C'), [0,1,2,3])
assert_equal(a.ravel(order='F'), [0,2,1,3])
assert_equal(a.ravel(order='A'), [0,1,2,3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0,1,2,3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0,1],[2,3]], order='F')
assert_equal(a.ravel(), [0,1,2,3])
assert_equal(a.ravel(order='A'), [0,2,1,3])
assert_equal(a.ravel(order='K'), [0,2,1,3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0,1],[2,3]])[::-1,:]
assert_equal(a.ravel(), [2,3,0,1])
assert_equal(a.ravel(order='C'), [2,3,0,1])
assert_equal(a.ravel(order='F'), [2,0,3,1])
assert_equal(a.ravel(order='A'), [2,3,0,1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2,3,0,1])
assert_(a.ravel(order='K').flags.owndata)
def test_setasflat(self):
# In this case, setasflat can treat a as a flat array,
# and must treat b in chunks of 3
a = np.arange(3*3*4).reshape(3,3,4)
b = np.arange(3*4*3, dtype='f4').reshape(3,4,3).T
assert_(not np.all(a.ravel() == b.ravel()))
a.setasflat(b)
assert_equal(a.ravel(), b.ravel())
# A case where the strides of neither a nor b can be collapsed
a = np.arange(3*2*4).reshape(3,2,4)[:,:,:-1]
b = np.arange(3*3*3, dtype='f4').reshape(3,3,3).T[:,:,:-1]
assert_(not np.all(a.ravel() == b.ravel()))
a.setasflat(b)
assert_equal(a.ravel(), b.ravel())
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = array([1,2,3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is ndarray)
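# Pickle round-trips, plus decoding of hard-coded version-0 and version-1
# pickle payloads produced by older numpy releases.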
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = array([[2,9],[7,0],[3,8]])
DATA = [
carray,
transpose(carray),
array([('xxx', 1, 2.0)], dtype=[('a', (str,3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return loads(obj, encoding='latin1')
else:
return loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = array([1,2,3,4], dtype=int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = array([1,2,3,4], dtype=int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1,(1,2))], dtype=[('a', 'i1', (2,2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = ones((1,1))
x[:,[0]] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1,1,1))
x[:,:,[0]] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_tuple(self):
x = ones((1,1))
x[:,(0,)] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1,1,1))
x[:,:,(0,)] = 2.0
assert_array_equal(x, array([[[2.0]]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = array(["This","is","example"])
g2 = array(["This","was","example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0,1,2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]])
def test_mixed(self):
g1 = array(["spam","spa","spammer","and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = array([u"This",u"is",u"example"])
g2 = array([u"This",u"was",u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0,1,2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]])
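# argmax: when nans are present the index of the first nan is expected, and
# the datetime64/timedelta64 entries check chronological ordering.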
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0,np.nan)], 4),
([0, 1, 2, 3, complex(np.nan,0)], 4),
([0, 1, 2, complex(np.nan,0), 3], 3),
([0, 1, 2, complex(0,np.nan), 3], 3),
([complex(0,np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = range(a.ndim)
axes.remove(i)
assert_(all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r"%arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0,np.nan)], 4),
([0, 1, 2, 3, complex(np.nan,0)], 4),
([0, 1, 2, complex(np.nan,0), 3], 3),
([0, 1, 2, complex(0,np.nan), 3], 3),
([complex(0,np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = range(a.ndim)
axes.remove(i)
assert_(all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r"%arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1,2,3], 1000)
assert_equal(np.amax([[1,2,3]], axis=1), 3)
class TestNewaxis(TestCase):
def test_basic(self):
sk = array([0,-0.1,0.1])
res = 250*sk[:,newaxis]
assert_almost_equal(res.ravel(),250*sk)
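# clip(): results must stay within the expected [min, max] range for each
# numeric type group, in-place and out-of-place, and for both byte orders.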
class TestClip(TestCase):
def _check_range(self,x,cmin,cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self,type_group,array_max,
clip_min,clip_max,inplace=False,
expected_min=None,expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=','>']
else:
byte_orders = ['<','=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min,clip_max,x)
else:
x = x.clip(clip_min,clip_max)
byteorder = '='
if x.dtype.byteorder == '|': byteorder = '|'
assert_equal(x.dtype.byteorder,byteorder)
self._check_range(x,expected_min,expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type('float',1024,-12.8,100.2, inplace=inplace)
self._clip_type('float',1024,0,0, inplace=inplace)
self._clip_type('int',1024,-120,100.5, inplace=inplace)
self._clip_type('int',1024,0,0, inplace=inplace)
x = self._clip_type('uint',1024,-120,100,expected_min=0, inplace=inplace)
x = self._clip_type('uint',1024,0,0, inplace=inplace)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3,0.5)
self._check_range(y,-0.3,0.5)
def test_max_or_min(self):
val = np.array([0,1,2,3,4,5,6,7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100,0,15]:
for types in np.sctypes.itervalues():
for T in types:
if T not in unchecked_types:
yield self.tst_basic,x.copy().astype(T),T,mask,val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1,2,3]), [True], 5)
def tst_byteorder(self,dtype):
x = np.array([1,2,3],dtype)
np.putmask(x,[True,False,True],-1)
assert_array_equal(x,[-1,2,-1])
def test_ip_byteorder(self):
for dtype in ('>i4','<i4'):
yield self.tst_byteorder,dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'],[True,False],10)
assert_array_equal(rec['x'],[10,5])
assert_array_equal(rec['y'],[2,4])
assert_array_equal(rec['z'],[3,3])
np.putmask(rec['y'],[True,False],11)
assert_array_equal(rec['x'],[10,5])
assert_array_equal(rec['y'],[11,4])
assert_array_equal(rec['z'],[3,3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
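# take(): 'raise', 'clip' and 'wrap' handling of out-of-range indices, plus
# byte-order and record-array support.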
class TestTake(object):
def tst_basic(self,x):
ind = range(x.shape[0])
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2,3,4
for types in np.sctypes.itervalues():
for T in types:
if T not in unchecked_types:
yield self.tst_basic,x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2,3,4
assert_raises(IndexError, x.take, [0,1,2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2,3,4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2,3,4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self,dtype):
x = np.array([1,2,3],dtype)
assert_array_equal(x.take([0,2,1]),[1,3,2])
def test_ip_byteorder(self):
for dtype in ('>i4','<i4'):
yield self.tst_byteorder,dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
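# lexsort sorts by the last key first: here 'a' is the primary key and 'b'
# breaks ties among equal 'a' values.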
class TestLexsort(TestCase):
def test_basic(self):
a = [1,2,1,3,1,5]
b = [0,4,5,6,2,3]
idx = np.lexsort((b,a))
expected_idx = np.array([0,4,2,1,3,5])
assert_array_equal(idx,expected_idx)
x = np.vstack((b,a))
idx = np.lexsort(x)
assert_array_equal(idx,expected_idx)
assert_array_equal(x[1][idx],np.sort(x[1]))
class TestIO(object):
"""Test tofile, fromfile, tostring, and fromstring"""
def setUp(self):
shape = (2,4,3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:,1] = [nan, inf, -inf, nan]
self.dtype = self.x.dtype
self.filename = tempfile.mktemp()
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
#tmp_file.close()
def test_bool_fromstring(self):
v = np.array([True,False,True,False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
os.unlink(self.filename)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tostring()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tostring('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() gives less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[nan, nan, nan, nan, nan, nan, nan],
sep=' ')
def test_inf(self):
self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
[inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
array([1,2,3,4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1,2,3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1.,2.,3.,4.], sep=',')
self._check_from('1,2,3,4', [1.,2.,3.,4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1,3,4,5], sep='_x_')
def test_dtype(self):
v = np.array([1,2,3,4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
os.unlink(self.filename)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
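# np.frombuffer should reinterpret raw bytes according to the requested dtype,
# including non-native byte orders.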
class TestFromBuffer(object):
def tst_basic(self,buffer,expected,kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs),expected)
def test_ip_basic(self):
for byteorder in ['<','>']:
for dtype in [float,int,np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4,7))*5).astype(dt)
buf = x.tostring()
yield self.tst_basic,buf,x.flat,{'dtype':dt}
def test_empty(self):
yield self.tst_basic, '', np.array([]), {}
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5,5))
assert_array_equal(x.flat[:9],np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat,0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError,x.resize,(5,1))
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguements(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3,2,1)
assert_(x.shape == (3,2,1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2,3,3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3,3)))
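# Structured dtype field names: renaming, bytes vs. unicode handling on
# Python 2 and 3, and indexing by field and subfield name.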
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f',float),('i',int)])
dt.names = ['p','q']
assert_equal(dt.names,['p','q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(ValueError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
        # Test that unicode and 8-bit / byte strings can be used as field names
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1)
assert_raises(ValueError, a.__getitem__, asbytes('f1'))
assert_raises(ValueError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(ValueError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn2 = func('f2')
sfn1 = func('sf1')
b[fn2][sfn1] = 1
assert_equal(b[fn2][sfn1], 1)
assert_raises(ValueError, b[fn2].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn2].__getitem__, fnn)
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
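# view() reinterprets the same memory with a new dtype; the four int8 fields
# read as a little-endian int32 give 0x04030201 == 67305985.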
class TestView(TestCase):
def test_basic(self):
x = np.array([(1,2,3,4),(5,6,7,8)],dtype=[('r',np.int8),('g',np.int8),
('b',np.int8),('a',np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
class TestStats(TestCase):
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1,2,3,4],[5,6,7,8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
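# The three-argument form of dot writes into a caller-supplied output array;
# a mismatched output shape, dtype or memory layout must raise ValueError.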
class TestDot(TestCase):
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in xrange(12):
dot(f,v,r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f,v,out=None)
assert_array_equal(r2, r)
assert_(r is dot(f,v,out=r))
v = v[:,0].copy() # v.shape == (16,)
r = r[:,0].copy() # r.shape == (1024,)
r2 = dot(f,v)
assert_(r is dot(f,v,r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:,::2])
assert_raises(ValueError, dot, f, v, r[:,:32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2,501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
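# choose() picks elements from the candidate arrays according to the index
# array, broadcasting the candidates against the index as needed.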
class TestChoose(TestCase):
def setUp(self):
self.x = 2*ones((3,),dtype=int)
self.y = 3*ones((3,),dtype=int)
self.x2 = 2*ones((2,3), dtype=int)
self.y2 = 3*ones((2,3), dtype=int)
self.ind = [0,0,1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2,2,3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2,2,3],[2,2,3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2,2,3],[2,2,3]])
def can_use_decimal():
try:
from decimal import Decimal
return True
except ImportError:
return False
# TODO: test for multidimensional
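# Padding-mode codes passed to the C-level neighborhood iterator test helpers
# (test_neighborhood_iterator / test_neighborhood_iterator_oob) used below.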
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
@dec.skipif(not can_use_decimal(),
"Skip neighborhood iterator tests for decimal objects " \
"(decimal module not available")
def test_simple2d_object(self):
from decimal import Decimal
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
@dec.skipif(not can_use_decimal(),
"Skip neighborhood iterator tests for decimal objects " \
"(decimal module not available")
def test_mirror2d_object(self):
from decimal import Decimal
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
@dec.skipif(not can_use_decimal(),
"Skip neighborhood iterator tests for decimal objects " \
"(decimal module not available")
def test_simple_object(self):
from decimal import Decimal
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
@dec.skipif(not can_use_decimal(),
"Skip neighborhood iterator tests for decimal objects " \
"(decimal module not available")
def test_mirror_object(self):
from decimal import Decimal
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
@dec.skipif(not can_use_decimal(),
"Skip neighborhood iterator tests for decimal objects " \
"(decimal module not available")
def test_circular_object(self):
from decimal import Decimal
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
import warnings
x = np.array([1,2])
y = np.array([1-2j,1+2j])
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1,2])
warnings.simplefilter("default", np.ComplexWarning)
if sys.version_info >= (2, 6):
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
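    # Translation of PEP 3118 format strings into numpy dtypes, covering
    # native padding, trailing padding and byte-order handling.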
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in xrange(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
        # Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype([('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
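    # Round-trips through the PEP 3118 buffer protocol: np.asarray(memoryview(x))
    # must preserve dtype, shape, strides and contents.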
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1,2,3,4,5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1,2],[3,4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array([(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1,2],[3,4]],), dtype=[('a', (int, (2,2)))])
self._check_roundtrip(x)
x = np.array([1,2,3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1,2,3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1,2,3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1,2,3], dtype='<i4')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1,2,3], dtype='>q')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1,2,3], dtype='<q')
self._check_roundtrip(x)
else:
x = np.array([1,2,3], dtype='>q')
self._check_roundtrip(x)
x = np.array([1,2,3], dtype='<q')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_export_simple_1d(self):
x = np.array([1,2,3,4,5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, None)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, None)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, None)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array([(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, None)
sz = sum([dtype(b).itemsize for a, b in dt])
if dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:^q:dx:B:e:@H:f:=I:g:L:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:^q:dx:B:e:@H:f:=I:g:Q:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1,2],[3,4]],), dtype=[('a', ('i', (2,2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, None)
assert_equal(y.ndim, 0)
assert_equal(y.strides, None)
assert_equal(y.suboffsets, None)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1,2,3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1,2,3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_padding(self):
for j in xrange(8):
x = np.array([(1,),(2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
def test_padded_struct_array(self):
dt1 = np.dtype([('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
if __name__ == "__main__":
run_module_suite()
| 37.980486
| 587
| 0.522316
|
bc782c86914138c0fdbae5b7bc83040f92e02a28
| 2,562
|
py
|
Python
|
2021/day13/day13.py
|
fcharlier/AdventOfCode
|
6b2765da9e4d6f6b1f201897bb56043482a65bb2
|
[
"WTFPL"
] | null | null | null |
2021/day13/day13.py
|
fcharlier/AdventOfCode
|
6b2765da9e4d6f6b1f201897bb56043482a65bb2
|
[
"WTFPL"
] | null | null | null |
2021/day13/day13.py
|
fcharlier/AdventOfCode
|
6b2765da9e4d6f6b1f201897bb56043482a65bb2
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
def read_input(filename):
"""
>>> c, f = read_input('example')
>>> len(c)
18
>>> len(f)
2
>>> f[0]
('y', 7)
"""
coords = []
folds = []
with open(filename) as fd:
for line in fd:
if "," in line:
x, y = line.strip().split(",")
coords.append((int(y), int(x)))
elif "fold along" in line:
line = line.replace("fold along ", "")
axis, n = line.strip().split("=")
folds.append((axis, int(n)))
return coords, folds
def build_paper(coords):
"""
>>> ppr = build_paper(read_input('example')[0])
>>> ppr.shape
(15, 11)
>>> np.size(ppr[ppr == True])
18
"""
clists = tuple(zip(*coords))
paper = np.zeros((max(clists[0]) + 1, max(clists[1]) + 1), dtype=np.bool_)
paper[clists] = True
return paper
def fold_along(paper, fold_axis, fold_where):
"""
>>> dots, folds = read_input('example')
>>> paper = build_paper(dots)
>>> paper = fold_along(paper, *folds[0])
>>> paper.shape
(7, 11)
>>> np.size(paper[paper == True])
17
>>> paper = fold_along(paper, *folds[1])
>>> paper.shape
(7, 5)
>>> np.size(paper[paper == True])
16
"""
if fold_axis == "y":
top = paper[0:fold_where, :]
bottom = paper[fold_where + 1 :, :]
        # pad the bottom part when the fold doesn't yield enough rows
if bottom.shape[0] < top.shape[0]:
bottom = np.pad(bottom, ((0, 1), (0, 0)), "constant", constant_values=False)
return top | np.flipud(bottom)
elif fold_axis == "x":
left = paper[:, 0:fold_where]
right = paper[:, fold_where + 1 :]
        # pad the right part when the fold doesn't yield enough columns
if right.shape[1] < left.shape[1]:
right = np.pad(right, ((0, 0), (0, 1)), "constant", constant_values=False)
return left | np.fliplr(right)
def display_paper(paper):
for y in range(paper.shape[0]):
for x in range(paper.shape[1]):
if paper[y, x]:
print("█", end="")
else:
print("░", end="")
print("")
if __name__ == "__main__":
dots, folds = read_input("input")
paper = build_paper(dots)
paper = fold_along(paper, *folds[0])
print("Part 1: ", np.size(paper[paper == True]))
for fold in folds[1:]:
paper = fold_along(paper, *fold)
print("Part 2:")
display_paper(paper)
| 27.255319
| 88
| 0.517955
|
6c440ba84e4b5e18a8c042854a01c10c7a6a0de7
| 1,194
|
py
|
Python
|
chatbot.py
|
anchalsingh85305/Chat-Bot
|
c06cc8c5e5e122fbe0124e409e8d82d240d8093a
|
[
"MIT"
] | null | null | null |
chatbot.py
|
anchalsingh85305/Chat-Bot
|
c06cc8c5e5e122fbe0124e409e8d82d240d8093a
|
[
"MIT"
] | null | null | null |
chatbot.py
|
anchalsingh85305/Chat-Bot
|
c06cc8c5e5e122fbe0124e409e8d82d240d8093a
|
[
"MIT"
] | null | null | null |
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatterbot.trainers import ChatterBotCorpusTrainer
# Creating ChatBot Instance
chatbot = ChatBot(
'CoronaBot',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
'chatterbot.logic.MathematicalEvaluation',
'chatterbot.logic.TimeLogicAdapter',
'chatterbot.logic.BestMatch',
{
'import_path': 'chatterbot.logic.BestMatch',
'default_response': 'I am sorry, but I do not understand. I am still learning.',
'maximum_similarity_threshold': 0.90
}
],
database_uri='sqlite:///database.sqlite3'
)
# Training with Personal Ques & Ans
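# ListTrainer expects a flat list of conversation turns, so the two text files
# are assumed to hold one turn per line, alternating prompt and reply, e.g.
# ["What is corona?", "Corona is a virus ...", "How does it spread?", "Through droplets ..."]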
training_data_quesans = open('training_data/ques_ans.txt').read().splitlines()
training_data_personal = open('training_data/personal_ques.txt').read().splitlines()
training_data = training_data_quesans + training_data_personal
trainer = ListTrainer(chatbot)
trainer.train(training_data)
# Training with English Corpus Data
trainer_corpus = ChatterBotCorpusTrainer(chatbot)
trainer_corpus.train(
'chatterbot.corpus.english'
)
| 34.114286
| 93
| 0.716918
|
7b8a802d68c46aaf15545b4771790974f5c45942
| 58
|
py
|
Python
|
stocks/util/__init__.py
|
FriendlyUser/price-prediction
|
4be17ac250c8cb079cc9f8cacdc92a91e146ee9a
|
[
"Apache-2.0"
] | 1
|
2021-02-19T04:12:53.000Z
|
2021-02-19T04:12:53.000Z
|
stocks/util/__init__.py
|
FriendlyUser/price-prediction
|
4be17ac250c8cb079cc9f8cacdc92a91e146ee9a
|
[
"Apache-2.0"
] | 4
|
2020-06-17T03:29:23.000Z
|
2020-08-12T15:45:46.000Z
|
stocks/util/__init__.py
|
FriendlyUser/price-prediction
|
4be17ac250c8cb079cc9f8cacdc92a91e146ee9a
|
[
"Apache-2.0"
] | 1
|
2021-10-02T20:24:12.000Z
|
2021-10-02T20:24:12.000Z
|
from stocks.util.get_prices import get_prices, get_config
| 29
| 57
| 0.862069
|
b5c59e6f50185f4a63badfb860375959f3af4a02
| 3,240
|
py
|
Python
|
css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-ruby-tests.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 8
|
2019-04-09T21:13:05.000Z
|
2021-11-23T17:25:18.000Z
|
css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-ruby-tests.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 7
|
2019-07-08T22:23:16.000Z
|
2021-03-18T23:42:32.000Z
|
css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-ruby-tests.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 11
|
2019-04-12T01:20:16.000Z
|
2021-11-23T17:25:02.000Z
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-ruby-001 ~ 004, which test
emphasis marks with ruby in four directions. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-ruby-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<!-- This file was generated automatically by the script
./support/generate-text-emphasis-ruby-tests.py -->
<title>CSS Test: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="emphasis marks are drawn outside the ruby">
<link rel="match" href="text-emphasis-ruby-{index:03}-ref.html">
<p>Pass if the emphasis marks are outside the ruby:</p>
<div lang="ja" style="line-height: 5; writing-mode: {wm}; ruby-position: {ruby_pos}; text-emphasis-position: {posval}">ルビ<span style="text-emphasis: circle">と<ruby>圏<rt>けん</rt>点<rt>てん</rt></ruby>を</span>同時</div>
'''
REF_FILE = 'text-emphasis-ruby-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<!-- This file was generated automatically by the script
./support/generate-text-emphasis-ruby-tests.py -->
<title>CSS Reference: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rtc {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are outside the ruby:</p>
<div lang="ja" style="line-height: 5; writing-mode: {wm}; ruby-position: {posval}">ルビ<ruby>と<rtc>●</rtc>圏<rt>けん</rt><rtc>●</rtc>点<rt>てん</rt><rtc>●</rtc>を<rtc>●</rtc></ruby>同時</div>
'''
TEST_CASES = [
('top', 'horizontal-tb', 'over', [
('horizontal-tb', 'over right')]),
('bottom', 'horizontal-tb', 'under', [
('horizontal-tb', 'under right')]),
('right', 'vertical-rl', 'over', [
('vertical-rl', 'over right'),
('vertical-lr', 'over right')]),
('left', 'vertical-rl', 'under', [
('vertical-rl', 'over left'),
('vertical-lr', 'over left')]),
]
SUFFIXES = ['', 'a']
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for pos, ref_wm, ruby_pos, subtests in TEST_CASES:
idx += 1
ref_file = REF_FILE.format(idx)
ref_content = REF_TEMPLATE.format(pos=pos, wm=ref_wm, posval=ruby_pos)
write_file(ref_file, ref_content)
suffix = iter(SUFFIXES)
for wm, posval in subtests:
test_file = TEST_FILE.format(idx, next(suffix))
test_content = TEST_TEMPLATE.format(
wm=wm, pos=pos, index=idx, ruby_pos=ruby_pos, posval=posval)
write_file(test_file, test_content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| 43.783784
| 211
| 0.653086
|
b77210e3d2bab36daa096ecf1b33038cb01423dd
| 14,203
|
py
|
Python
|
scripts/validate_docstrings.py
|
KneeShard/pandas
|
ce3bac9af43838c7d690ee86e9bec4976a3303e3
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-06-10T14:05:09.000Z
|
2021-06-10T14:05:09.000Z
|
scripts/validate_docstrings.py
|
KneeShard/pandas
|
ce3bac9af43838c7d690ee86e9bec4976a3303e3
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
scripts/validate_docstrings.py
|
KneeShard/pandas
|
ce3bac9af43838c7d690ee86e9bec4976a3303e3
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
Analyze docstrings to detect errors.
If no argument is provided, it does a quick check of docstrings and returns
a csv with all API functions and results of basic checks.
If a function or method is provided in the form "pandas.function",
"pandas.module.class.method", etc. a list of all errors in the docstring for
the specified function or method.
Usage::
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""
from __future__ import annotations
import argparse
import doctest
import glob
import importlib
import json
import os
import subprocess
import sys
import tempfile
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
# Template backend makes matplotlib to not plot anything. This is useful
# to avoid that plot windows are open from the doctests while running the
# script. Setting here before matplotlib is loaded.
# We don't warn for the number of open plots, as none is actually being opened
os.environ["MPLBACKEND"] = "Template"
import matplotlib # isort:skip
matplotlib.rc("figure", max_open_warning=10000)
import numpy # isort:skip
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_PATH))
import pandas # isort:skip
sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext"))
from numpydoc.validate import validate, Docstring # isort:skip
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
ERROR_MSGS = {
"GL04": "Private classes ({mentioned_private_classes}) should not be "
"mentioned in public docstrings",
"SA05": "{reference_name} in `See Also` section does not need `pandas` "
"prefix, use {right_reference} instead.",
"EX02": "Examples do not pass tests:\n{doctest_log}",
"EX03": "flake8 error: {error_code} {error_message}{times_happening}",
"EX04": "Do not import {imported_library}, as it is imported "
"automatically for the examples (numpy as np, pandas as pd)",
}
def pandas_error(code, **kwargs):
"""
Copy of the numpydoc error function, since ERROR_MSGS can't be updated
with our custom errors yet.
"""
return (code, ERROR_MSGS[code].format(**kwargs))
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
The name of the object (e.g. 'pandas.Series.str.upper).
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = "pandas"
previous_line = current_section = current_subsection = ""
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set("-"):
current_section = previous_line
continue
if set(line) == set("~"):
current_subsection = previous_line
continue
if line.startswith(".. currentmodule::"):
current_module = line.replace(".. currentmodule::", "").strip()
continue
if line == ".. autosummary::":
position = "autosummary"
continue
if position == "autosummary":
if line == "":
position = "items"
continue
if position == "items":
if line == "":
position = None
continue
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split("."):
func = getattr(func, part)
yield (
".".join([current_module, item]),
func,
current_section,
current_subsection,
)
previous_line = line
class PandasDocstring(Docstring):
@property
def mentioned_private_classes(self):
return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]
@property
def examples_errors(self):
flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL
finder = doctest.DocTestFinder()
runner = doctest.DocTestRunner(optionflags=flags)
context = {"np": numpy, "pd": pandas}
error_msgs = ""
for test in finder.find(self.raw_doc, self.name, globs=context):
f = StringIO()
runner.run(test, out=f.write)
error_msgs += f.getvalue()
return error_msgs
@property
def examples_source_code(self):
lines = doctest.DocTestParser().get_examples(self.raw_doc)
return [line.source for line in lines]
def validate_pep8(self):
if not self.examples:
return
# F401 is needed to not generate flake8 errors in examples
        # that do not use numpy or pandas
content = "".join(
(
"import numpy as np # noqa: F401\n",
"import pandas as pd # noqa: F401\n",
*self.examples_source_code,
)
)
error_messages = []
with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file:
file.write(content)
file.flush()
cmd = ["python", "-m", "flake8", "--quiet", "--statistics", file.name]
response = subprocess.run(cmd, capture_output=True, text=True)
stdout = response.stdout
stdout = stdout.replace(file.name, "")
messages = stdout.strip("\n")
if messages:
error_messages.append(messages)
for error_message in error_messages:
error_count, error_code, message = error_message.split(maxsplit=2)
yield error_code, message, int(error_count)
def pandas_validate(func_name: str):
"""
Call the numpydoc validation, and add the errors specific to pandas.
Parameters
----------
func_name : str
Name of the object of the docstring to validate.
Returns
-------
dict
Information about the docstring and the errors found.
"""
doc = PandasDocstring(func_name)
result = validate(func_name)
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
result["errors"].append(
pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs))
)
if doc.see_also:
for rel_name in doc.see_also:
if rel_name.startswith("pandas."):
result["errors"].append(
pandas_error(
"SA05",
reference_name=rel_name,
right_reference=rel_name[len("pandas.") :],
)
)
result["examples_errs"] = ""
if doc.examples:
result["examples_errs"] = doc.examples_errors
if result["examples_errs"]:
result["errors"].append(
pandas_error("EX02", doctest_log=result["examples_errs"])
)
for error_code, error_message, error_count in doc.validate_pep8():
times_happening = f" ({error_count} times)" if error_count > 1 else ""
result["errors"].append(
pandas_error(
"EX03",
error_code=error_code,
error_message=error_message,
times_happening=times_happening,
)
)
examples_source_code = "".join(doc.examples_source_code)
for wrong_import in ("numpy", "pandas"):
if f"import {wrong_import}" in examples_source_code:
result["errors"].append(
pandas_error("EX04", imported_library=wrong_import)
)
return result
def validate_all(prefix, ignore_deprecated=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated: bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
"""
result = {}
seen = {}
api_doc_fnames = os.path.join(BASE_PATH, "doc", "source", "reference", "*.rst")
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
with open(api_doc_fname) as f:
api_items += list(get_api_items(f))
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
doc_info = pandas_validate(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
shared_code_key = doc_info["file"], doc_info["file_line"]
shared_code = seen.get(shared_code_key, "")
result[func_name].update(
{
"in_api": True,
"section": section,
"subsection": subsection,
"shared_code_with": shared_code,
}
)
seen[shared_code_key] = func_name
return result
def print_validate_all_results(
prefix: str,
errors: list[str] | None,
output_format: str,
ignore_deprecated: bool,
):
if output_format not in ("default", "json", "actions"):
raise ValueError(f'Unknown output_format "{output_format}"')
result = validate_all(prefix, ignore_deprecated)
if output_format == "json":
sys.stdout.write(json.dumps(result))
return 0
prefix = "##[error]" if output_format == "actions" else ""
exit_status = 0
for name, res in result.items():
for err_code, err_desc in res["errors"]:
if errors and err_code not in errors:
continue
sys.stdout.write(
f'{prefix}{res["file"]}:{res["file_line"]}:'
f"{err_code}:{name}:{err_desc}\n"
)
exit_status += 1
return exit_status
def print_validate_one_results(func_name: str):
def header(title, width=80, char="#"):
full_line = char * width
side_len = (width - len(title) - 2) // 2
adj = "" if len(title) % 2 == 0 else " "
title_line = f"{char * side_len} {title}{adj} {char * side_len}"
return f"\n{full_line}\n{title_line}\n{full_line}\n\n"
result = pandas_validate(func_name)
sys.stderr.write(header(f"Docstring ({func_name})"))
sys.stderr.write(f"{result['docstring']}\n")
sys.stderr.write(header("Validation"))
if result["errors"]:
sys.stderr.write(f'{len(result["errors"])} Errors found:\n')
for err_code, err_desc in result["errors"]:
if err_code == "EX02": # Failing examples are printed at the end
sys.stderr.write("\tExamples do not pass tests\n")
continue
sys.stderr.write(f"\t{err_desc}\n")
else:
sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
if result["examples_errs"]:
sys.stderr.write(header("Doctests"))
sys.stderr.write(result["examples_errs"])
def main(func_name, prefix, errors, output_format, ignore_deprecated):
"""
Main entry point. Call the validation for one or for all docstrings.
"""
if func_name is None:
return print_validate_all_results(
prefix, errors, output_format, ignore_deprecated
)
else:
print_validate_one_results(func_name)
return 0
if __name__ == "__main__":
format_opts = "default", "json", "actions"
func_help = (
"function or method to validate (e.g. pandas.DataFrame.head) "
"if not provided, all docstrings are validated and returned "
"as JSON"
)
argparser = argparse.ArgumentParser(description="validate pandas docstrings")
argparser.add_argument("function", nargs="?", default=None, help=func_help)
argparser.add_argument(
"--format",
default="default",
choices=format_opts,
help="format of the output when validating "
"multiple docstrings (ignored when validating one). "
"It can be {str(format_opts)[1:-1]}",
)
argparser.add_argument(
"--prefix",
default=None,
help="pattern for the "
"docstring names, in order to decide which ones "
'will be validated. A prefix "pandas.Series.str."'
"will make the script validate all the docstrings "
"of methods starting by this pattern. It is "
"ignored if parameter function is provided",
)
argparser.add_argument(
"--errors",
default=None,
help="comma separated "
"list of error codes to validate. By default it "
"validates all errors (ignored when validating "
"a single docstring)",
)
argparser.add_argument(
"--ignore_deprecated",
default=False,
action="store_true",
help="if this flag is set, "
"deprecated objects are ignored when validating "
"all docstrings",
)
args = argparser.parse_args()
sys.exit(
main(
args.function,
args.prefix,
args.errors.split(",") if args.errors else None,
args.format,
args.ignore_deprecated,
)
)
| 32.206349
| 85
| 0.606984
|
ffe67354e0885a09ba208e9aa9ac1db8c228d0b1
| 4,974
|
py
|
Python
|
elliot/recommender/neural/DMF/deep_matrix_factorization.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 175
|
2021-03-04T15:46:25.000Z
|
2022-03-31T05:56:58.000Z
|
elliot/recommender/neural/DMF/deep_matrix_factorization.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 15
|
2021-03-06T17:53:56.000Z
|
2022-03-24T17:02:07.000Z
|
elliot/recommender/neural/DMF/deep_matrix_factorization.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 39
|
2021-03-04T15:46:26.000Z
|
2022-03-09T15:37:12.000Z
|
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
import numpy as np
from ast import literal_eval as make_tuple
from tqdm import tqdm
from elliot.dataset.samplers import pointwise_pos_neg_ratio_ratings_sampler as pws
from elliot.recommender.neural.DMF.deep_matrix_factorization_model import DeepMatrixFactorizationModel
from elliot.recommender.recommender_utils_mixin import RecMixin
from elliot.utils.write import store_recommendation
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.base_recommender_model import init_charger
class DMF(RecMixin, BaseRecommenderModel):
r"""
Deep Matrix Factorization Models for Recommender Systems.
For further details, please refer to the `paper <https://www.ijcai.org/Proceedings/2017/0447.pdf>`_
Args:
lr: Learning rate
reg: Regularization coefficient
user_mlp: List of units for each layer
        item_mlp: List of units for each layer of the item tower
        similarity: Similarity measure (e.g. cosine)
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
DMF:
meta:
save_recs: True
epochs: 10
batch_size: 512
lr: 0.0001
reg: 0.001
user_mlp: (64,32)
item_mlp: (64,32)
similarity: cosine
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
self._params_list = [
("_learning_rate", "lr", "lr", 0.0001, None, None),
("_user_mlp", "user_mlp", "umlp", "(64,32)", lambda x: list(make_tuple(str(x))), lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_item_mlp", "item_mlp", "imlp", "(64,32)", lambda x: list(make_tuple(str(x))), lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_neg_ratio", "neg_ratio", "negratio", 5, None, None),
("_reg", "reg", "reg", 0.001, None, None),
("_similarity", "similarity", "sim", "cosine", None, None)
]
self.autoset_params()
self._max_ratings = np.max(self._data.sp_i_train_ratings)
self._transactions_per_epoch = self._data.transactions + self._neg_ratio * self._data.transactions
if self._batch_size < 1:
self._batch_size = self._data.transactions + self._neg_ratio * self._data.transactions
self._sampler = pws.Sampler(self._data.i_train_dict, self._data.sp_i_train_ratings, self._neg_ratio)
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._model = DeepMatrixFactorizationModel(self._num_users, self._num_items, self._user_mlp,
self._item_mlp, self._reg,
self._similarity, self._max_ratings,
self._data.sp_i_train_ratings, self._learning_rate,
self._seed)
@property
def name(self):
return "DMF"\
+ f"_{self.get_base_params_shortcut()}" \
+ f"_{self.get_params_shortcut()}"
def train(self):
if self._restore:
return self.restore_weights()
for it in self.iterate(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._transactions_per_epoch // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._transactions_per_epoch, self._batch_size):
steps += 1
loss += self._model.train_step(batch)
t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})
t.update()
self.evaluate(it, loss.numpy()/(it + 1))
def get_recommendations(self, k: int = 100):
predictions_top_k_test = {}
predictions_top_k_val = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(
np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])
)
)
recs_val, recs_test = self.process_protocol(k, predictions, offset, offset_stop)
predictions_top_k_val.update(recs_val)
predictions_top_k_test.update(recs_test)
return predictions_top_k_val, predictions_top_k_test
| 40.770492
| 156
| 0.606152
|
752eb799a3f4a11a947f093ef0b2e9e0306826d7
| 2,023
|
py
|
Python
|
check.py
|
lleshchi/stratis-cli
|
6e847d918d075dfd9548c66dd37a9b96b71e8f7d
|
[
"Apache-2.0"
] | null | null | null |
check.py
|
lleshchi/stratis-cli
|
6e847d918d075dfd9548c66dd37a9b96b71e8f7d
|
[
"Apache-2.0"
] | null | null | null |
check.py
|
lleshchi/stratis-cli
|
6e847d918d075dfd9548c66dd37a9b96b71e8f7d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# isort: STDLIB
import argparse
import subprocess
import sys
arg_map = {
"src/stratis_cli": [
"--reports=no",
"--disable=I",
"--disable=duplicate-code",
"--disable=invalid-name",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
"tests/blackbox/stratisd_cert.py": [
"--reports=no",
"--disable=I",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
"tests/blackbox/stratis_cli_cert.py": [
"--reports=no",
"--disable=I",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
"tests/blackbox/testlib": [
"--reports=no",
"--disable=I",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
"tests/whitebox": [
"--reports=no",
"--disable=I",
"--disable=duplicate-code",
"--disable=invalid-name",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
"bin/stratis": [
"--reports=no",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
}
def get_parser():
"""
Generate an appropriate parser.
:returns: an argument parser
:rtype: `ArgumentParser`
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"package", choices=arg_map.keys(), help="designates the package to test"
)
parser.add_argument("--ignore", help="ignore these files")
return parser
def get_command(namespace):
"""
Get the pylint command for these arguments.
:param `Namespace` namespace: the namespace
"""
cmd = ["pylint", namespace.package] + arg_map[namespace.package]
if namespace.ignore:
cmd.append("--ignore=%s" % namespace.ignore)
return cmd
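# Illustrative only: for a parsed namespace with package="bin/stratis" and no
# --ignore, get_command returns
# ["pylint", "bin/stratis", "--reports=no",
#  "--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'"]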
def main():
args = get_parser().parse_args()
return subprocess.call(get_command(args), stdout=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
| 25.607595
| 80
| 0.556105
|
8eae67973809cc9288fe48f3dac0d99479dbd253
| 13,297
|
py
|
Python
|
cryptoapis/model/inline_response40081.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
cryptoapis/model/inline_response40081.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
cryptoapis/model/inline_response40081.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.get_fee_address_details_e400 import GetFeeAddressDetailsE400
globals()['GetFeeAddressDetailsE400'] = GetFeeAddressDetailsE400
class InlineResponse40081(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'api_version': (str,), # noqa: E501
'request_id': (str,), # noqa: E501
'error': (GetFeeAddressDetailsE400,), # noqa: E501
'context': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'api_version': 'apiVersion', # noqa: E501
'request_id': 'requestId', # noqa: E501
'error': 'error', # noqa: E501
'context': 'context', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, api_version, request_id, error, *args, **kwargs): # noqa: E501
"""InlineResponse40081 - a model defined in OpenAPI
Args:
api_version (str): Specifies the version of the API that incorporates this endpoint.
request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
error (GetFeeAddressDetailsE400):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.api_version = api_version
self.request_id = request_id
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, api_version, request_id, error, *args, **kwargs): # noqa: E501
"""InlineResponse40081 - a model defined in OpenAPI
Args:
api_version (str): Specifies the version of the API that incorporates this endpoint.
request_id (str): Defines the ID of the request. The `requestId` is generated by Crypto APIs and it's unique for every request.
error (GetFeeAddressDetailsE400):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
context (str): In batch situations the user can use the context to correlate responses with requests. This property is present regardless of whether the response was successful or returned as an error. `context` is specified by the user.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.api_version = api_version
self.request_id = request_id
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 46.65614
| 484
| 0.594721
|
0dac38756c4621fb1c0913ccf5b67f14d57fb598
| 4,025
|
py
|
Python
|
scripts/chr_view.py
|
lucasw/nes_cc65_demo
|
c9edbd146f95edb2fa337dc12b2e58c5295d852f
|
[
"BSD-3-Clause"
] | 5
|
2018-01-27T20:36:29.000Z
|
2022-03-03T01:49:00.000Z
|
scripts/chr_view.py
|
lucasw/nes_cc65_demo
|
c9edbd146f95edb2fa337dc12b2e58c5295d852f
|
[
"BSD-3-Clause"
] | 3
|
2017-03-29T13:03:22.000Z
|
2017-04-08T03:13:43.000Z
|
scripts/chr_view.py
|
lucasw/nes_cc65_demo
|
c9edbd146f95edb2fa337dc12b2e58c5295d852f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Lucas Walter
# March 2017
# load a nes .chr or .nes and view the graphics
# Probably should look at other linux/python chr viewers and editors,
# e.g. https://github.com/jmcmahan/nestile
# and not spend much more time on this.
import cv2
import numpy as np
import sys
if __name__ == '__main__':
scale = 9
sprite_width = 8
# yy-chr shows 16
sprites_per_row = 16
sprites_per_column = 8
image = np.zeros((sprite_width * sprites_per_column,
sprite_width * sprites_per_row, 3), np.uint8)
colors = []
colors.append((0, 0, 0))
colors.append((60, 90, 180))
colors.append((252, 188, 176))
colors.append((200, 76, 12))
image[:,:,0] = 228
if False: # for i in range(3):
image[1::4, 3::7, i] = colors[1][i]
# image[2::7, :, i] = colors[2][i]
# image[3::11, :, i] = colors[3][i]
    # 16 contiguous bytes hold one sprite:
    # 8 x 8 pixels x 2 bits per pixel = 128 bits = 16 bytes
    # so each row of 8 pixels takes 8 pixels x 2bpp = 16 bits = 2 bytes,
    # split across two 8-byte bit planes (low-bit plane first, then high-bit plane)
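    # Worked example (hypothetical bytes, not taken from any particular ROM):
    # if a row has plane0 byte 0b10000001 and plane1 byte 0b00000001, the decode
    # below yields palette indices [1, 0, 0, 0, 0, 0, 0, 3] for that row -- the
    # leftmost pixel uses bit 7 of each plane, plane0 gives the low bit and
    # plane1 the high bit.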
# with open(sys.argv[1], "rb") as fl:
chr_name = sys.argv[1]
row_offset = 0
if (len(sys.argv) > 2):
row_offset = int(sys.argv[2])
data = np.fromfile(chr_name, dtype=np.uint8)
print len(data), (len(data) / 64), chr_name
x = 0
y = 0
loop = True
for ind in range(row_offset * sprites_per_row * sprite_width, len(data), 16):
if not loop:
break
# print 'ind', ind
# if ind >= 32:
# break
if y >= image.shape[0]:
print 'y ', y, image.shape[0]
break
# get 8 pixels from 16 bytes
plane0 = data[ind:ind + 8]
plane1 = data[ind + 8:ind + 16]
pind = [0, 0, 0, 0, 0, 0, 0, 0]
# go through each row in current sprite
for j in range(8):
# build each pixel in current row
for i in range(8):
# first plane, lower bit
ir = 7 - i
plane0_bit = (plane0[j] & (0x1 << ir)) >> ir
plane1_bit = (plane1[j] & (0x1 << ir)) >> ir
# second plane, higher bit
pind[i] = plane0_bit | (plane1_bit << 1)
x1 = x + i
y1 = y + j
# print 'x1', x1, ', x', x, 'i', i, ', y', y
if x1 >= image.shape[1]:
print 'x ', x1, image.shape[1]
loop = False
break
# print 'pind', pind[i], i
for k in range(3):
image[y1, x1, k] = colors[pind[i]][2 - k]
# print y, x1, pind[xo], image[y, x1, :]
x += 8
if x >= sprites_per_row * sprite_width:
x = 0
y += 8
# print x, y
# go to next sprite
# TODO store sprites as individual numpy images
# then tile them in final image for visualization
if False: # if y % 8 == 0:
y -= 8
x += 8
# go to next row of tiles
if x >= image.shape[1]:
y += 8
x = 0
# print x, y
scaled_image = cv2.resize(image, (0, 0), fx = scale, fy = scale,
interpolation = cv2.INTER_NEAREST)
# pixel boundary grid
scaled_image[0::scale, :, 0] = 88
scaled_image[0::scale, :, 1] = 78
scaled_image[0::scale, :, 2] = 0
scaled_image[:, 0::scale, 0] = 66
scaled_image[:, 0::scale, 1] = 56
scaled_image[:, 0::scale, 2] = 0
    # sprite boundary grid
scale *= 8
scaled_image[0::scale, :, 0] = 168
scaled_image[0::scale, :, 1] = 168
scaled_image[0::scale, :, 2] = 30
scaled_image[:, 0::scale, 0] = 106
scaled_image[:, 0::scale, 1] = 136
scaled_image[:, 0::scale, 2] = 30
while True:
key = cv2.waitKey(10)
if key == ord('q'):
print key, ord('q')
break
cv2.imshow("image", scaled_image)
| 30.492424
| 81
| 0.491677
|
894051b3a6af604420799c1da94da50e37becf28
| 1,355
|
py
|
Python
|
easyTX/client.py
|
VDHARV/easyTX-2.0
|
90cc9fcdccfd4ff267d13c14e5417d87df0475cc
|
[
"MIT"
] | null | null | null |
easyTX/client.py
|
VDHARV/easyTX-2.0
|
90cc9fcdccfd4ff267d13c14e5417d87df0475cc
|
[
"MIT"
] | null | null | null |
easyTX/client.py
|
VDHARV/easyTX-2.0
|
90cc9fcdccfd4ff267d13c14e5417d87df0475cc
|
[
"MIT"
] | null | null | null |
import cv2
import socket
import numpy as np
import base64
import easyTX.constants as constants
class Client():
"""Client class of easyTX module. Must be defined if the machine wants to recieve a continous stream of data.
"""
def __init__(self, port):
self.port = port
def conn(self):
"""Connects with server
"""
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.client_socket.bind(("", self.port))
def recv_frame(self):
"""Gets frame from server.
Run in loop.
Returns:
_numpy.ndarray_: Returns single frame.
"""
packet, _ = self.client_socket.recvfrom(constants.BUFF_SIZE)
data = base64.b64decode(packet, ' /')
npdata = np.fromstring(data, dtype = np.uint8)
frame = cv2.imdecode(npdata, 1)
return frame
def recv_data(self):
"""Recives data from the server.
Runs in loop.
Returns:
str: Any String data
"""
packet, _ = self.client_socket.recvfrom(constants.BUFF_SIZE)
data = packet.decode('utf-8')
return data
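# Minimal usage sketch (illustrative; assumes a matching easyTX server is
# broadcasting frames on the same port):
#   client = Client(9999)
#   client.conn()
#   while True:
#       frame = client.recv_frame()
#       cv2.imshow("stream", frame)
#       if cv2.waitKey(1) == ord('q'):
#           break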
| 30.795455
| 113
| 0.623616
|
37f23f54ceb357abb3e9b3ec7666255c77c7b6ad
| 3,384
|
py
|
Python
|
tableauserverclient/server/request_options.py
|
complexsplit/server-client-python
|
608aa7694d0560ea3c8c37b10127b11207e56e8d
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
tableauserverclient/server/request_options.py
|
complexsplit/server-client-python
|
608aa7694d0560ea3c8c37b10127b11207e56e8d
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
tableauserverclient/server/request_options.py
|
complexsplit/server-client-python
|
608aa7694d0560ea3c8c37b10127b11207e56e8d
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
class RequestOptionsBase(object):
def apply_query_params(self, url):
raise NotImplementedError()
class RequestOptions(RequestOptionsBase):
class Operator:
Equals = 'eq'
GreaterThan = 'gt'
GreaterThanOrEqual = 'gte'
LessThan = 'lt'
LessThanOrEqual = 'lte'
In = 'in'
class Field:
CreatedAt = 'createdAt'
LastLogin = 'lastLogin'
Name = 'name'
OwnerName = 'ownerName'
SiteRole = 'siteRole'
Tags = 'tags'
UpdatedAt = 'updatedAt'
class Direction:
Desc = 'desc'
Asc = 'asc'
def __init__(self, pagenumber=1, pagesize=100):
self.pagenumber = pagenumber
self.pagesize = pagesize
self.sort = set()
self.filter = set()
def page_size(self, page_size):
self.pagesize = page_size
return self
def page_number(self, page_number):
self.pagenumber = page_number
return self
def apply_query_params(self, url):
params = []
if '?' in url:
url, existing_params = url.split('?')
params.append(existing_params)
        if self.pagenumber:
            params.append('pageNumber={0}'.format(self.pagenumber))
        if self.pagesize:
            params.append('pageSize={0}'.format(self.pagesize))
if len(self.sort) > 0:
sort_options = (str(sort_item) for sort_item in self.sort)
ordered_sort_options = sorted(sort_options)
params.append('sort={}'.format(','.join(ordered_sort_options)))
if len(self.filter) > 0:
filter_options = (str(filter_item) for filter_item in self.filter)
ordered_filter_options = sorted(filter_options)
params.append('filter={}'.format(','.join(ordered_filter_options)))
return "{0}?{1}".format(url, '&'.join(params))
class ImageRequestOptions(RequestOptionsBase):
# if 'high' isn't specified, the REST API endpoint returns an image with standard resolution
class Resolution:
High = 'high'
def __init__(self, imageresolution=None):
self.image_resolution = imageresolution
def apply_query_params(self, url):
params = []
if self.image_resolution:
params.append('resolution={0}'.format(self.image_resolution))
return "{0}?{1}".format(url, '&'.join(params))
class PDFRequestOptions(RequestOptionsBase):
# if 'high' isn't specified, the REST API endpoint returns an image with standard resolution
class PageType:
A3 = "a3"
A4 = "a4"
A5 = "a5"
B4 = "b4"
B5 = "b5"
Executive = "executive"
Folio = "folio"
Ledger = "ledger"
Legal = "legal"
Letter = "letter"
Note = "note"
Quarto = "quarto"
Tabloid = "tabloid"
class Orientation:
Portrait = "portrait"
Landscape = "landscape"
def __init__(self, page_type=None, orientation=None):
self.page_type = page_type
self.orientation = orientation
def apply_query_params(self, url):
params = []
if self.page_type:
params.append('type={0}'.format(self.page_type))
if self.orientation:
params.append('orientation={0}'.format(self.orientation))
return "{0}?{1}".format(url, '&'.join(params))
| 29.426087
| 96
| 0.593676
|
e3f41b58d36fb2ccbf5103673501abe76ecb8d0b
| 3,845
|
py
|
Python
|
tsai/models/RNN_FCN.py
|
Niklas-groiss-1/tsai
|
78bdcfb34515fcedd6e87a0a6911662397a8b954
|
[
"Apache-2.0"
] | null | null | null |
tsai/models/RNN_FCN.py
|
Niklas-groiss-1/tsai
|
78bdcfb34515fcedd6e87a0a6911662397a8b954
|
[
"Apache-2.0"
] | null | null | null |
tsai/models/RNN_FCN.py
|
Niklas-groiss-1/tsai
|
78bdcfb34515fcedd6e87a0a6911662397a8b954
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/107_models.RNN_FCN.ipynb (unless otherwise specified).
__all__ = ['RNN_FCN', 'LSTM_FCN', 'GRU_FCN', 'MRNN_FCN', 'MLSTM_FCN', 'MGRU_FCN']
# Cell
from ..imports import *
from .layers import *
# Cell
class _RNN_FCN_Base(Module):
def __init__(self, c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8, bidirectional=False, shuffle=True,
fc_dropout=0., conv_layers=[128,256,128], kss=[7, 5, 3], se=0):
print("Convlayers", conv_layers)
print("kss", kss)
if shuffle: assert seq_len is not None, 'need seq_len if shuffle=True'
# RNN
self.rnn = self._cell(seq_len if shuffle else c_in, hidden_size, num_layers=rnn_layers, bias=bias, batch_first=True,
dropout=cell_dropout, bidirectional=bidirectional)
self.rnn_dropout = nn.Dropout(rnn_dropout) if rnn_dropout else noop
self.shuffle = Permute(0,2,1) if not shuffle else noop # You would normally permute x. Authors did the opposite.
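        # When shuffle=True the (batch, n_vars, seq_len) input is fed to the RNN
        # as-is, so the RNN treats the variables axis as the sequence and uses
        # seq_len as its input size (hence the seq_len requirement above); when
        # shuffle=False the input is permuted to (batch, seq_len, n_vars) first.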
# FCN
assert len(conv_layers) == len(kss)
self.convblock1 = ConvBlock(c_in, conv_layers[0], kss[0])
self.se1 = SqueezeExciteBlock(conv_layers[0], se) if se != 0 else noop
self.convblock2 = ConvBlock(conv_layers[0], conv_layers[1], kss[1])
self.se2 = SqueezeExciteBlock(conv_layers[1], se) if se != 0 else noop
self.convblock3 = ConvBlock(conv_layers[1], conv_layers[2], kss[2])
#self.se3 = SqueezeExciteBlock(conv_layers[2], se) if se != 0 else noop
#self.convblock4 = ConvBlock(conv_layers[2], conv_layers[3], kss[3])
#self.se4 = SqueezeExciteBlock(conv_layers[3], se) if se != 0 else noop
#self.convblock5 = ConvBlock(conv_layers[3], conv_layers[4], kss[4])
#self.se5 = SqueezeExciteBlock(conv_layers[4], se) if se != 0 else noop
#self.convblock6 = ConvBlock(conv_layers[4], conv_layers[5], kss[5])
#self.se6 = SqueezeExciteBlock(conv_layers[5], se) if se != 0 else noop
#self.convblock7 = ConvBlock(conv_layers[5], conv_layers[6], kss[6])
self.gap = GAP1d(1)
# Common
self.concat = Concat()
self.fc_dropout = nn.Dropout(fc_dropout) if fc_dropout else noop
self.fc = nn.Linear(hidden_size * (1 + bidirectional) + conv_layers[-1], c_out)
def forward(self, x):
# RNN
rnn_input = self.shuffle(x) # permute --> (batch_size, seq_len, n_vars) when batch_first=True
output, _ = self.rnn(rnn_input)
last_out = output[:, -1] # output of last sequence step (many-to-one)
last_out = self.rnn_dropout(last_out)
# FCN
x = self.convblock1(x)
x = self.se1(x)
x = self.convblock2(x)
x = self.se2(x)
x = self.convblock3(x)
#x = self.se3(x)
#x = self.convblock4(x)
#x = self.se4(x)
#x = self.convblock5(x)
#x = self.se5(x)
#x = self.convblock6(x)
#x = self.se6(x)
#x = self.convblock7(x)
x = self.gap(x)
# Concat
x = self.concat([last_out, x])
x = self.fc_dropout(x)
x = self.fc(x)
return x
class RNN_FCN(_RNN_FCN_Base):
_cell = nn.RNN
class LSTM_FCN(_RNN_FCN_Base):
_cell = nn.LSTM
class GRU_FCN(_RNN_FCN_Base):
_cell = nn.GRU
class MRNN_FCN(_RNN_FCN_Base):
_cell = nn.RNN
def __init__(self, *args, se=16, **kwargs):
super().__init__(*args, se=se, **kwargs)
class MLSTM_FCN(_RNN_FCN_Base):
_cell = nn.LSTM
def __init__(self, *args, se=16, **kwargs):
super().__init__(*args, se=se, **kwargs)
class MGRU_FCN(_RNN_FCN_Base):
_cell = nn.GRU
def __init__(self, *args, se=16, **kwargs):
super().__init__(*args, se=se, **kwargs)
| 37.696078
| 159
| 0.619766
|
459737fdbc9fbeb255c2e944da29291479f8afdb
| 547
|
py
|
Python
|
opt/barbarian/control/hostname.py
|
go-barbarians/barbarian-base
|
55b6fca56fcded360099fa7ea823a862b7bd7e62
|
[
"Apache-2.0"
] | null | null | null |
opt/barbarian/control/hostname.py
|
go-barbarians/barbarian-base
|
55b6fca56fcded360099fa7ea823a862b7bd7e62
|
[
"Apache-2.0"
] | null | null | null |
opt/barbarian/control/hostname.py
|
go-barbarians/barbarian-base
|
55b6fca56fcded360099fa7ea823a862b7bd7e62
|
[
"Apache-2.0"
] | null | null | null |
#!/opt/python27/bin/python
import os, sys, getopt, socket
def main(argv):
fqdn = False
try:
        opts, args = getopt.gnu_getopt(argv, 'f')
except getopt.GetoptError:
print "usage: hostname [-f]"
sys.exit(2)
    for opt, arg in opts:
        if opt == '-f':
            fqdn = True
try:
if fqdn:
print socket.getfqdn()
else:
print socket.gethostname()
sys.exit()
except OSError:
print "error"
sys.exit(2)
if __name__ == '__main__':
main(sys.argv[1:])
| 19.535714
| 40
| 0.550274
|
c97d81b90f854b4996b394dae2cf1470f6da58fc
| 143,818
|
py
|
Python
|
youtube_dl/extractor/common.py
|
nose-gnome/youtube-dl
|
8d657f3efc6b36753cec07832bdd4fa9274145bb
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/common.py
|
nose-gnome/youtube-dl
|
8d657f3efc6b36753cec07832bdd4fa9274145bb
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/common.py
|
nose-gnome/youtube-dl
|
8d657f3efc6b36753cec07832bdd4fa9274145bb
|
[
"Unlicense"
] | 1
|
2021-11-28T04:50:32.000Z
|
2021-11-28T04:50:32.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import ssl
import sys
import time
import math
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies_SimpleCookie,
compat_etree_Element,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
str_or_none,
str_to_int,
strip_or_none,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_timestamp: UNIX timestamp of the moment the video was released.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available
(uploaded).
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url", "duration" attributes with the same semantics
as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
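# For example, a subclass could define a pattern with the required 'id' group
# (URL scheme and site are hypothetical):
#   _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
# so that _match_id('https://example.com/watch/4234987') returns '4234987'.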
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in the geo bypass context passed as the first argument. It may
contain the following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self._downloader.params.get('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self._downloader.params.get('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None)
and self._GEO_BYPASS
and self._downloader.params.get('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
exceptions.append(ssl.CertificateError)
try:
return self._downloader.urlopen(url_or_request)
except tuple(exceptions) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content
and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether error should be considered fatal,
i.e. whether it should cause ExtractorError to be raised;
otherwise a warning will be reported and extraction continued
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows accepting failed HTTP requests (non-2xx
status codes) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as a compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
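# Typical usage inside an extractor (API URL and query keys are hypothetical):
#   data = self._download_json(
#       'https://example.com/api/videos/%s' % video_id, video_id,
#       query={'format': 'json'}, fatal=False)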
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, **kwargs):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
video_info.update((key, value) for key, value in kwargs.items() if value is not None)
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
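# For example (entries built elsewhere; id and title are hypothetical):
#   return self.playlist_result(
#       entries, playlist_id='12345', playlist_title='Some playlist')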
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
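# For example (the webpage markup is hypothetical):
#   title = self._search_regex(
#       r'<h1 class="title">([^<]+)</h1>', webpage, 'title', default=None)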
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in the params dictionary. If no such credentials
are available, look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
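# Typical usage when a page exposes OpenGraph metadata (webpage is hypothetical):
#   title = self._og_search_title(webpage)
#   thumbnail = self._og_search_thumbnail(webpage, default=None)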
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld_list = list(re.finditer(JSON_LD_RE, html))
default = kwargs.get('default', NO_DEFAULT)
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
json_ld = []
for mobj in json_ld_list:
json_ld_item = self._parse_json(
mobj.group('json_ld'), video_id, fatal=fatal)
if not json_ld_item:
continue
if isinstance(json_ld_item, dict):
json_ld.append(json_ld_item)
elif isinstance(json_ld_item, (list, tuple)):
json_ld.extend(json_ld_item)
if json_ld:
json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
if json_ld:
return json_ld
if default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_type(e):
interaction_type = e.get('interactionType')
if isinstance(interaction_type, dict):
interaction_type = interaction_type.get('@type')
return str_or_none(interaction_type)
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if isinstance(interaction_statistic, dict):
interaction_statistic = [interaction_statistic]
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = extract_interaction_type(is_e)
if not interaction_type:
continue
# For the interaction count some sites provide a string instead of
# an integer (as per spec) with non-digit characters (e.g. ","),
# so extract the count with the more relaxed str_to_int
interaction_count = str_to_int(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
author = e.get('author')
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
# author can be an instance of 'Organization' or 'Person' types.
# both types can have a 'name' property (inherited from 'Thing' type). [1]
# however some websites are using 'Text' type instead.
# 1. https://schema.org/VideoObject
'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
for e in json_ld:
if '@context' in e:
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
continue
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
if expected_type is None:
continue
else:
break
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
if expected_type is None:
continue
else:
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
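# For example, to collect a login form's hidden fields (form id is hypothetical):
#   login_form = self._form_hidden_inputs('login-form', webpage)
#   login_form.update({'username': username, 'password': password})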
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
self.to_screen(
'%s: %s URL is invalid, skipping: %s'
% (video_id, item, error_to_compat_str(e.cause)))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal, data=data, headers=headers, query=query)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
if not isinstance(manifest, compat_etree_Element) and not fatal:
return []
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
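# Typical call from an extractor (the manifest URL is hypothetical):
#   formats = self._extract_m3u8_formats(
#       'https://example.com/master.m3u8', video_id, 'mp4',
#       entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
#   self._sort_formats(formats)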
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/ytdl-org/youtube-dl/issues/12211
# 3. https://github.com/ytdl-org/youtube-dl/issues/18923
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe the available qualities. On the other hand,
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without quality renditions.
# Fortunately, a master playlist can easily be distinguished from a media
# playlist based on the availability of particular tags. As per [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As per [1, 4.3.3.1] the #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in a master playlist, thus we can
# reliably detect a media playlist with this criterion.
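# Illustrative only (example tag values are made up): a media playlist carries
# lines such as '#EXT-X-TARGETDURATION:10' followed by segment URIs, whereas a
# master playlist instead carries '#EXT-X-STREAM-INF:BANDWIDTH=1280000,...'
# entries, each followed by the URI of a variant playlist.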
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (m3u8_id, group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Although the specification does not mention a NAME attribute for
# the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH')
or last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
# for DailyMotion
progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
if progressive_uri:
http_f = f.copy()
del http_f['manifest_url']
http_f.update({
'format_id': f['format_id'].replace('hls-', 'http-'),
'protocol': 'http',
'url': progressive_uri,
})
formats.append(http_f)
last_stream_inf = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
elif src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
xspf_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
mpd_doc, urlh = res
if mpd_doc is None:
return []
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
mpd_doc, mpd_id, mpd_base_url, mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We will only extract the ones
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
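# Illustrative example (values assumed): for a SegmentTemplate containing
# <SegmentTimeline><S t="0" d="5000" r="2"/></SegmentTimeline> with
# timescale="1000", the helper above yields an ms_info containing
# {'total_number': 3, 's': [{'t': 0, 'd': 5000, 'r': 2}], 'timescale': 1000, ...}.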
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
# First of all, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by the % string formatting operator used further below (see
# https://github.com/ytdl-org/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
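# For example (illustrative, assuming representation_id == 'video1'):
# '$RepresentationID$_$Number%05d$.m4s' is turned into 'video1_%(Number)05d.m4s',
# so that ('video1_%(Number)05d.m4s' % {'Number': 7}) yields 'video1_00007.m4s'.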
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
# str.replace returns a new string, so the result must be assigned back
t = t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/ytdl-org/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/ytdl-org/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# If there is a fragments key available then we correctly recognized fragmented media.
# Otherwise we will assume unfragmented media with direct access. Technically, such
# assumption is not necessarily correct since we may simply have no support for
# some forms of fragmented media renditions yet, but for now we'll use this fallback.
if 'fragments' in representation_ms_info:
f.update({
# NB: mpd_url may be empty when MPD manifest is parsed from a string
'url': mpd_url or base_url,
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
formats.append(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
ism_doc, urlh = res
if ism_doc is None:
return []
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type, type_info={}):
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
# so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
# For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
media_tags = [(media_tag, media_tag_name, media_type, '')
for media_tag, media_tag_name, media_type
in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
for media_tag, _, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = strip_or_none(media_attributes.get('src'))
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
s_attr = extract_attributes(source_tag)
# data-video-src and data-src are non-standard but seen
# several times in the wild
src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
if not src:
continue
f = parse_content_type(s_attr.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# width, height, res, label and title attributes are
# all not standard but seen several times in the wild
labels = [
s_attr.get(lbl)
for lbl in ('label', 'title')
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
height = (int_or_none(s_attr.get('height'))
or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
if not resolution:
continue
width = width or resolution.get('width')
height = height or resolution.get('height')
for lbl in labels:
tbr = parse_bitrate(lbl)
if tbr:
break
else:
tbr = None
f.update({
'width': width,
'height': height,
'tbr': tbr,
'format_id': s_attr.get('label') or s_attr.get('title'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = strip_or_none(track_attributes.get('src'))
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
signed = 'hdnea=' in manifest_url
if not signed:
# https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
manifest_url = re.sub(
r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
'', manifest_url).strip('?')
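# Illustrative effect of the substitution above (URL is made up):
# 'http://example.akamaihd.net/i/video.csmil/master.m3u8?b=100,200&__b__=150'
# becomes 'http://example.akamaihd.net/i/video.csmil/master.m3u8'.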
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
m3u8_formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
http_host = hosts.get('http')
if http_host and m3u8_formats and not signed:
REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
qualities_length = len(qualities)
if len(m3u8_formats) in (qualities_length, qualities_length + 1):
i = 0
for f in m3u8_formats:
if f['vcodec'] != 'none':
for protocol in ('http', 'https'):
http_f = f.copy()
del http_f['manifest_url']
http_url = re.sub(
REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
http_f.update({
'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
'url': http_url,
'protocol': protocol,
})
formats.append(http_f)
i += 1
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': clean_html(video_data.get('description')),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
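# Illustrative (timestamp is assumed): self._live_title('Some stream') would
# return something like 'Some stream 2021-03-09 21:31'.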
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies_SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies_SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in the Set-Cookie header and expect the first (old) one to be set rather
than the second (new). However, per RFC 6265 the newer cookie
should be set into the cookie store, which is what actually happens.
We will work around this issue by resetting the cookie to
the first one manually.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
"""
for header, cookies in url_handle.headers.items():
if header.lower() != 'set-cookie':
continue
if sys.version_info[0] >= 3:
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(
r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
if cookie_value:
value, domain = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False)
or self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
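# Illustrative (entries are made up): merging {'en': [a]} with {'en': [b], 'de': [c]}
# yields {'en': [a, b], 'de': [c]}, except that items from the second dict whose
# 'url' already appears in the first list are dropped.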
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False)
or self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False)
and (self._get_login_info()[0] is not None
or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
| 46.892077
| 172
| 0.546886
|
509a81e4c8c718cb5aa127bf5b5aa3cc7c9627d7
| 12,676
|
py
|
Python
|
plugins/modules/oci_opsi_resource_forecast_trend_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_resource_forecast_trend_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_resource_forecast_trend_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_resource_forecast_trend_facts
short_description: Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
- Get Forecast predictions for CPU and Storage resources since a time in the past.
version_added: "2.9"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU and STORAGE.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples: P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months). Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D
type: list
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
utilization_level:
description:
- "Filter by utilization level by the following buckets:
- HIGH_UTILIZATION: DBs with utilization greater or equal than 75.
- LOW_UTILIZATION: DBs with utilization lower than 25.
- MEDIUM_HIGH_UTILIZATION: DBs with utilization greater or equal than 50 but lower than 75.
- MEDIUM_LOW_UTILIZATION: DBs with utilization greater or equal than 25 but lower than 50."
type: str
choices:
- "HIGH_UTILIZATION"
- "LOW_UTILIZATION"
- "MEDIUM_HIGH_UTILIZATION"
- "MEDIUM_LOW_UTILIZATION"
confidence:
description:
- This parameter is used to change the confidence level of the data ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific resource_forecast_trend
oci_opsi_resource_forecast_trend_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
resource_metric: resource_metric_example
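# Illustrative only: the optional parameters below are taken from this module's
# documentation; all values are placeholders.
- name: Get a resource_forecast_trend with optional filters
  oci_opsi_resource_forecast_trend_facts:
    compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
    resource_metric: CPU
    analysis_time_interval: P90D
    forecast_model: ML_AUTO
    utilization_level: HIGH_UTILIZATION
    confidence: 95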
"""
RETURN = """
resource_forecast_trend:
description:
- ResourceForecastTrend resource
returned: on success
type: complex
contains:
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: string
sample: 2020-12-06T00:00:00.000Z
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: string
sample: 2020-12-06T00:00:00.000Z
resource_metric:
description:
- Defines the type of resource metric (CPU, STORAGE)
returned: on success
type: string
sample: STORAGE
usage_unit:
description:
- Displays usage unit (CORES, GB)
returned: on success
type: string
sample: CORES
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: string
sample: LINEAR
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: string
sample: 2020-05-01T00:00:00.000Z
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: string
sample: 2020-05-01T00:00:00.000Z
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: {
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"resource_metric": "STORAGE",
"usage_unit": "CORES",
"pattern": "LINEAR",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.opsi import OperationsInsightsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ResourceForecastTrendFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"compartment_id",
"resource_metric",
]
def get_resource(self):
optional_get_method_params = [
"analysis_time_interval",
"time_interval_start",
"time_interval_end",
"database_type",
"database_id",
"statistic",
"forecast_days",
"forecast_model",
"utilization_level",
"confidence",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.summarize_database_insight_resource_forecast_trend,
compartment_id=self.module.params.get("compartment_id"),
resource_metric=self.module.params.get("resource_metric"),
**optional_kwargs
)
ResourceForecastTrendFactsHelperCustom = get_custom_class(
"ResourceForecastTrendFactsHelperCustom"
)
class ResourceFactsHelper(
ResourceForecastTrendFactsHelperCustom, ResourceForecastTrendFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
compartment_id=dict(type="str", required=True),
resource_metric=dict(type="str", required=True),
analysis_time_interval=dict(type="str"),
time_interval_start=dict(type="str"),
time_interval_end=dict(type="str"),
database_type=dict(
type="list", choices=["ADW-S", "ATP-S", "ADW-D", "ATP-D"]
),
database_id=dict(type="list"),
statistic=dict(type="str", choices=["AVG", "MAX"]),
forecast_days=dict(type="int"),
forecast_model=dict(
type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
),
utilization_level=dict(
type="str",
choices=[
"HIGH_UTILIZATION",
"LOW_UTILIZATION",
"MEDIUM_HIGH_UTILIZATION",
"MEDIUM_LOW_UTILIZATION",
],
),
confidence=dict(type="int"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="resource_forecast_trend",
service_client_class=OperationsInsightsClient,
namespace="opsi",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(resource_forecast_trend=result)
if __name__ == "__main__":
main()
| 35.707042
| 153
| 0.591354
|
60d4c66fabf8b1dd63221291b991b01a550b8fdf
| 565
|
py
|
Python
|
01_Day_Introduction/helloworld.py
|
fernandovicentinpavanello/30-days-of-Python
|
3e04ef64a0997bb71eeac57911e47f2f6414ae75
|
[
"MIT"
] | 1
|
2022-03-08T07:08:39.000Z
|
2022-03-08T07:08:39.000Z
|
01_Day_Introduction/helloworld.py
|
luizpavanello/30-days-of-Python
|
3c727a76b6185a5ba684c393c5cdfc759c3c4b01
|
[
"MIT"
] | null | null | null |
01_Day_Introduction/helloworld.py
|
luizpavanello/30-days-of-Python
|
3c727a76b6185a5ba684c393c5cdfc759c3c4b01
|
[
"MIT"
] | null | null | null |
# Introduction
# Day 1 - 30DaysOfPython Challenge
print("Hello World!")
print(3 + 5) # addition(+)
print(5 - 3) # subtraction(-)
print(3 * 5) # multiplication(*)
print(5 / 3) # division(/)
print(5 ** 2) # exponentiation(**)
print(5 % 3) # modulus(%)
print(5 // 3) # floor division(//)
# Checking data types
print(type(3)) # int
print(type(3.14)) # float
print(type(1 + 8j)) # complex
print(type("Hello World!")) # str
print(type([1, 2, 3])) # list
print(type({1: "one", 2: "two", 3: "three"})) # dict
print(type({1, 2, 3})) # set
print(type((1, 2, 3))) # tuple
| 23.541667
| 52
| 0.60354
|
857bb027a84e556b644958b1f5a3836348c0b7a6
| 2,170
|
py
|
Python
|
experiments/platynereis/cells/train_boundaries.py
|
JonasHell/torch-em
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
[
"MIT"
] | 13
|
2021-03-09T21:31:09.000Z
|
2022-03-21T05:24:26.000Z
|
experiments/platynereis/cells/train_boundaries.py
|
JonasHell/torch-em
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
[
"MIT"
] | 16
|
2021-03-02T23:19:34.000Z
|
2022-03-25T19:43:41.000Z
|
experiments/platynereis/cells/train_boundaries.py
|
JonasHell/torch-em
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
[
"MIT"
] | 4
|
2021-05-18T08:29:33.000Z
|
2022-02-11T12:16:20.000Z
|
import numpy as np
import torch_em
from torch_em.model import AnisotropicUNet
from torch_em.data.datasets import get_platynereis_cell_loader
def get_model():
model = AnisotropicUNet(
scale_factors=4*[[2, 2, 2]],
in_channels=1,
out_channels=1,
initial_features=32,
gain=2,
final_activation="Sigmoid"
)
return model
def get_loader(path, is_train, n_samples):
batch_size = 1
patch_shape = [32, 256, 256]
if is_train:
sample_ids = list(range(1, 10))
rois = {9: np.s_[:, :600, :]}
else:
sample_ids = [9]
rois = {9: np.s_[:, 600:, :]}
loader = get_platynereis_cell_loader(
path, patch_shape, sample_ids,
boundaries=True,
rois=rois,
batch_size=batch_size,
n_samples=n_samples,
download=True,
shuffle=True,
num_workers=8*batch_size,
)
return loader
def train_boundaries(args):
model = get_model()
train_loader = get_loader(args.input, True, n_samples=1000)
val_loader = get_loader(args.input, False, n_samples=100)
name = "boundary_model"
trainer = torch_em.default_segmentation_trainer(
name=name,
model=model,
train_loader=train_loader,
val_loader=val_loader,
learning_rate=1e-4,
mixed_precision=True,
log_image_interval=50,
optimizer_kwargs={"weight_decay": 0.0005}
)
if args.from_checkpoint:
trainer.fit(args.n_iterations, "latest")
else:
trainer.fit(args.n_iterations)
def check(args, train=True, val=True, n_images=2):
from torch_em.util.debug import check_loader
if train:
print("Check train loader")
loader = get_loader(args.input, is_train=True, n_samples=100)
check_loader(loader, n_images)
if val:
print("Check val loader")
loader = get_loader(args.input, is_train=False, n_samples=100)
check_loader(loader, n_images)
if __name__ == "__main__":
parser = torch_em.util.parser_helper()
args = parser.parse_args()
if args.check:
check(args)
else:
train_boundaries(args)
| 26.144578
| 70
| 0.636866
|
3bc0fdc151b571c594904676f785e61720cd8a74
| 497
|
py
|
Python
|
tools/configen/setup.py
|
evdcush/hydra
|
5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47
|
[
"MIT"
] | null | null | null |
tools/configen/setup.py
|
evdcush/hydra
|
5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47
|
[
"MIT"
] | null | null | null |
tools/configen/setup.py
|
evdcush/hydra
|
5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from setuptools import find_packages, setup
setup(
name="hydra-configen",
version="0.9.0dev5",
packages=find_packages(include=["configen"]),
entry_points={"console_scripts": ["configen = configen.configen:main"]},
author="Omry Yadan",
author_email="omry@fb.com",
url="http://hydra.cc",
include_package_data=True,
install_requires=["hydra-core>=1.0.0", "jinja2"],
)
| 27.611111
| 76
| 0.688129
|
ca487bb7f251a2d4b62b026934630bc25332c8b5
| 87
|
py
|
Python
|
licenses/config.py
|
jandolezal/licence
|
98a51043311f02ab9207731d10695e35e552b85c
|
[
"MIT"
] | null | null | null |
licenses/config.py
|
jandolezal/licence
|
98a51043311f02ab9207731d10695e35e552b85c
|
[
"MIT"
] | null | null | null |
licenses/config.py
|
jandolezal/licence
|
98a51043311f02ab9207731d10695e35e552b85c
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
conf = ConfigParser()
conf.read('config.ini')
| 12.428571
| 37
| 0.770115
|
07cb9a55d40dcfa2a4a0e33a9f267e784e8d5f1c
| 582
|
py
|
Python
|
src/20_busquedalineal.py
|
EliazBobadilla/POO-y-Algoritmos-con-Python
|
497f7a294e26220828c325785abf0bac392c3a18
|
[
"MIT"
] | 4
|
2021-02-28T17:18:10.000Z
|
2021-06-05T15:19:45.000Z
|
src/20_busquedalineal.py
|
EliazBobadilla/POO-y-Algoritmos-con-Python
|
497f7a294e26220828c325785abf0bac392c3a18
|
[
"MIT"
] | null | null | null |
src/20_busquedalineal.py
|
EliazBobadilla/POO-y-Algoritmos-con-Python
|
497f7a294e26220828c325785abf0bac392c3a18
|
[
"MIT"
] | 3
|
2021-02-28T17:18:12.000Z
|
2021-06-05T15:19:47.000Z
|
import random
def busqueda_lineal(lista, objetivo):
match = False
for elemento in lista: # O(n)
if elemento == objetivo:
match = True
break
return match
if __name__ == "__main__":
    tamano_de_lista = int(input("How big should the list be? "))
    objetivo = int(input("Which number do you want to find? "))
lista = [random.randint(0, 100) for i in range(tamano_de_lista)]
encontrado = busqueda_lineal(lista, objetivo)
print(lista)
    print(f'The element {objetivo} {"is" if encontrado else "is not"} in the list')
| 24.25
| 86
| 0.64433
|
1f847ae38ef86b1d059a23dc84d93be1b3990223
| 1,890
|
py
|
Python
|
questions/maximum-sum-circular-subarray/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 141
|
2017-12-12T21:45:53.000Z
|
2022-03-25T07:03:39.000Z
|
questions/maximum-sum-circular-subarray/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 32
|
2015-10-05T14:09:52.000Z
|
2021-05-30T10:28:41.000Z
|
questions/maximum-sum-circular-subarray/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 56
|
2015-09-30T05:23:28.000Z
|
2022-03-08T07:57:11.000Z
|
"""
Given a circular array C of integers represented by A, find the maximum possible sum of a non-empty subarray of C.
Here, a circular array means the end of the array connects to the beginning of the array. (Formally, C[i] = A[i] when 0 <= i < A.length, and C[i+A.length] = C[i] when i >= 0.)
Also, a subarray may only include each element of the fixed buffer A at most once. (Formally, for a subarray C[i], C[i+1], ..., C[j], there does not exist i <= k1, k2 <= j with k1 % A.length = k2 % A.length.)
Example 1:
Input: [1,-2,3,-2]
Output: 3
Explanation: Subarray [3] has maximum sum 3
Example 2:
Input: [5,-3,5]
Output: 10
Explanation: Subarray [5,5] has maximum sum 5 + 5 = 10
Example 3:
Input: [3,-1,2,-1]
Output: 4
Explanation: Subarray [2,-1,3] has maximum sum 2 + (-1) + 3 = 4
Example 4:
Input: [3,-2,2,-3]
Output: 3
Explanation: Subarray [3] and [3,-2,2] both have maximum sum 3
Example 5:
Input: [-2,-3,-1]
Output: -1
Explanation: Subarray [-1] has maximum sum -1
Note:
-30000 <= A[i] <= 30000
1 <= A.length <= 30000
"""
from typing import List
class Solution:
def maxSubarraySumCircular(self, A: List[int]) -> int:
if not A:
return 0
m = max(A)
su = sum(A)
rs = 0
ss1, ss2 = [0] * len(A), [0] * len(A)
s = 0
for i, e in enumerate(A):
rs += e
ss1[i] = rs
ss2[i] = su - rs
s += e
m = max(m, s)
if s < 0:
s = 0
for i in range(len(A)):
if i == 0:
continue
ss1[i] = max(ss1[i], ss1[i - 1])
for i in reversed(range(len(A))):
if i == len(A) - 1:
continue
ss2[i] = max(ss2[i], ss2[i + 1])
for i in range(len(A)):
m = max(m, ss1[i] + ss2[i])
# print(ss1, ss2)
return m
| 22.5
| 209
| 0.514286
|
ea818c14dbad8162d2309e8c388e9fc2d4075b00
| 9,392
|
py
|
Python
|
api/views/site.py
|
ropon/newpanel
|
c032a22d2bcf433300e733e074fb7921bcfca733
|
[
"Apache-2.0"
] | 1
|
2021-09-15T05:34:14.000Z
|
2021-09-15T05:34:14.000Z
|
api/views/site.py
|
ropon/newpanel
|
c032a22d2bcf433300e733e074fb7921bcfca733
|
[
"Apache-2.0"
] | null | null | null |
api/views/site.py
|
ropon/newpanel
|
c032a22d2bcf433300e733e074fb7921bcfca733
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/1 16:57
# @Author : Ropon
# @File : site.py
import json
from flask import Blueprint, request
from flask_restful import Api, Resource, reqparse
from api.models import Site, SiteInfo
from api.utils.response import BaseResponse, api_abort
from api.utils.config import www_path, logs_path, per_page, default_index, php_ver, extra_kwargs_dict
from api.utils.tools import check
from api.utils.site import PanelSite
site_bp = Blueprint("site_bp", __name__, url_prefix="/api/v1")
api = Api(site_bp, default_mediatype="application/json;charset=utf-8")
class SiteView(Resource):
def __init__(self):
self.parser = reqparse.RequestParser(bundle_errors=True)
if request.method == "POST":
            self.parser.add_argument('site_name', type=str, required=True, help='site name cannot be empty', location="json", trim=True)
            self.parser.add_argument('bind_domain', type=str, required=True, help='bind domain cannot be empty', location="json",
                                     trim=True)
        elif request.method in ("DELETE", "PUT"):
            self.parser.add_argument('nid', type=int, required=True, help='nid cannot be empty', location="json", trim=True)
self.siteop = PanelSite()
def get(self):
nid = request.args.get("nid", "", type=int)
status = request.args.get("status", 1, type=int)
page = request.args.get("page", 1, type=int)
num = request.args.get("num", per_page, type=int)
extra = {"status": status}
site_name = request.args.get("site_name")
bind_domain = request.args.get("bind_domain")
res = BaseResponse()
if nid:
extra["nid"] = nid
if site_name:
extra["site_name"] = site_name
sites = Site.search(extra, bind_domain, page, num)
if not sites.items:
page = sites.pages
sites = Site.search(extra, bind_domain, page, num)
res.pages = sites.pages
if sites.has_prev:
res.prev = page - 1
if sites.has_next:
res.next = page + 1
site_list = []
for site in sites.items:
site_dict = site.to_json()
site_info = SiteInfo.sget(site.nid)
if site_info:
site_dict["site_info"] = site_info.to_json()
site_list.append(site_dict)
res.page = page
res.total = sites.pages * num
res.data = site_list
return res.dict
def post(self):
rets = self.siteop.check()
if rets:
api_abort(httpcode=400, errcode=4025, key=rets)
self.parser.parse_args()
orgin_data = request.json
site_name = orgin_data.get("site_name", "")
bind_domain = orgin_data.get("bind_domain", "")
if site_name == "" or bind_domain == "":
api_abort(errcode=4012)
orgin_data["root_path"] = orgin_data.get("root_path") or f'{www_path}/{site_name}'
orgin_data["note"] = orgin_data.get("note") or site_name
        # Validate the domain and root path, and check for duplicate sites/domains
parms = {"site_name": site_name}
check(Site, bind_domain, parms, orgin_data.get("root_path"))
orgin_data.pop("ftpinfo")
orgin_data.pop("mysqlinfo")
# orgin_data.pop("sslinfo")
siteinfo_orgin_data = orgin_data.pop("site_info", {})
if siteinfo_orgin_data.get('is_log'):
siteinfo_orgin_data["log_path"] = f'{logs_path}/{site_name}'
else:
siteinfo_orgin_data["log_path"] = ""
        # Normalize the port / is_ssl values
if siteinfo_orgin_data.get("port") == 80:
siteinfo_orgin_data["is_ssl"] = False
elif siteinfo_orgin_data.get("port") == 443:
siteinfo_orgin_data["is_ssl"] = True
if siteinfo_orgin_data.get("is_ssl") == False:
siteinfo_orgin_data["port"] = 80
elif siteinfo_orgin_data.get("is_ssl") == True:
siteinfo_orgin_data["port"] = 443
extra_kwargs = siteinfo_orgin_data.pop("extra_kwargs", {}) or extra_kwargs_dict
domain_301 = siteinfo_orgin_data.get("domain_301")
if extra_kwargs.get('set_301') and domain_301 == "":
api_abort(httpcode=400, errcode=4025, key="开启301,跳转域名不能为空")
check(obj_domain=domain_301)
siteinfo_orgin_data["extra_kwargs"] = json.dumps(extra_kwargs)
phpver = siteinfo_orgin_data.get("php_ver") or php_ver
create_dict = {
"site_name": site_name,
"bind_domain": bind_domain.replace(",", " "),
"root_path": orgin_data.get("root_path"),
"is_ssl": siteinfo_orgin_data.get("is_ssl") or False,
"is_log": siteinfo_orgin_data.get('is_log') or False,
"log_path": siteinfo_orgin_data.get("log_path"),
"domain_301": siteinfo_orgin_data.get("domain_301") or "",
"default_index": siteinfo_orgin_data.get("default_index") or default_index,
"php_ver": phpver.replace(".", ""),
"extra_kwargs": extra_kwargs
}
        # Create the site
res = self.siteop.create_site(**create_dict)
if res:
api_abort(httpcode=400, errcode=4025, key=res)
siteobj = Site(**orgin_data)
siteobj.add()
site_dict = siteobj.to_json()
siteinfo_orgin_data["site_id"] = siteobj.nid
siteinfoobj = SiteInfo(**siteinfo_orgin_data)
siteinfoobj.add()
site_info = SiteInfo.sget(siteobj.nid)
if site_info:
site_dict["site_info"] = site_info.to_json()
res = BaseResponse()
res.data = site_dict
return res.dict
def delete(self):
self.parser.parse_args()
nid = request.json.get("nid")
print(nid)
if nid == "":
return api_abort(errcode=4012)
siteobj = Site.get(nid)
siteinfoobj = SiteInfo.sget(nid)
if siteobj:
self.siteop.delete_site(site_name=siteobj.site_name, root_path=siteobj.root_path, is_ssl=siteinfoobj.is_ssl,
log_path=siteinfoobj.log_path)
if siteinfoobj:
SiteInfo.sdelete(nid)
Site.delete(nid)
res = BaseResponse()
res.dict.pop("data")
return res.dict
else:
return api_abort(errcode=4018)
def put(self):
self.parser.parse_args()
fields = {"nid", "site_name", "bind_domain", "root_path", "site_info", "note"}
orgin_data = {}
if fields.issubset(set(request.json.keys())):
for key in fields:
orgin_data[key] = request.json.get(key)
orgin_data = request.json
nid = orgin_data.pop("nid")
if nid == "":
return api_abort(errcode=4012)
bind_domain = orgin_data.get("bind_domain")
        # The site name and root path cannot be modified
orgin_data.pop("site_name", "")
orgin_data.pop("root_path", "")
site_obj = Site.get(nid)
if site_obj:
check(Site, obj_domain=bind_domain, site_name=site_obj.site_name)
bind_domain = bind_domain or site_obj.bind_domain
siteinfo_orgin_data = orgin_data.pop("site_info", {})
if siteinfo_orgin_data.get('is_log'):
siteinfo_orgin_data["log_path"] = f'{logs_path}/{site_obj.site_name}'
else:
siteinfo_orgin_data["log_path"] = ""
if siteinfo_orgin_data:
extra_kwargs = siteinfo_orgin_data.pop("extra_kwargs", {})
if extra_kwargs:
siteinfo_orgin_data["extra_kwargs"] = json.dumps(extra_kwargs)
if orgin_data:
site_info = SiteInfo.sget(nid)
php_ver = siteinfo_orgin_data.get('php_ver') or site_info.php_ver
domain_301 = siteinfo_orgin_data.get("domain_301")
if extra_kwargs.get('set_301') and domain_301 == "":
                        api_abort(httpcode=400, errcode=4025, key="301 redirect is enabled but the redirect domain is empty")
check(obj_domain=domain_301)
create_dict = {
"site_name": site_obj.site_name,
"bind_domain": bind_domain.replace(",", " "),
"root_path": site_obj.root_path,
"is_ssl": siteinfo_orgin_data.get('is_ssl'),
"is_log": siteinfo_orgin_data.get('is_log'),
"log_path": siteinfo_orgin_data.get('log_path'),
"domain_301": domain_301,
"default_index": site_info.default_index,
"php_ver": php_ver.replace(".", ""),
"extra_kwargs": extra_kwargs
}
                    # Updating a site reuses create_site
res = self.siteop.create_site(**create_dict)
if res:
api_abort(httpcode=400, errcode=4025, key=res)
SiteInfo.supdate(nid, siteinfo_orgin_data)
Site.update(nid, orgin_data)
site_dict = site_obj.to_json()
if site_info:
site_dict["site_info"] = site_info.to_json()
res = BaseResponse()
res.data = site_dict
return res.dict
else:
                    # No values to update were provided
return api_abort(errcode=4019)
else:
            # Record does not exist
return api_abort(errcode=4018)
api.add_resource(SiteView, "/site")
| 42.116592
| 120
| 0.580068
|
c26def24de34feefee0af61fdd2728bcc4ab8c1a
| 935
|
py
|
Python
|
Newbies/formatting.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2021-12-17T11:03:13.000Z
|
2021-12-17T11:03:13.000Z
|
Newbies/formatting.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2020-02-05T00:14:43.000Z
|
2020-02-06T09:22:49.000Z
|
Newbies/formatting.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Formatting examples."""
print('{0:9} | {1:8}'.format('Vegetable', 'Quantity'))
print('{0:9} | {1:8}'.format('Asparagus', 3))
print('{0:9} | {1:8}'.format('Onions', 10))
print('{0:9} | {1:<8}'.format('Vegetable', 'Quantity'))
print('{0:9} | {1:<8}'.format('Asparagus', 3))
print('{0:9} | {1:<8}'.format('Onions', 10))
print('{0:9} | {1:<8}'.format('Vegetable', 'Quantity'))
print('{0:9} | {1:<8.2f}'.format('Asparagus', 2.33333))
print('{0:9} | {1:<8.2f}'.format('Onions', 10))
print('{0:^9} | {1:^8}'.format('Vegetable', 'Quantity'))
print('{0:^9} | {1:^8}'.format('Asparagus', 3))
print('{0:^9} | {1:^8}'.format('Onions', 10))
print('{0:>9} | {1:>8}'.format('Vegetable', 'Quantity'))
print('{0:>9} | {1:>8.2f}'.format('Asparagus', 2.33333))
print('{0:>9} | {1:>8.2f}'.format('Onions', 10))
VEGETABLE = input('Enter a name of a vegetable: ')
print()
print('{} is a lovely vegetable.'.format(VEGETABLE))
| 34.62963
| 56
| 0.567914
|
b67cae1910e3a352aee25cdea444c02f4e3a7e03
| 7,124
|
py
|
Python
|
HLACERPipeline/Training/Stanford.py
|
CDCgov/DCPC
|
c3fadef1bd6345e01a58afef051491d8ef6a7f93
|
[
"Apache-2.0"
] | 6
|
2018-11-03T22:43:35.000Z
|
2022-02-15T17:51:33.000Z
|
HLACERPipeline/Training/Stanford.py
|
CDCgov/DCPC
|
c3fadef1bd6345e01a58afef051491d8ef6a7f93
|
[
"Apache-2.0"
] | 2
|
2019-04-08T03:42:59.000Z
|
2019-10-28T13:42:59.000Z
|
HLACERPipeline/Training/Stanford.py
|
CDCgov/DCPC
|
c3fadef1bd6345e01a58afef051491d8ef6a7f93
|
[
"Apache-2.0"
] | 10
|
2017-04-10T21:40:22.000Z
|
2022-02-21T16:50:10.000Z
|
import sys
import os
import json
import glob
from Client import ServiceClient
import time
import traceback
sys.path.append('..')
from PostTokenizer_Stanford import PostTokenizer
from PostSentenceSplitter_Stanford import PostSentenceSplitter
from FeatureExtractor import FeatureExtractor
from merge_bio import merge_bio
from CRFRunner import CRFRunner
from BIOtoANN import BIOtoANN
from ANNtoLIF import ANNtoLIF
from StanfordPOSTagger import LAPPS_StanfordPOSTagger
from stanford_wrapper import *
"""
This file contains the main function for running the Stanford pipeline. It starts by generating BIO files and finishes when
the resulting LIF files have been generated.
"""
def text_to_lif(text):
lif_wrapper = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:wrap.text_1.0.0')
lif_result = lif_wrapper.execute(text)
return lif_result
def tokenizer(lif):
stanford_tokenizer = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:stanford.tokenizer_2.0.0')
tokenier_lif = stanford_tokenizer.execute(lif)
return tokenier_lif
def stanford_tokenizer(lif):
st = StanfordTokenizer(lif)
st.call_tokenizer()
stanford_tokenizer_lif = json.dumps(st.lif_parser.data)
return stanford_tokenizer_lif
def post_tokenizer(lif, ann):
post_tokenizer = PostTokenizer(ann_filename=ann, lif_string=lif)
post_tokenizer.load_ann()
post_tokenizer.extract_tag()
post_tokenizer_lif = json.dumps(post_tokenizer.lif_loader.data)
return post_tokenizer_lif
def sentence_splitter(lif):
stanford_sentence_splitter = ServiceClient('http://vassar.lappsgrid.org/wsdl/anc:stanford.splitter_2.0.0')
sentence_lif = stanford_sentence_splitter.execute(lif)
return sentence_lif
def stanford_sentence_splitter(lif):
sentence = StanfordSentenceSplitter(lif)
sentence.call_splitter()
    stanford_sentence_splitter_lif = json.dumps(sentence.lif_parser.data)
    return stanford_sentence_splitter_lif
def post_sentence_splitter(lif):
post_sentence_splitter = PostSentenceSplitter(lif_string=lif)
post_sentence_splitter.parse_sentence()
post_sentence_splitter_lif = json.dumps(post_sentence_splitter.parser.data)
return post_sentence_splitter_lif
def pos_tagger(lif):
stanford_pos = ServiceClient(
'http://eldrad.cs-i.brandeis.edu:8080/service_manager/wsdl/brandeis_eldrad_grid_1:stanfordnlp.postagger_2.0.3')
pos_lif = stanford_pos.execute(lif)
return pos_lif
def stanford_pos_tagger(lif):
tagger = LAPPS_StanfordPOSTagger(lif_string=lif)
tagger.pos_tagger()
pos_lif = json.dumps(tagger.lif_parser.data)
return pos_lif
def feature_extractor(lif, file_num, left_info, right_info):
extractor = FeatureExtractor(lif_string=lif)
output_filename = "output/bio/stanford/train/" + str(file_num) + ".bio"
position_filename = 'output/bio/stanford/tagged/' + file_num + '.pos'
extractor.extract_tokens()
extractor.filter_tokens()
extractor.save_position(position_filename)
extractor.extract_pos()
# extractor.extract_snomedct()
if int(left_info) != 0 or int(right_info) != 0:
extractor.extract_neighbor(int(left_info), int(right_info), 1)
extractor.write_bio(output_filename)
def workflow_run(ann_filename):
file_num = str(ann_filename).split('/')[2].split('.')[0]
input_file = open(ann_filename)
ann_data = json.load(input_file)
input_text = ann_data['__text']
lif_result = text_to_lif(input_text)
tokenizer_lif = ""
try:
tokenizer_lif = tokenizer(lif_result)
except:
tokenizer_lif = stanford_tokenizer(lif_result)
post_tokenizer_lif = post_tokenizer(tokenizer_lif, ann_filename)
sentence_lif = ""
    try:
        sentence_lif = sentence_splitter(post_tokenizer_lif)
    except:
        # fall back to the local Stanford wrapper if the remote splitter fails
        sentence_lif = stanford_sentence_splitter(post_tokenizer_lif)
post_sentence_lif = post_sentence_splitter(sentence_lif)
pos_lif = ""
try:
pos_lif = pos_tagger(post_sentence_lif)
except:
pos_lif = stanford_pos_tagger(post_sentence_lif)
feature_extractor(pos_lif, file_num, 2, 2)
def run_batch(ann_files):
ann_list = glob.glob(ann_files)
for ann_filename in ann_list:
time.sleep(10)
try:
ann_filename = ann_filename.replace('\\', '/')
file_num = str(ann_filename).split('/')[2].split('.')[0]
print(ann_filename)
workflow_run(ann_filename)
except:
traceback.print_exc()
print("Exceptions occurs when processing the file ", ann_filename)
def create_output_dir():
bio_output_1 = 'output/bio/stanford/train'
bio_output_2 = 'output/bio/stanford/tagged'
ann_output_1 = 'output/bio/stanford/ann'
lif_output = 'output/stanford_lif'
ann_output_2 = 'output/stanford_ann'
if not os.path.exists(bio_output_1):
os.mkdir(bio_output_1)
if not os.path.exists(bio_output_2):
os.mkdir(bio_output_2)
if not os.path.exists(ann_output_1):
os.mkdir(ann_output_1)
if not os.path.exists(lif_output):
os.mkdir(lif_output)
if not os.path.exists(ann_output_2):
os.mkdir(ann_output_2)
if __name__ == "__main__":
start_time = time.time()
ann_folder = 'input/CDC_ann/*.ann'
create_output_dir()
run_batch(ann_folder)
bio_folder = 'output/bio/stanford/train/*.bio'
train_bio = 'output/bio/stanford/stanford_2.bio'
train_files = glob.glob(bio_folder)
merge_bio(train_files, train_bio)
finish_time = time.time()
print("Finish Processing all files! --- %s seconds ---" % (finish_time - start_time))
start_train = time.time()
model_file = 'output/bio/stanford/stanford_model'
template_file = 'output/bio/stanford/template'
crf_runner = CRFRunner(train_bio, bio_folder, model_file=model_file, template_file=template_file, source='stanford')
crf_runner.crf_train()
crf_runner.crf_test()
print("Finish Train CRF! --- %s seconds ---" % (time.time() - start_train))
start_eval = time.time()
tagged_bio_folder = 'output/bio/stanford/tagged/*.bio'
tagged_bio_files = glob.glob(tagged_bio_folder)
merge_bio(tagged_bio_files, 'output/bio/stanford/stanford_tagged.bio')
for bio_filename in tagged_bio_files:
bio_filename = bio_filename.replace('\\', '/')
print(bio_filename)
ann_converter = BIOtoANN(bio_filename, source='stanford')
ann_converter.extract_bio_tags()
ann_converter.update_ann()
ann_converter.append_header()
tagged_ann_folder = "output/bio/stanford/ann/*.ann"
tagged_ann_files = glob.glob(tagged_ann_folder)
for ann_filename in tagged_ann_files:
ann_filename = ann_filename.replace('\\', '/')
print(ann_filename)
lif_converter = ANNtoLIF(ann_filename, source='stanford')
lif_converter.initialize_lif()
lif_converter.extract_text()
lif_converter.extract_tags()
print("Finish Evaluate all files! --- %s seconds ---" % (time.time() - start_eval))
print("Finish the whole Stanford CER Pipeline! --- %s seconds ---" % (time.time() - start_time))
| 35.442786
| 122
| 0.726558
|